| repo_name (stringlengths 5–92) | path (stringlengths 4–221) | copies (stringclasses, 19 values) | size (stringlengths 4–6) | content (stringlengths 766–896k) | license (stringclasses, 15 values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51–99.9) | line_max (int64, 32–997) | alpha_frac (float64, 0.25–0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5–13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ess-dmsc/do-ess-data-simulator
|
DonkiPlayer/DonkiOrchestraLib.py
|
1
|
7360
|
import zmq
import traceback
import socket
import time
class CommunicationClass:
def __init__(self, name='director'):
self.context = zmq.Context()
self.poller = zmq.Poller()
self.pub_sock = None
self.sub_socks = {}
self.pub_tag = name
#
self.create_pub_socket()
#-----------------------------------------------------------------------------------
# create_pub_socket:
#
#-----------------------------------------------------------------------------------
def create_pub_socket(self):
try:
self.pub_sock = self.context.socket(zmq.PUB)
self.pub_port = self.pub_sock.bind_to_random_port("tcp://0.0.0.0")
print "PUB " + "tcp://" + str(self.pub_port)
except:
traceback.print_exc()
self.pub_sock = None
#-----------------------------------------------------------------------------------
# create_sub_socket:
#
#-----------------------------------------------------------------------------------
def create_sub_socket(self, name, url):
try:
if name in self.sub_socks:
self.poller.unregister(self.sub_socks[name])
self.sub_socks[name].close()
self.sub_socks[name] = self.context.socket(zmq.SUB)
self.sub_socks[name].setsockopt(zmq.SUBSCRIBE, '')
self.sub_socks[name].connect("tcp://"+str(url))
self.poller.register(self.sub_socks[name], zmq.POLLIN)
#print "SUB TO " + "tcp://" + str(url),self.sub_socks[name]
except:
traceback.print_exc()
print "tcp://"+str(url)
del self.sub_socks[name]
return False
return True
#-----------------------------------------------------------------------------------
# my_pub_socket_info :
#
#-----------------------------------------------------------------------------------
def my_pub_socket_info(self):
return socket.gethostname()+":"+str(self.pub_port)
#-----------------------------------------------------------------------------------
# publish_ack :
#
#-----------------------------------------------------------------------------------
def publish_ack(self, ack_tag, trg_start, trg_stop):
# At the moment just use send_pyobj
self.pub_sock.send_pyobj([ack_tag, trg_start,trg_stop])
#-----------------------------------------------------------------------------------
# publish_data :
#
#-----------------------------------------------------------------------------------
def publish_data(self, tag, trg_start, trg_stop, data_value):
# At the moment just use send_pyobj
self.pub_sock.send_pyobj(['data',tag.lower(), trg_start,trg_stop,data_value])
#-----------------------------------------------------------------------------------
# publish_info :
#
#-----------------------------------------------------------------------------------
def publish_info( self, priority = -1, data_names=[]):
# At the moment just use send_pyobj
self.pub_sock.send_pyobj(['info',{'prio':priority,'data':data_names}])
#-----------------------------------------------------------------------------------
# ask_for_info :
#
#-----------------------------------------------------------------------------------
def ask_for_info(self, srv_name, timeout_sec=1):
# At the moment just use send_pyobj
self.pub_sock.send_pyobj(["info", srv_name])
msg = []
sub_socket = self.sub_socks[srv_name]
max_retries = 5
retry = 0
while retry < max_retries and msg == []:
socks = dict(self.poller.poll((1000./max_retries)*timeout_sec))
#if len(socks) == 0:
# return msg
if sub_socket in socks and socks[sub_socket] == zmq.POLLIN:
try:
reply = sub_socket.recv_pyobj()
if reply[0] == 'info':
msg = reply[1]
except:
traceback.print_exc()
msg = []
retry += 1
return msg
#-----------------------------------------------------------------------------------
# ask_for_log :
#
#-----------------------------------------------------------------------------------
def ask_for_log(self, srv_name, timeout_sec=1):
# At the moment just use send_pyobj
self.pub_sock.send_pyobj(["playerlog", srv_name])
msg = []
sub_socket = self.sub_socks[srv_name]
max_retries = 5
retry = 0
while retry < max_retries and msg == []:
socks = dict(self.poller.poll((1000./max_retries)*timeout_sec))
#if len(socks) == 0:
# return msg
if sub_socket in socks and socks[sub_socket] == zmq.POLLIN:
try:
reply = sub_socket.recv_pyobj()
if reply[0] == 'data' and reply[1] == 'playerlog':
msg = reply[4]
except:
traceback.print_exc()
msg = []
retry += 1
return msg
#-----------------------------------------------------------------------------------
# wait_message :
#
#-----------------------------------------------------------------------------------
def wait_message(self, srv_names, timeout_sec=1):
try:
msg = {}
socks = dict(self.poller.poll(1000*timeout_sec))
if len(socks) == 0:
return msg
for sn in srv_names:
s = self.sub_socks[sn]
if s in socks and socks[s] == zmq.POLLIN:
recv_msg = s.recv_pyobj()
msg[sn] = recv_msg
except:
traceback.print_exc()
msg = None
return msg
#-----------------------------------------------------------------------------------
# publish_command :
#
#-----------------------------------------------------------------------------------
def publish_command(self, command, srv_name, argin=None, timeout_sec=1):
# At the moment just use send_pyobj
self.pub_sock.send_pyobj([command, srv_name, argin])
print "Sent command:", command, srv_name, argin
msg = []
sub_socket = self.sub_socks[srv_name]
max_retries = 5
retry = 0
while retry < max_retries and msg == []:
socks = dict(self.poller.poll((1000./max_retries)*timeout_sec))
if sub_socket in socks and socks[sub_socket] == zmq.POLLIN:
try:
reply = sub_socket.recv_pyobj()
if reply[0] == command and reply[1] == reply[2] == -1:
return True
except:
traceback.print_exc()
return False
retry += 1
return False
#-----------------------------------------------------------------------------------
# publish_trigger :
#
#-----------------------------------------------------------------------------------
def publish_trigger(self, trigger_value, priority):
# At the moment just use send_pyobj
self.pub_sock.send_pyobj(["trigger", trigger_value, priority])
|
bsd-2-clause
| -3,240,108,281,225,997,000
| 37.134715
| 85
| 0.38125
| false
| 4.6494
| false
| false
| false
|
ragupta-git/ImcSdk
|
imcsdk/mometa/storage/StorageFlexFlashVirtualDrive.py
|
1
|
7941
|
"""This module contains the general information for StorageFlexFlashVirtualDrive ManagedObject."""
from ...imcmo import ManagedObject
from ...imccoremeta import MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class StorageFlexFlashVirtualDriveConsts:
ADMIN_ACTION_DISABLE_VD = "disable-vd"
ADMIN_ACTION_ENABLE_VD = "enable-vd"
ADMIN_ACTION_ERASE_VD = "erase-vd"
ADMIN_ACTION_SYNC_VD = "sync-vd"
ADMIN_ACTION_UPDATE_VD = "update-vd"
class StorageFlexFlashVirtualDrive(ManagedObject):
"""This is StorageFlexFlashVirtualDrive class."""
consts = StorageFlexFlashVirtualDriveConsts()
naming_props = set([u'partitionId'])
mo_meta = {
"classic": MoMeta("StorageFlexFlashVirtualDrive", "storageFlexFlashVirtualDrive", "vd-[partition_id]", VersionMeta.Version202c, "InputOutput", 0x1f, [], ["admin", "read-only", "user"], [u'storageFlexFlashController'], [u'faultInst'], ["Get", "Set"]),
"modular": MoMeta("StorageFlexFlashVirtualDrive", "storageFlexFlashVirtualDrive", "vd-[partition_id]", VersionMeta.Version2013e, "InputOutput", 0x1f, [], ["admin", "read-only", "user"], [u'storageFlexFlashController'], [u'faultInst'], ["Get", "Set"])
}
prop_meta = {
"classic": {
"admin_action": MoPropertyMeta("admin_action", "adminAction", "string", VersionMeta.Version202c, MoPropertyMeta.READ_WRITE, 0x2, 0, 510, None, ["disable-vd", "enable-vd", "erase-vd", "sync-vd", "update-vd"], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version202c, MoPropertyMeta.READ_WRITE, 0x4, 0, 255, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version202c, MoPropertyMeta.READ_WRITE, 0x8, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version202c, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version202c, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"drive_scope": MoPropertyMeta("drive_scope", "driveScope", "string", VersionMeta.Version202c, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"drive_status": MoPropertyMeta("drive_status", "driveStatus", "string", VersionMeta.Version202c, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"drive_type": MoPropertyMeta("drive_type", "driveType", "string", VersionMeta.Version202c, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"host_accessible": MoPropertyMeta("host_accessible", "hostAccessible", "string", VersionMeta.Version202c, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"last_operation_status": MoPropertyMeta("last_operation_status", "lastOperationStatus", "string", VersionMeta.Version202c, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"operation_in_progress": MoPropertyMeta("operation_in_progress", "operationInProgress", "string", VersionMeta.Version202c, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"partition_id": MoPropertyMeta("partition_id", "partitionId", "string", VersionMeta.Version202c, MoPropertyMeta.NAMING, None, 0, 510, None, [], []),
"size": MoPropertyMeta("size", "size", "string", VersionMeta.Version202c, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"virtual_drive": MoPropertyMeta("virtual_drive", "virtualDrive", "string", VersionMeta.Version202c, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
},
"modular": {
"admin_action": MoPropertyMeta("admin_action", "adminAction", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x2, 0, 510, None, ["disable-vd", "enable-vd", "erase-vd", "sync-vd", "update-vd"], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x4, 0, 255, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x8, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version2013e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"drive_scope": MoPropertyMeta("drive_scope", "driveScope", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"drive_status": MoPropertyMeta("drive_status", "driveStatus", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"drive_type": MoPropertyMeta("drive_type", "driveType", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"host_accessible": MoPropertyMeta("host_accessible", "hostAccessible", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"last_operation_status": MoPropertyMeta("last_operation_status", "lastOperationStatus", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"operation_in_progress": MoPropertyMeta("operation_in_progress", "operationInProgress", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"partition_id": MoPropertyMeta("partition_id", "partitionId", "string", VersionMeta.Version2013e, MoPropertyMeta.NAMING, None, 0, 510, None, [], []),
"size": MoPropertyMeta("size", "size", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"virtual_drive": MoPropertyMeta("virtual_drive", "virtualDrive", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
},
}
prop_map = {
"classic": {
"adminAction": "admin_action",
"dn": "dn",
"rn": "rn",
"status": "status",
"childAction": "child_action",
"driveScope": "drive_scope",
"driveStatus": "drive_status",
"driveType": "drive_type",
"hostAccessible": "host_accessible",
"lastOperationStatus": "last_operation_status",
"operationInProgress": "operation_in_progress",
"partitionId": "partition_id",
"size": "size",
"virtualDrive": "virtual_drive",
},
"modular": {
"adminAction": "admin_action",
"dn": "dn",
"rn": "rn",
"status": "status",
"childAction": "child_action",
"driveScope": "drive_scope",
"driveStatus": "drive_status",
"driveType": "drive_type",
"hostAccessible": "host_accessible",
"lastOperationStatus": "last_operation_status",
"operationInProgress": "operation_in_progress",
"partitionId": "partition_id",
"size": "size",
"virtualDrive": "virtual_drive",
},
}
def __init__(self, parent_mo_or_dn, partition_id, **kwargs):
self._dirty_mask = 0
self.partition_id = partition_id
self.admin_action = None
self.status = None
self.child_action = None
self.drive_scope = None
self.drive_status = None
self.drive_type = None
self.host_accessible = None
self.last_operation_status = None
self.operation_in_progress = None
self.size = None
self.virtual_drive = None
ManagedObject.__init__(self, "StorageFlexFlashVirtualDrive", parent_mo_or_dn, **kwargs)
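# -----------------------------------------------------------------------------------
# Editorial usage sketch (not part of the original module). The parent DN below
# is hypothetical; the real value depends on the endpoint's inventory, and
# committing the change would additionally require a connected handle (not shown).
if __name__ == '__main__':
    vd = StorageFlexFlashVirtualDrive(
        parent_mo_or_dn="sys/rack-unit-1/board/storage-flexflash-FlexFlash-0",
        partition_id="1")
    vd.admin_action = StorageFlexFlashVirtualDriveConsts.ADMIN_ACTION_ENABLE_VD
    # the DN is derived from the parent DN plus the naming property, e.g. ".../vd-1"
    print(vd.dn)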
|
apache-2.0
| -984,964,799,914,068,200
| 65.175
| 258
| 0.625866
| false
| 3.607905
| false
| false
| false
|
ErnieAllen/qpid-dispatch
|
tests/system_tests_protocol_settings.py
|
1
|
16207
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import unittest2 as unittest
from system_test import TestCase, Qdrouterd, main_module
from proton.utils import BlockingConnection
import subprocess
X86_64_ARCH = "x86_64"
skip_test = True
# Don't skip tests on 64-bit architectures.
p = subprocess.Popen("uname -m", shell=True, stdout=subprocess.PIPE,
universal_newlines=True)
if X86_64_ARCH in p.communicate()[0]:
skip_test = False
class MaxFrameMaxSessionFramesTest(TestCase):
"""System tests setting proton negotiated size max-frame-size and incoming-window"""
@classmethod
def setUpClass(cls):
'''Start a router'''
super(MaxFrameMaxSessionFramesTest, cls).setUpClass()
name = "MaxFrameMaxSessionFrames"
config = Qdrouterd.Config([
('router', {'mode': 'standalone', 'id': 'QDR'}),
('listener', {'host': '0.0.0.0', 'port': cls.tester.get_port(), 'maxFrameSize': '2048', 'maxSessionFrames': '10'}),
])
cls.router = cls.tester.qdrouterd(name, config)
cls.router.wait_ready()
cls.address = cls.router.addresses[0]
def test_max_frame_max_session_frames__max_sessions_default(self):
# Set up a connection to get the Open and a receiver to get a Begin frame in the log
bc = BlockingConnection(self.router.addresses[0])
bc.create_receiver("xxx")
bc.close()
with open('../setUpClass/MaxFrameMaxSessionFrames.log', 'r') as router_log:
log_lines = router_log.read().split("\n")
open_lines = [s for s in log_lines if "-> @open" in s]
# max-frame is from the config
self.assertTrue(' max-frame-size=2048,' in open_lines[0])
# channel-max is default
self.assertTrue(" channel-max=32767" in open_lines[0])
begin_lines = [s for s in log_lines if "-> @begin" in s]
# incoming-window is from the config
self.assertTrue(" incoming-window=10," in begin_lines[0] )
class MaxSessionsTest(TestCase):
"""System tests setting proton channel-max"""
@classmethod
def setUpClass(cls):
"""Start a router and a messenger"""
super(MaxSessionsTest, cls).setUpClass()
name = "MaxSessions"
config = Qdrouterd.Config([
('router', {'mode': 'standalone', 'id': 'QDR'}),
('listener', {'host': '0.0.0.0', 'port': cls.tester.get_port(), 'maxSessions': '10'}),
])
cls.router = cls.tester.qdrouterd(name, config)
cls.router.wait_ready()
cls.address = cls.router.addresses[0]
def test_max_sessions(self):
# Set up a connection to get the Open and a receiver to get a Begin frame in the log
bc = BlockingConnection(self.router.addresses[0])
bc.create_receiver("xxx")
bc.close()
with open('../setUpClass/MaxSessions.log', 'r') as router_log:
log_lines = router_log.read().split("\n")
open_lines = [s for s in log_lines if "-> @open" in s]
# channel-max is 10
self.assertTrue(" channel-max=9" in open_lines[0])
class MaxSessionsZeroTest(TestCase):
"""System tests setting proton channel-max"""
@classmethod
def setUpClass(cls):
"""Start a router and a messenger"""
super(MaxSessionsZeroTest, cls).setUpClass()
name = "MaxSessionsZero"
config = Qdrouterd.Config([
('router', {'mode': 'standalone', 'id': 'QDR'}),
('listener', {'host': '0.0.0.0', 'port': cls.tester.get_port(), 'maxSessions': '0'}),
])
cls.router = cls.tester.qdrouterd(name, config)
cls.router.wait_ready()
cls.address = cls.router.addresses[0]
def test_max_sessions_zero(self):
# Set up a connection to get the Open and a receiver to get a Begin frame in the log
bc = BlockingConnection(self.router.addresses[0])
bc.create_receiver("xxx")
bc.close()
with open('../setUpClass/MaxSessionsZero.log', 'r') as router_log:
log_lines = router_log.read().split("\n")
open_lines = [s for s in log_lines if "-> @open" in s]
# channel-max is 0. Should get proton default 32767
self.assertTrue(" channel-max=32767" in open_lines[0])
class MaxSessionsLargeTest(TestCase):
"""System tests setting proton channel-max"""
@classmethod
def setUpClass(cls):
"""Start a router and a messenger"""
super(MaxSessionsLargeTest, cls).setUpClass()
name = "MaxSessionsLarge"
config = Qdrouterd.Config([
('router', {'mode': 'standalone', 'id': 'QDR'}),
('listener', {'host': '0.0.0.0', 'port': cls.tester.get_port(), 'maxSessions': '500000'}),
])
cls.router = cls.tester.qdrouterd(name, config)
cls.router.wait_ready()
cls.address = cls.router.addresses[0]
def test_max_sessions_large(self):
# Set up a connection to get the Open and a receiver to get a Begin frame in the log
bc = BlockingConnection(self.router.addresses[0])
bc.create_receiver("xxx")
bc.close()
with open('../setUpClass/MaxSessionsLarge.log', 'r') as router_log:
log_lines = router_log.read().split("\n")
open_lines = [s for s in log_lines if "-> @open" in s]
            # maxSessions is 500000, beyond the protocol limit. Should get proton default 32767
self.assertTrue(" channel-max=32767" in open_lines[0])
class MaxFrameSmallTest(TestCase):
"""System tests setting proton max-frame-size"""
@classmethod
def setUpClass(cls):
"""Start a router and a messenger"""
super(MaxFrameSmallTest, cls).setUpClass()
name = "MaxFrameSmall"
config = Qdrouterd.Config([
('router', {'mode': 'standalone', 'id': 'QDR'}),
('listener', {'host': '0.0.0.0', 'port': cls.tester.get_port(), 'maxFrameSize': '2'}),
])
cls.router = cls.tester.qdrouterd(name, config)
cls.router.wait_ready()
cls.address = cls.router.addresses[0]
def test_max_frame_small(self):
# Set up a connection to get the Open and a receiver to get a Begin frame in the log
bc = BlockingConnection(self.router.addresses[0])
bc.create_receiver("xxx")
bc.close()
with open('../setUpClass/MaxFrameSmall.log', 'r') as router_log:
log_lines = router_log.read().split("\n")
open_lines = [s for s in log_lines if "-> @open" in s]
            # if frame size <= 512, proton sets the minimum of 512
self.assertTrue(" max-frame-size=512" in open_lines[0])
class MaxFrameDefaultTest(TestCase):
"""System tests setting proton max-frame-size"""
@classmethod
def setUpClass(cls):
"""Start a router and a messenger"""
super(MaxFrameDefaultTest, cls).setUpClass()
name = "MaxFrameDefault"
config = Qdrouterd.Config([
('router', {'mode': 'standalone', 'id': 'QDR'}),
('listener', {'host': '0.0.0.0', 'port': cls.tester.get_port()}),
])
cls.router = cls.tester.qdrouterd(name, config)
cls.router.wait_ready()
cls.address = cls.router.addresses[0]
def test_max_frame_default(self):
# Set up a connection to get the Open and a receiver to get a Begin frame in the log
bc = BlockingConnection(self.router.addresses[0])
bc.create_receiver("xxx")
bc.close()
with open('../setUpClass/MaxFrameDefault.log', 'r') as router_log:
log_lines = router_log.read().split("\n")
open_lines = [s for s in log_lines if "-> @open" in s]
# if frame size not set then a default is used
self.assertTrue(" max-frame-size=16384" in open_lines[0])
class MaxSessionFramesDefaultTest(TestCase):
"""System tests setting proton max-frame-size"""
@classmethod
def setUpClass(cls):
"""Start a router and a messenger"""
super(MaxSessionFramesDefaultTest, cls).setUpClass()
name = "MaxSessionFramesDefault"
config = Qdrouterd.Config([
('router', {'mode': 'standalone', 'id': 'QDR'}),
('listener', {'host': '0.0.0.0', 'port': cls.tester.get_port()}),
])
cls.router = cls.tester.qdrouterd(name, config)
cls.router.wait_ready()
cls.address = cls.router.addresses[0]
def test_max_session_frames_default(self):
# Set up a connection to get the Open and a receiver to get a Begin frame in the log
if skip_test:
return self.skipTest("Test skipped on non-64 bit architectures")
bc = BlockingConnection(self.router.addresses[0])
bc.create_receiver("xxx")
bc.close()
with open('../setUpClass/MaxSessionFramesDefault.log', 'r') as router_log:
log_lines = router_log.read().split("\n")
open_lines = [s for s in log_lines if "-> @open" in s]
# if frame size not set then a default is used
self.assertTrue(" max-frame-size=16384" in open_lines[0])
begin_lines = [s for s in log_lines if "-> @begin" in s]
# incoming-window is from the config
self.assertTrue(" incoming-window=2147483647," in begin_lines[0])
class MaxFrameMaxSessionFramesZeroTest(TestCase):
"""
System tests setting proton negotiated size max-frame-size and incoming-window
when they are both zero. Frame size is bumped up to the minimum and capacity is
bumped up to have an incoming window of 1
"""
@classmethod
def setUpClass(cls):
'''Start a router'''
super(MaxFrameMaxSessionFramesZeroTest, cls).setUpClass()
name = "MaxFrameMaxSessionFramesZero"
config = Qdrouterd.Config([
('router', {'mode': 'standalone', 'id': 'QDR'}),
('listener', {'host': '0.0.0.0', 'port': cls.tester.get_port(), 'maxFrameSize': '0', 'maxSessionFrames': '0'}),
])
cls.router = cls.tester.qdrouterd(name, config)
cls.router.wait_ready()
cls.address = cls.router.addresses[0]
def test_max_frame_max_session_zero(self):
# Set up a connection to get the Open and a receiver to get a Begin frame in the log
if skip_test:
return self.skipTest("Test disabled on non-64 bit architectures")
bc = BlockingConnection(self.router.addresses[0])
bc.create_receiver("xxx")
bc.close()
with open('../setUpClass/MaxFrameMaxSessionFramesZero.log', 'r') as router_log:
log_lines = router_log.read().split("\n")
open_lines = [s for s in log_lines if "-> @open" in s]
# max-frame gets set to protocol min
self.assertTrue(' max-frame-size=512,' in open_lines[0])
begin_lines = [s for s in log_lines if "-> @begin" in s]
# incoming-window is defaulted to 2^31-1
self.assertTrue(" incoming-window=2147483647," in begin_lines[0])
class ConnectorSettingsDefaultTest(TestCase):
"""
The internal logic for protocol settings in listener and connector
is common code. This test makes sure that defaults in the connector
config make it to the wire.
"""
inter_router_port = None
@staticmethod
def ssl_config(client_server, connection):
return [] # Over-ridden by RouterTestSsl
@classmethod
def setUpClass(cls):
"""Start two routers"""
super(ConnectorSettingsDefaultTest, cls).setUpClass()
def router(name, client_server, connection):
config = cls.ssl_config(client_server, connection) + [
('router', {'mode': 'interior', 'id': 'QDR.%s' % name}),
('listener', {'port': cls.tester.get_port()}),
connection
]
config = Qdrouterd.Config(config)
cls.routers.append(cls.tester.qdrouterd(name, config, wait=True))
cls.routers = []
inter_router_port = cls.tester.get_port()
router('A', 'server',
('listener', {'role': 'inter-router', 'port': inter_router_port}))
router('B', 'client',
('connector', {'name': 'connectorToA', 'role': 'inter-router', 'port': inter_router_port,
'verifyHostname': 'no'}))
cls.routers[0].wait_router_connected('QDR.B')
cls.routers[1].wait_router_connected('QDR.A')
def test_connector_default(self):
if skip_test:
return self.skipTest("Test disabled on non-64 bit architectures")
with open('../setUpClass/A.log', 'r') as router_log:
log_lines = router_log.read().split("\n")
open_lines = [s for s in log_lines if "<- @open" in s]
# defaults
self.assertTrue(' max-frame-size=16384,' in open_lines[0])
self.assertTrue(' channel-max=32767,' in open_lines[0])
begin_lines = [s for s in log_lines if "<- @begin" in s]
# defaults
self.assertTrue(" incoming-window=2147483647," in begin_lines[0])
class ConnectorSettingsNondefaultTest(TestCase):
"""
The internal logic for protocol settings in listener and connector
is common code. This test makes sure that settings in the connector
config make it to the wire. The listener tests test the setting logic.
"""
inter_router_port = None
@staticmethod
def ssl_config(client_server, connection):
return [] # Over-ridden by RouterTestSsl
@classmethod
def setUpClass(cls):
"""Start two routers"""
super(ConnectorSettingsNondefaultTest, cls).setUpClass()
def router(name, client_server, connection):
config = cls.ssl_config(client_server, connection) + [
('router', {'mode': 'interior', 'id': 'QDR.%s' % name}),
('listener', {'port': cls.tester.get_port()}),
connection
]
config = Qdrouterd.Config(config)
cls.routers.append(cls.tester.qdrouterd(name, config, wait=True))
cls.routers = []
inter_router_port = cls.tester.get_port()
router('A', 'server',
('listener', {'role': 'inter-router', 'port': inter_router_port}))
router('B', 'client',
('connector', {'name': 'connectorToA', 'role': 'inter-router', 'port': inter_router_port,
'maxFrameSize': '2048', 'maxSessionFrames': '10', 'maxSessions': '20',
'verifyHostname': 'no'}))
cls.routers[0].wait_router_connected('QDR.B')
cls.routers[1].wait_router_connected('QDR.A')
def test_connector_default(self):
with open('../setUpClass/A.log', 'r') as router_log:
log_lines = router_log.read().split("\n")
open_lines = [s for s in log_lines if "<- @open" in s]
# nondefaults
self.assertTrue(' max-frame-size=2048,' in open_lines[0])
self.assertTrue(' channel-max=19,' in open_lines[0])
begin_lines = [s for s in log_lines if "<- @begin" in s]
# nondefaults
self.assertTrue(" incoming-window=10," in begin_lines[0])
if __name__ == '__main__':
unittest.main(main_module())
|
apache-2.0
| -6,041,756,895,692,285,000
| 39.31592
| 127
| 0.604554
| false
| 3.805353
| true
| false
| false
|
acressity/acressity
|
narratives/forms.py
|
1
|
2019
|
from datetime import date
from django import forms
from narratives.models import Narrative
from django.forms.extras.widgets import SelectDateWidget
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.http import HttpResponse
from django.core.exceptions import PermissionDenied
TRANSFER_ACTION_CHOICES = (
('', '-----'),
(1, _('Transfer')),
(2, _('Copy')),
)
class NarrativeForm(forms.ModelForm):
date_created = forms.DateField(widget=SelectDateWidget(years=range(timezone.now().year, timezone.now().year - 110, -1)), required=False)
title = forms.CharField(widget=forms.TextInput(attrs={'class': 'larger', 'onfocus': 'if($(this).val()==this.defaultValue){$(this).val("")};', 'onblur': 'if($(this).val()==""){$(this).val(this.defaultValue)};'})) # default value moved to views.py
class Meta:
model = Narrative
exclude = ('gallery', 'author')
def __init__(self, *args, **kwargs):
self.author = kwargs.pop('author', None)
super(NarrativeForm, self).__init__(*args, **kwargs)
self.fields['experience'].queryset = self.author.experiences.all()
def save(self, commit=True):
instance = super(NarrativeForm, self).save(commit=False)
if self.author:
instance.author = self.author
if commit:
instance.save()
return instance
def clean_date_created(self):
date_created = self.cleaned_data.get('date_created')
if not date_created:
date_created = timezone.now()
return date_created
def clean_body(self):
body = self.cleaned_data.get('body')
if len(body) < 3:
raise forms.ValidationError('The narrative body needs a little more extrapolation')
return body
class NarrativeTransferForm(forms.ModelForm):
potential_actions = forms.ChoiceField(choices=TRANSFER_ACTION_CHOICES, required=False)
class Meta:
model = Narrative
fields = ('title',)
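# Editorial usage sketch (not part of the original module); `some_explorer` is a
# hypothetical user object exposing an `experiences` related manager, and saving
# requires a configured Django database, so the sketch is left commented out.
#
#   form = NarrativeForm(
#       data={'title': 'First ascent', 'body': 'A longer account...'},
#       author=some_explorer)
#   if form.is_valid():
#       narrative = form.save()   # the author is attached inside save()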
|
gpl-3.0
| -5,569,418,809,155,219,000
| 34.421053
| 250
| 0.65577
| false
| 3.867816
| false
| false
| false
|
rigetticomputing/grove
|
grove/tomography/state_tomography.py
|
1
|
11664
|
##############################################################################
# Copyright 2017-2018 Rigetti Computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import logging
import numpy as np
import matplotlib.pyplot as plt
from pyquil.quilbase import Pragma
from scipy.sparse import csr_matrix, coo_matrix
from pyquil.quil import Program
import grove.tomography.operator_utils
from grove.tomography.tomography import TomographyBase, TomographySettings, DEFAULT_SOLVER_KWARGS
from grove.tomography import tomography
import grove.tomography.utils as ut
import grove.tomography.operator_utils as o_ut
_log = logging.getLogger(__name__)
qt = ut.import_qutip()
cvxpy = ut.import_cvxpy()
UNIT_TRACE = 'unit_trace'
POSITIVE = 'positive'
DEFAULT_STATE_TOMO_SETTINGS = TomographySettings(
constraints={UNIT_TRACE},
solver_kwargs=DEFAULT_SOLVER_KWARGS
)
def _prepare_c_jk_m(readout_povm, pauli_basis, channel_ops):
"""
Prepare the coefficient matrix for state tomography. This function uses sparse matrices
for much greater efficiency.
The coefficient matrix is defined as:
.. math::
C_{(jk)m} = \tr{\Pi_{s_j} \Lambda_k(P_m)} = \sum_{r}\pi_{jr}(\mathcal{R}_{k})_{rm}
where :math:`\Lambda_k(\cdot)` is the quantum map corresponding to the k-th pre-measurement
channel, i.e., :math:`\Lambda_k(\rho) = E_k \rho E_k^\dagger` where :math:`E_k` is the k-th
channel operator. This map can also be represented via its transfer matrix
:math:`\mathcal{R}_{k}`. In that case one also requires the overlap between the (generalized)
Pauli basis ops and the projection operators
:math:`\pi_{jl}:=\sbraket{\Pi_j}{P_l} = \tr{\Pi_j P_l}`.
See the grove documentation on tomography for detailed information.
:param DiagonalPOVM readout_povm: The POVM corresponding to the readout plus classifier.
:param OperatorBasis pauli_basis: The (generalized) Pauli basis employed in the estimation.
:param list channel_ops: The pre-measurement channel operators as `qutip.Qobj`
:return: The coefficient matrix necessary to set up the binomial state tomography problem.
:rtype: scipy.sparse.csr_matrix
"""
channel_transfer_matrices = [pauli_basis.transfer_matrix(qt.to_super(ek)) for ek in channel_ops]
# This bit could be more efficient but does not run super long and is thus preserved for
# readability.
pi_jr = csr_matrix(
[pauli_basis.project_op(n_j).toarray().ravel()
for n_j in readout_povm.ops])
# Dict used for constructing our sparse matrix, keys are tuples (row_index, col_index), values
# are the non-zero elements of the final matrix.
c_jk_m_elms = {}
# This explicitly exploits the sparsity of all operators involved
for k in range(len(channel_ops)):
pi_jr__rk_rm = (pi_jr * channel_transfer_matrices[k]).tocoo()
for (j, m, val) in ut.izip(pi_jr__rk_rm.row, pi_jr__rk_rm.col, pi_jr__rk_rm.data):
# The multi-index (j,k) is enumerated in column-major ordering (like Fortran arrays)
c_jk_m_elms[(j + k * readout_povm.pi_basis.dim, m)] = val.real
# create sparse matrix from COO-format (see scipy.sparse docs)
_keys, _values = ut.izip(*c_jk_m_elms.items())
_rows, _cols = ut.izip(*_keys)
c_jk_m = coo_matrix((list(_values), (list(_rows), list(_cols))),
shape=(readout_povm.pi_basis.dim * len(channel_ops),
pauli_basis.dim)).tocsr()
return c_jk_m
class StateTomography(TomographyBase):
"""
A StateTomography object encapsulates the result of quantum state estimation from tomographic
data. It provides convenience functions for visualization and computing state fidelities.
"""
__tomography_type__ = "STATE"
@staticmethod
def estimate_from_ssr(histograms, readout_povm, channel_ops, settings):
"""
Estimate a density matrix from single shot histograms obtained by measuring bitstrings in
the Z-eigenbasis after application of given channel operators.
:param numpy.ndarray histograms: The single shot histograms, `shape=(n_channels, dim)`.
        :param DiagonalPOVM readout_povm: The POVM corresponding to the readout plus classifier.
:param list channel_ops: The tomography measurement channels as `qutip.Qobj`'s.
:param TomographySettings settings: The solver and estimation settings.
:return: The generated StateTomography object.
:rtype: StateTomography
"""
nqc = len(channel_ops[0].dims[0])
pauli_basis = grove.tomography.operator_utils.PAULI_BASIS ** nqc
pi_basis = readout_povm.pi_basis
if not histograms.shape[1] == pi_basis.dim: # pragma no coverage
raise ValueError("Currently tomography is only implemented for two-level systems.")
# prepare the log-likelihood function parameters, see documentation
n_kj = np.asarray(histograms)
c_jk_m = _prepare_c_jk_m(readout_povm, pauli_basis, channel_ops)
rho_m = cvxpy.Variable(pauli_basis.dim)
p_jk = c_jk_m * rho_m
obj = -n_kj.ravel() * cvxpy.log(p_jk)
p_jk_mat = cvxpy.reshape(p_jk, pi_basis.dim, len(channel_ops)) # cvxpy has col-major order
# Default constraints:
# MLE must describe valid probability distribution
# i.e., for each k, p_jk must sum to one and be element-wise non-negative:
# 1. \sum_j p_jk == 1 for all k
# 2. p_jk >= 0 for all j, k
# where p_jk = \sum_m c_jk_m rho_m
constraints = [
p_jk >= 0,
np.matrix(np.ones((1, pi_basis.dim))) * p_jk_mat == 1,
]
rho_m_real_imag = sum((rm * o_ut.to_realimag(Pm)
for (rm, Pm) in ut.izip(rho_m, pauli_basis.ops)), 0)
if POSITIVE in settings.constraints:
if tomography._SDP_SOLVER.is_functional():
constraints.append(rho_m_real_imag >> 0)
else: # pragma no coverage
_log.warning("No convex solver capable of semi-definite problems installed.\n"
"Dropping the positivity constraint on the density matrix.")
if UNIT_TRACE in settings.constraints:
# this assumes that the first element of the Pauli basis is always proportional to
# the identity
constraints.append(rho_m[0, 0] == 1. / pauli_basis.ops[0].tr().real)
prob = cvxpy.Problem(cvxpy.Minimize(obj), constraints)
_log.info("Starting convex solver")
prob.solve(solver=tomography.SOLVER, **settings.solver_kwargs)
if prob.status != cvxpy.OPTIMAL: # pragma no coverage
_log.warning("Problem did not converge to optimal solution. "
"Solver settings: {}".format(settings.solver_kwargs))
return StateTomography(np.array(rho_m.value).ravel(), pauli_basis, settings)
def __init__(self, rho_coeffs, pauli_basis, settings):
"""
Construct a StateTomography to encapsulate the result of estimating the quantum state from
a quantum tomography measurement.
        :param numpy.ndarray rho_coeffs: The estimated quantum state represented in a given (generalized)
Pauli basis.
:param OperatorBasis pauli_basis: The employed (generalized) Pauli basis.
:param TomographySettings settings: The settings used to estimate the state.
"""
self.rho_coeffs = rho_coeffs
self.pauli_basis = pauli_basis
self.rho_est = sum((r_m * p_m for r_m, p_m in ut.izip(rho_coeffs, pauli_basis.ops)))
self.settings = settings
def fidelity(self, other):
"""
Compute the quantum state fidelity of the estimated state with another state.
:param qutip.Qobj other: The other quantum state.
:return: The fidelity, a real number between 0 and 1.
:rtype: float
"""
return qt.fidelity(self.rho_est, other)
def plot_state_histogram(self, ax):
"""
Visualize the complex matrix elements of the estimated state.
:param matplotlib.Axes ax: A matplotlib Axes object to plot into.
"""
title = "Estimated state"
nqc = int(round(np.log2(self.rho_est.data.shape[0])))
labels = ut.basis_labels(nqc)
return ut.state_histogram(self.rho_est, ax, title)
def plot(self):
"""
Visualize the state.
:return: The generated figure.
:rtype: matplotlib.Figure
"""
width = 10
# The pleasing golden ratio.
height = width / 1.618
f = plt.figure(figsize=(width, height))
ax = f.add_subplot(111, projection="3d")
self.plot_state_histogram(ax)
return f
def state_tomography_programs(state_prep, qubits=None,
rotation_generator=tomography.default_rotations):
"""
Yield tomographic sequences that prepare a state with Quil program `state_prep` and then append
tomographic rotations on the specified `qubits`. If `qubits is None`, it assumes all qubits in
the program should be tomographically rotated.
:param Program state_prep: The program to prepare the state to be tomographed.
:param list|NoneType qubits: A list of Qubits or Numbers, to perform the tomography on. If
`None`, performs it on all in state_prep.
:param generator rotation_generator: A generator that yields tomography rotations to perform.
:return: Program for state tomography.
:rtype: Program
"""
if qubits is None:
qubits = state_prep.get_qubits()
for tomography_program in rotation_generator(*qubits):
state_tomography_program = Program(Pragma("PRESERVE_BLOCK"))
state_tomography_program.inst(state_prep)
state_tomography_program.inst(tomography_program)
state_tomography_program.inst(Pragma("END_PRESERVE_BLOCK"))
yield state_tomography_program
def do_state_tomography(preparation_program, nsamples, cxn, qubits=None, use_run=False):
"""
Method to perform both a QPU and QVM state tomography, and use the latter as
as reference to calculate the fidelity of the former.
:param Program preparation_program: Program to execute.
:param int nsamples: Number of samples to take for the program.
:param QVMConnection|QPUConnection cxn: Connection on which to run the program.
:param list qubits: List of qubits for the program.
to use in the tomography analysis.
:param bool use_run: If ``True``, use append measurements on all qubits and use ``cxn.run``
instead of ``cxn.run_and_measure``.
:return: The state tomogram.
:rtype: StateTomography
"""
return tomography._do_tomography(preparation_program, nsamples, cxn, qubits,
tomography.MAX_QUBITS_STATE_TOMO,
StateTomography, state_tomography_programs,
DEFAULT_STATE_TOMO_SETTINGS, use_run=use_run)
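# -----------------------------------------------------------------------------------
# Editorial usage sketch (not part of the original module). The pyquil imports and
# the QVM connection below are assumptions about the pyquil version grove targets;
# a running QVM is required, so the sketch is left commented out.
#
#   from pyquil.api import QVMConnection
#   from pyquil.gates import H, CNOT
#
#   prep = Program(H(0), CNOT(0, 1))      # prepare a Bell state
#   cxn = QVMConnection()
#   # per the docstring above, the result is the state tomogram
#   tomo = do_state_tomography(prep, nsamples=1000, cxn=cxn, qubits=[0, 1])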
|
apache-2.0
| -8,882,595,617,667,638,000
| 43.015094
| 100
| 0.650034
| false
| 3.650704
| false
| false
| false
|
ecohealthalliance/eidr-connect
|
.scripts/utils.py
|
1
|
1603
|
import re
import requests
import os
import functools
import json
try:
from functools import lru_cache
except ImportError:
from backports.functools_lru_cache import lru_cache
GRITS_URL = os.environ.get("GRITS_URL", "https://grits.eha.io")
def clean(s):
return re.sub(r"\s+", " ", s).strip()
def clean_disease_name(s):
# Modifiers that make case counts more specific need to be treated
# specially because constraining counts for the general disease cannot be
# created from them.
# s = re.sub(r"^(Highly Pathogenic|Virulent|Suspected)", "", s, re.I)
# s = re.sub(" Serotype .+$", "", s, re.I)
# Remove hyphens
s = re.sub(r"\-", "", s)
s = re.sub(r"\(.*\)", "", s)
s = re.sub(r"\[.*\]", "", s)
return clean(s)
@lru_cache()
def lookup_geoname(name):
resp = requests.get(GRITS_URL + "/api/geoname_lookup/api/lookup", params={
"q": name
})
result = json.loads(resp.text)["hits"][0]["_source"]
del result["alternateNames"]
del result["rawNames"]
del result["asciiName"]
del result["cc2"]
del result["elevation"]
del result["dem"]
del result["timezone"]
del result["modificationDate"]
return result
@lru_cache()
def lookup_disease(name):
if len(name) == 0:
return None
resp = requests.get(GRITS_URL + "/api/v1/disease_ontology/lookup", params={
"q": name
})
result = resp.json()
first_result = next(iter(result["result"]), None)
if first_result:
return {
"id": first_result["id"],
"text": first_result["label"]
}
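# -----------------------------------------------------------------------------------
# Editorial usage sketch (not part of the original module). The lookups below call
# the live GRITS service at GRITS_URL, so they need network access and may fail if
# that service is unavailable.
if __name__ == "__main__":
    print(clean_disease_name("Influenza - A (H7N9)"))  # -> "Influenza A"
    print(lookup_disease("rabies"))
    print(lookup_geoname("Berlin"))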
|
apache-2.0
| -169,896,742,788,247,500
| 24.870968
| 79
| 0.601996
| false
| 3.238384
| false
| false
| false
|
kevin-intel/scikit-learn
|
sklearn/datasets/_openml.py
|
2
|
34451
|
import gzip
import json
import os
import shutil
import hashlib
from os.path import join
from warnings import warn
from contextlib import closing
from functools import wraps
from typing import Callable, Optional, Dict, Tuple, List, Any, Union
import itertools
from collections.abc import Generator
from collections import OrderedDict
from functools import partial
from urllib.request import urlopen, Request
import numpy as np
import scipy.sparse
from ..externals import _arff
from ..externals._arff import ArffSparseDataType, ArffContainerType
from . import get_data_home
from urllib.error import HTTPError
from ..utils import Bunch
from ..utils import is_scalar_nan
from ..utils import get_chunk_n_rows
from ..utils import _chunk_generator
from ..utils import check_pandas_support # noqa
__all__ = ['fetch_openml']
_OPENML_PREFIX = "https://openml.org/"
_SEARCH_NAME = "api/v1/json/data/list/data_name/{}/limit/2"
_DATA_INFO = "api/v1/json/data/{}"
_DATA_FEATURES = "api/v1/json/data/features/{}"
_DATA_QUALITIES = "api/v1/json/data/qualities/{}"
_DATA_FILE = "data/v1/download/{}"
OpenmlQualitiesType = List[Dict[str, str]]
OpenmlFeaturesType = List[Dict[str, str]]
def _get_local_path(openml_path: str, data_home: str) -> str:
return os.path.join(data_home, 'openml.org', openml_path + ".gz")
def _retry_with_clean_cache(
openml_path: str, data_home: Optional[str]
) -> Callable:
"""If the first call to the decorated function fails, the local cached
file is removed, and the function is called again. If ``data_home`` is
``None``, then the function is called once.
"""
def decorator(f):
@wraps(f)
def wrapper(*args, **kw):
if data_home is None:
return f(*args, **kw)
try:
return f(*args, **kw)
except HTTPError:
raise
except Exception:
warn("Invalid cache, redownloading file", RuntimeWarning)
local_path = _get_local_path(openml_path, data_home)
if os.path.exists(local_path):
os.unlink(local_path)
return f(*args, **kw)
return wrapper
return decorator
def _open_openml_url(openml_path: str, data_home: Optional[str]):
"""
Returns a resource from OpenML.org. Caches it to data_home if required.
Parameters
----------
openml_path : str
        OpenML URL that will be accessed. This will be prefixed with
_OPENML_PREFIX
data_home : str
Directory to which the files will be cached. If None, no caching will
be applied.
Returns
-------
result : stream
A stream to the OpenML resource
"""
def is_gzip_encoded(_fsrc):
return _fsrc.info().get('Content-Encoding', '') == 'gzip'
req = Request(_OPENML_PREFIX + openml_path)
req.add_header('Accept-encoding', 'gzip')
if data_home is None:
fsrc = urlopen(req)
if is_gzip_encoded(fsrc):
return gzip.GzipFile(fileobj=fsrc, mode='rb')
return fsrc
local_path = _get_local_path(openml_path, data_home)
if not os.path.exists(local_path):
try:
os.makedirs(os.path.dirname(local_path))
except OSError:
# potentially, the directory has been created already
pass
try:
with closing(urlopen(req)) as fsrc:
opener: Callable
if is_gzip_encoded(fsrc):
opener = open
else:
opener = gzip.GzipFile
with opener(local_path, 'wb') as fdst:
shutil.copyfileobj(fsrc, fdst)
except Exception:
if os.path.exists(local_path):
os.unlink(local_path)
raise
# XXX: First time, decompression will not be necessary (by using fsrc), but
# it will happen nonetheless
return gzip.GzipFile(local_path, 'rb')
class OpenMLError(ValueError):
"""HTTP 412 is a specific OpenML error code, indicating a generic error"""
pass
def _get_json_content_from_openml_api(
url: str,
error_message: Optional[str],
data_home: Optional[str]
) -> Dict:
"""
Loads json data from the openml api
Parameters
----------
url : str
The URL to load from. Should be an official OpenML endpoint
error_message : str or None
The error message to raise if an acceptable OpenML error is thrown
(acceptable error is, e.g., data id not found. Other errors, like 404's
will throw the native error message)
data_home : str or None
Location to cache the response. None if no cache is required.
Returns
-------
json_data : json
the json result from the OpenML server if the call was successful.
An exception otherwise.
"""
@_retry_with_clean_cache(url, data_home)
def _load_json():
with closing(_open_openml_url(url, data_home)) as response:
return json.loads(response.read().decode("utf-8"))
try:
return _load_json()
except HTTPError as error:
# 412 is an OpenML specific error code, indicating a generic error
# (e.g., data not found)
if error.code != 412:
raise error
# 412 error, not in except for nicer traceback
raise OpenMLError(error_message)
def _split_sparse_columns(
arff_data: ArffSparseDataType, include_columns: List
) -> ArffSparseDataType:
"""
obtains several columns from sparse arff representation. Additionally, the
column indices are re-labelled, given the columns that are not included.
(e.g., when including [1, 2, 3], the columns will be relabelled to
[0, 1, 2])
Parameters
----------
arff_data : tuple
A tuple of three lists of equal size; first list indicating the value,
second the x coordinate and the third the y coordinate.
include_columns : list
A list of columns to include.
Returns
-------
arff_data_new : tuple
Subset of arff data with only the include columns indicated by the
include_columns argument.
"""
arff_data_new: ArffSparseDataType = (list(), list(), list())
reindexed_columns = {column_idx: array_idx for array_idx, column_idx
in enumerate(include_columns)}
for val, row_idx, col_idx in zip(arff_data[0], arff_data[1], arff_data[2]):
if col_idx in include_columns:
arff_data_new[0].append(val)
arff_data_new[1].append(row_idx)
arff_data_new[2].append(reindexed_columns[col_idx])
return arff_data_new
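# Editor-added illustration (not part of scikit-learn): with the sparse triplet
# layout (values, row indices, column indices), keeping columns [1, 3] also
# relabels them to [0, 1]:
#
#   _split_sparse_columns(([7.0, 8.0, 9.0], [0, 0, 1], [1, 3, 3]), [1, 3])
#   -> ([7.0, 8.0, 9.0], [0, 0, 1], [0, 1, 1])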
def _sparse_data_to_array(
arff_data: ArffSparseDataType, include_columns: List
) -> np.ndarray:
# turns the sparse data back into an array (can't use toarray() function,
# as this does only work on numeric data)
num_obs = max(arff_data[1]) + 1
y_shape = (num_obs, len(include_columns))
reindexed_columns = {column_idx: array_idx for array_idx, column_idx
in enumerate(include_columns)}
# TODO: improve for efficiency
y = np.empty(y_shape, dtype=np.float64)
for val, row_idx, col_idx in zip(arff_data[0], arff_data[1], arff_data[2]):
if col_idx in include_columns:
y[row_idx, reindexed_columns[col_idx]] = val
return y
def _convert_arff_data(
arff: ArffContainerType,
col_slice_x: List[int],
col_slice_y: List[int],
shape: Optional[Tuple] = None
) -> Tuple:
"""
converts the arff object into the appropriate matrix type (np.array or
scipy.sparse.csr_matrix) based on the 'data part' (i.e., in the
liac-arff dict, the object from the 'data' key)
Parameters
----------
arff : dict
As obtained from liac-arff object.
col_slice_x : list
The column indices that are sliced from the original array to return
as X data
col_slice_y : list
The column indices that are sliced from the original array to return
as y data
Returns
-------
X : np.array or scipy.sparse.csr_matrix
y : np.array
"""
arff_data = arff['data']
if isinstance(arff_data, Generator):
if shape is None:
raise ValueError(
"shape must be provided when arr['data'] is a Generator"
)
if shape[0] == -1:
count = -1
else:
count = shape[0] * shape[1]
data = np.fromiter(itertools.chain.from_iterable(arff_data),
dtype='float64', count=count)
data = data.reshape(*shape)
X = data[:, col_slice_x]
y = data[:, col_slice_y]
return X, y
elif isinstance(arff_data, tuple):
arff_data_X = _split_sparse_columns(arff_data, col_slice_x)
num_obs = max(arff_data[1]) + 1
X_shape = (num_obs, len(col_slice_x))
X = scipy.sparse.coo_matrix(
(arff_data_X[0], (arff_data_X[1], arff_data_X[2])),
shape=X_shape, dtype=np.float64)
X = X.tocsr()
y = _sparse_data_to_array(arff_data, col_slice_y)
return X, y
else:
# This should never happen
raise ValueError('Unexpected Data Type obtained from arff.')
def _feature_to_dtype(feature: Dict[str, str]):
"""Map feature to dtype for pandas DataFrame
"""
if feature['data_type'] == 'string':
return object
elif feature['data_type'] == 'nominal':
return 'category'
# only numeric, integer, real are left
elif (feature['number_of_missing_values'] != '0' or
feature['data_type'] in ['numeric', 'real']):
# cast to floats when there are any missing values
return np.float64
elif feature['data_type'] == 'integer':
return np.int64
raise ValueError('Unsupported feature: {}'.format(feature))
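# Editor-added illustration (not part of scikit-learn): examples of the dtype
# mapping implemented above.
#
#   _feature_to_dtype({'data_type': 'nominal', 'number_of_missing_values': '0'})
#       -> 'category'
#   _feature_to_dtype({'data_type': 'integer', 'number_of_missing_values': '3'})
#       -> numpy.float64   (missing values force a float dtype)
#   _feature_to_dtype({'data_type': 'integer', 'number_of_missing_values': '0'})
#       -> numpy.int64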
def _convert_arff_data_dataframe(
arff: ArffContainerType, columns: List, features_dict: Dict[str, Any]
) -> Tuple:
"""Convert the ARFF object into a pandas DataFrame.
Parameters
----------
arff : dict
As obtained from liac-arff object.
columns : list
Columns from dataframe to return.
features_dict : dict
Maps feature name to feature info from openml.
Returns
-------
result : tuple
tuple with the resulting dataframe
"""
pd = check_pandas_support('fetch_openml with as_frame=True')
attributes = OrderedDict(arff['attributes'])
arff_columns = list(attributes)
if not isinstance(arff['data'], Generator):
raise ValueError(
"arff['data'] must be a generator when converting to pd.DataFrame."
)
# calculate chunksize
first_row = next(arff['data'])
first_df = pd.DataFrame([first_row], columns=arff_columns)
row_bytes = first_df.memory_usage(deep=True).sum()
chunksize = get_chunk_n_rows(row_bytes)
# read arff data with chunks
columns_to_keep = [col for col in arff_columns if col in columns]
dfs = []
dfs.append(first_df[columns_to_keep])
for data in _chunk_generator(arff['data'], chunksize):
dfs.append(pd.DataFrame(data, columns=arff_columns)[columns_to_keep])
df = pd.concat(dfs, ignore_index=True)
for column in columns_to_keep:
dtype = _feature_to_dtype(features_dict[column])
if dtype == 'category':
cats_without_missing = [cat for cat in attributes[column]
if cat is not None and
not is_scalar_nan(cat)]
dtype = pd.api.types.CategoricalDtype(cats_without_missing)
df[column] = df[column].astype(dtype, copy=False)
return (df, )
def _get_data_info_by_name(
name: str, version: Union[int, str], data_home: Optional[str]
):
"""
Utilizes the openml dataset listing api to find a dataset by
name/version
OpenML api function:
https://www.openml.org/api_docs#!/data/get_data_list_data_name_data_name
Parameters
----------
name : str
name of the dataset
version : int or str
If version is an integer, the exact name/version will be obtained from
OpenML. If version is a string (value: "active") it will take the first
version from OpenML that is annotated as active. Any other string
values except "active" are treated as integer.
data_home : str or None
Location to cache the response. None if no cache is required.
Returns
-------
first_dataset : json
        json representation of the first dataset object that adhered to the
search criteria
"""
if version == "active":
# situation in which we return the oldest active version
url = _SEARCH_NAME.format(name) + "/status/active/"
error_msg = "No active dataset {} found.".format(name)
json_data = _get_json_content_from_openml_api(
url, error_msg, data_home=data_home
)
res = json_data['data']['dataset']
if len(res) > 1:
warn("Multiple active versions of the dataset matching the name"
" {name} exist. Versions may be fundamentally different, "
"returning version"
" {version}.".format(name=name, version=res[0]['version']))
return res[0]
# an integer version has been provided
url = (_SEARCH_NAME + "/data_version/{}").format(name, version)
try:
json_data = _get_json_content_from_openml_api(
url, error_message=None, data_home=data_home
)
except OpenMLError:
# we can do this in 1 function call if OpenML does not require the
# specification of the dataset status (i.e., return datasets with a
# given name / version regardless of active, deactivated, etc. )
# TODO: feature request OpenML.
url += "/status/deactivated"
error_msg = "Dataset {} with version {} not found.".format(name,
version)
json_data = _get_json_content_from_openml_api(
url, error_msg, data_home=data_home
)
return json_data['data']['dataset'][0]
def _get_data_description_by_id(
data_id: int, data_home: Optional[str]
) -> Dict[str, Any]:
# OpenML API function: https://www.openml.org/api_docs#!/data/get_data_id
url = _DATA_INFO.format(data_id)
error_message = "Dataset with data_id {} not found.".format(data_id)
json_data = _get_json_content_from_openml_api(
url, error_message, data_home=data_home
)
return json_data['data_set_description']
def _get_data_features(
data_id: int, data_home: Optional[str]
) -> OpenmlFeaturesType:
# OpenML function:
# https://www.openml.org/api_docs#!/data/get_data_features_id
url = _DATA_FEATURES.format(data_id)
error_message = "Dataset with data_id {} not found.".format(data_id)
json_data = _get_json_content_from_openml_api(
url, error_message, data_home=data_home
)
return json_data['data_features']['feature']
def _get_data_qualities(
data_id: int, data_home: Optional[str]
) -> OpenmlQualitiesType:
# OpenML API function:
# https://www.openml.org/api_docs#!/data/get_data_qualities_id
url = _DATA_QUALITIES.format(data_id)
error_message = "Dataset with data_id {} not found.".format(data_id)
json_data = _get_json_content_from_openml_api(
url, error_message, data_home=data_home
)
# the qualities might not be available, but we still try to process
# the data
return json_data.get('data_qualities', {}).get('quality', [])
def _get_num_samples(data_qualities: OpenmlQualitiesType) -> int:
"""Get the number of samples from data qualities.
Parameters
----------
data_qualities : list of dict
Used to retrieve the number of instances (samples) in the dataset.
Returns
-------
n_samples : int
The number of samples in the dataset or -1 if data qualities are
unavailable.
"""
# If the data qualities are unavailable, we return -1
default_n_samples = -1
qualities = {d['name']: d['value'] for d in data_qualities}
return int(float(qualities.get('NumberOfInstances', default_n_samples)))
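# Editor-added illustration (not part of scikit-learn):
#
#   _get_num_samples([{'name': 'NumberOfInstances', 'value': '150.0'},
#                     {'name': 'NumberOfFeatures', 'value': '4.0'}])   -> 150
#   _get_num_samples([])                                               -> -1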
def _load_arff_response(
url: str,
data_home: Optional[str],
return_type, encode_nominal: bool,
parse_arff: Callable[[ArffContainerType], Tuple],
md5_checksum: str
) -> Tuple:
"""Load arff data with url and parses arff response with parse_arff"""
response = _open_openml_url(url, data_home)
with closing(response):
# Note that if the data is dense, no reading is done until the data
# generator is iterated.
actual_md5_checksum = hashlib.md5()
def _stream_checksum_generator(response):
for line in response:
actual_md5_checksum.update(line)
yield line.decode('utf-8')
stream = _stream_checksum_generator(response)
arff = _arff.load(stream,
return_type=return_type,
encode_nominal=encode_nominal)
parsed_arff = parse_arff(arff)
# consume remaining stream, if early exited
for _ in stream:
pass
if actual_md5_checksum.hexdigest() != md5_checksum:
raise ValueError("md5 checksum of local file for " + url +
" does not match description. "
"Downloaded file could have been modified / "
"corrupted, clean cache and retry...")
return parsed_arff
def _download_data_to_bunch(
url: str,
sparse: bool,
data_home: Optional[str],
*,
as_frame: bool,
features_list: List,
data_columns: List[int],
target_columns: List,
shape: Optional[Tuple[int, int]],
md5_checksum: str
):
"""Download OpenML ARFF and convert to Bunch of data
"""
# NB: this function is long in order to handle retry for any failure
# during the streaming parse of the ARFF.
# Prepare which columns and data types should be returned for the X and y
features_dict = {feature['name']: feature for feature in features_list}
# XXX: col_slice_y should be all nominal or all numeric
_verify_target_data_type(features_dict, target_columns)
col_slice_y = [int(features_dict[col_name]['index'])
for col_name in target_columns]
col_slice_x = [int(features_dict[col_name]['index'])
for col_name in data_columns]
for col_idx in col_slice_y:
feat = features_list[col_idx]
nr_missing = int(feat['number_of_missing_values'])
if nr_missing > 0:
raise ValueError('Target column {} has {} missing values. '
'Missing values are not supported for target '
'columns. '.format(feat['name'], nr_missing))
# Access an ARFF file on the OpenML server. Documentation:
# https://www.openml.org/api_data_docs#!/data/get_download_id
if sparse is True:
return_type = _arff.COO
else:
return_type = _arff.DENSE_GEN
frame = nominal_attributes = None
parse_arff: Callable
postprocess: Callable
if as_frame:
columns = data_columns + target_columns
parse_arff = partial(_convert_arff_data_dataframe, columns=columns,
features_dict=features_dict)
def postprocess(frame):
X = frame[data_columns]
if len(target_columns) >= 2:
y = frame[target_columns]
elif len(target_columns) == 1:
y = frame[target_columns[0]]
else:
y = None
return X, y, frame, nominal_attributes
else:
def parse_arff(arff):
X, y = _convert_arff_data(arff, col_slice_x, col_slice_y, shape)
# nominal attributes is a dict mapping from the attribute name to
# the possible values. Includes also the target column (which will
# be popped off below, before it will be packed in the Bunch
# object)
nominal_attributes = {k: v for k, v in arff['attributes']
if isinstance(v, list) and
k in data_columns + target_columns}
return X, y, nominal_attributes
def postprocess(X, y, nominal_attributes):
is_classification = {col_name in nominal_attributes
for col_name in target_columns}
if not is_classification:
# No target
pass
elif all(is_classification):
y = np.hstack([
np.take(
np.asarray(nominal_attributes.pop(col_name),
dtype='O'),
y[:, i:i + 1].astype(int, copy=False))
for i, col_name in enumerate(target_columns)
])
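                # e.g. with categories ['cat', 'dog'] and integer codes
                # [[0], [1], [0]], np.take yields [['cat'], ['dog'], ['cat']]
                # for that target column (illustrative values).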
elif any(is_classification):
raise ValueError('Mix of nominal and non-nominal targets is '
'not currently supported')
# reshape y back to 1-D array, if there is only 1 target column;
            # back to None if there are no target columns
if y.shape[1] == 1:
y = y.reshape((-1,))
elif y.shape[1] == 0:
y = None
return X, y, frame, nominal_attributes
out = _retry_with_clean_cache(url, data_home)(
_load_arff_response)(url, data_home,
return_type=return_type,
encode_nominal=not as_frame,
parse_arff=parse_arff,
md5_checksum=md5_checksum)
X, y, frame, nominal_attributes = postprocess(*out)
return Bunch(data=X, target=y, frame=frame,
categories=nominal_attributes,
feature_names=data_columns,
target_names=target_columns)
def _verify_target_data_type(features_dict, target_columns):
# verifies the data type of the y array in case there are multiple targets
# (throws an error if these targets do not comply with sklearn support)
if not isinstance(target_columns, list):
raise ValueError('target_column should be list, '
'got: %s' % type(target_columns))
found_types = set()
for target_column in target_columns:
if target_column not in features_dict:
            raise KeyError('Could not find target_column={}'.format(target_column))
if features_dict[target_column]['data_type'] == "numeric":
found_types.add(np.float64)
else:
found_types.add(object)
# note: we compare to a string, not boolean
if features_dict[target_column]['is_ignore'] == 'true':
warn('target_column={} has flag is_ignore.'.format(
target_column))
if features_dict[target_column]['is_row_identifier'] == 'true':
warn('target_column={} has flag is_row_identifier.'.format(
target_column))
if len(found_types) > 1:
raise ValueError('Can only handle homogeneous multi-target datasets, '
'i.e., all targets are either numeric or '
'categorical.')
def _valid_data_column_names(features_list, target_columns):
# logic for determining on which columns can be learned. Note that from the
# OpenML guide follows that columns that have the `is_row_identifier` or
# `is_ignore` flag, these can not be learned on. Also target columns are
# excluded.
valid_data_column_names = []
for feature in features_list:
if (feature['name'] not in target_columns
and feature['is_ignore'] != 'true'
and feature['is_row_identifier'] != 'true'):
valid_data_column_names.append(feature['name'])
return valid_data_column_names
def fetch_openml(
name: Optional[str] = None,
*,
version: Union[str, int] = 'active',
data_id: Optional[int] = None,
data_home: Optional[str] = None,
target_column: Optional[Union[str, List]] = 'default-target',
cache: bool = True,
return_X_y: bool = False,
as_frame: Union[str, bool] = 'auto'
):
"""Fetch dataset from openml by name or dataset id.
Datasets are uniquely identified by either an integer ID or by a
combination of name and version (i.e. there might be multiple
versions of the 'iris' dataset). Please give either name or data_id
(not both). In case a name is given, a version can also be
provided.
Read more in the :ref:`User Guide <openml>`.
.. versionadded:: 0.20
.. note:: EXPERIMENTAL
The API is experimental (particularly the return value structure),
and might have small backward-incompatible changes without notice
or warning in future releases.
Parameters
----------
name : str, default=None
String identifier of the dataset. Note that OpenML can have multiple
datasets with the same name.
version : int or 'active', default='active'
Version of the dataset. Can only be provided if also ``name`` is given.
If 'active' the oldest version that's still active is used. Since
there may be more than one active version of a dataset, and those
versions may fundamentally be different from one another, setting an
exact version is highly recommended.
data_id : int, default=None
OpenML ID of the dataset. The most specific way of retrieving a
dataset. If data_id is not given, name (and potential version) are
used to obtain a dataset.
data_home : str, default=None
Specify another download and cache folder for the data sets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
target_column : str, list or None, default='default-target'
Specify the column name in the data to use as target. If
        'default-target', the standard target column as stored on the server
is used. If ``None``, all columns are returned as data and the
target is ``None``. If list (of strings), all columns with these names
are returned as multi-target (Note: not all scikit-learn classifiers
can handle all types of multi-output combinations)
cache : bool, default=True
Whether to cache downloaded datasets using joblib.
return_X_y : bool, default=False
If True, returns ``(data, target)`` instead of a Bunch object. See
below for more information about the `data` and `target` objects.
as_frame : bool or 'auto', default='auto'
If True, the data is a pandas DataFrame including columns with
appropriate dtypes (numeric, string or categorical). The target is
a pandas DataFrame or Series depending on the number of target_columns.
The Bunch will contain a ``frame`` attribute with the target and the
data. If ``return_X_y`` is True, then ``(data, target)`` will be pandas
        DataFrames or Series as described above.
If as_frame is 'auto', the data and target will be converted to
DataFrame or Series as if as_frame is set to True, unless the dataset
is stored in sparse format.
.. versionchanged:: 0.24
The default value of `as_frame` changed from `False` to `'auto'`
in 0.24.
Returns
-------
data : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : np.array, scipy.sparse.csr_matrix of floats, or pandas DataFrame
The feature matrix. Categorical features are encoded as ordinals.
target : np.array, pandas Series or DataFrame
The regression target or classification labels, if applicable.
Dtype is float if numeric, and object if categorical. If
``as_frame`` is True, ``target`` is a pandas object.
DESCR : str
The full description of the dataset
feature_names : list
The names of the dataset columns
target_names: list
The names of the target columns
.. versionadded:: 0.22
categories : dict or None
Maps each categorical feature name to a list of values, such
that the value encoded as i is ith in the list. If ``as_frame``
is True, this is None.
details : dict
More metadata from OpenML
frame : pandas DataFrame
Only present when `as_frame=True`. DataFrame with ``data`` and
``target``.
(data, target) : tuple if ``return_X_y`` is True
.. note:: EXPERIMENTAL
This interface is **experimental** and subsequent releases may
change attributes without notice (although there should only be
minor changes to ``data`` and ``target``).
Missing values in the 'data' are represented as NaN's. Missing values
in 'target' are represented as NaN's (numerical target) or None
(categorical target)
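    Examples
    --------
    A minimal usage sketch; the dataset name, version and shapes below are
    illustrative and are not verified here:
    >>> from sklearn.datasets import fetch_openml
    >>> mice = fetch_openml(name='miceprotein', version=4)  # doctest: +SKIP
    >>> mice.data.shape  # doctest: +SKIP
    (1080, 77)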
"""
if cache is False:
# no caching will be applied
data_home = None
else:
data_home = get_data_home(data_home=data_home)
data_home = join(data_home, 'openml')
# check valid function arguments. data_id XOR (name, version) should be
# provided
if name is not None:
# OpenML is case-insensitive, but the caching mechanism is not
# convert all data names (str) to lower case
name = name.lower()
if data_id is not None:
raise ValueError(
"Dataset data_id={} and name={} passed, but you can only "
"specify a numeric data_id or a name, not "
"both.".format(data_id, name))
data_info = _get_data_info_by_name(name, version, data_home)
data_id = data_info['did']
elif data_id is not None:
# from the previous if statement, it is given that name is None
if version != "active":
raise ValueError(
"Dataset data_id={} and version={} passed, but you can only "
"specify a numeric data_id or a version, not "
"both.".format(data_id, version))
else:
raise ValueError(
"Neither name nor data_id are provided. Please provide name or "
"data_id.")
data_description = _get_data_description_by_id(data_id, data_home)
if data_description['status'] != "active":
warn("Version {} of dataset {} is inactive, meaning that issues have "
"been found in the dataset. Try using a newer version from "
"this URL: {}".format(
data_description['version'],
data_description['name'],
data_description['url']))
if 'error' in data_description:
warn("OpenML registered a problem with the dataset. It might be "
"unusable. Error: {}".format(data_description['error']))
if 'warning' in data_description:
warn("OpenML raised a warning on the dataset. It might be "
"unusable. Warning: {}".format(data_description['warning']))
return_sparse = False
if data_description['format'].lower() == 'sparse_arff':
return_sparse = True
if as_frame == 'auto':
as_frame = not return_sparse
if as_frame and return_sparse:
raise ValueError('Cannot return dataframe with sparse data')
# download data features, meta-info about column types
features_list = _get_data_features(data_id, data_home)
if not as_frame:
for feature in features_list:
if 'true' in (feature['is_ignore'], feature['is_row_identifier']):
continue
if feature['data_type'] == 'string':
raise ValueError('STRING attributes are not supported for '
'array representation. Try as_frame=True')
if target_column == "default-target":
# determines the default target based on the data feature results
# (which is currently more reliable than the data description;
# see issue: https://github.com/openml/OpenML/issues/768)
target_columns = [feature['name'] for feature in features_list
if feature['is_target'] == 'true']
elif isinstance(target_column, str):
# for code-simplicity, make target_column by default a list
target_columns = [target_column]
elif target_column is None:
target_columns = []
elif isinstance(target_column, list):
target_columns = target_column
else:
raise TypeError("Did not recognize type of target_column"
"Should be str, list or None. Got: "
"{}".format(type(target_column)))
data_columns = _valid_data_column_names(features_list,
target_columns)
shape: Optional[Tuple[int, int]]
# determine arff encoding to return
if not return_sparse:
# The shape must include the ignored features to keep the right indexes
# during the arff data conversion.
data_qualities = _get_data_qualities(data_id, data_home)
shape = _get_num_samples(data_qualities), len(features_list)
else:
shape = None
# obtain the data
url = _DATA_FILE.format(data_description['file_id'])
bunch = _download_data_to_bunch(url, return_sparse, data_home,
as_frame=bool(as_frame),
features_list=features_list, shape=shape,
target_columns=target_columns,
data_columns=data_columns,
md5_checksum=data_description[
"md5_checksum"])
if return_X_y:
return bunch.data, bunch.target
description = "{}\n\nDownloaded from openml.org.".format(
data_description.pop('description'))
bunch.update(
DESCR=description, details=data_description,
url="https://www.openml.org/d/{}".format(data_id))
return bunch
|
bsd-3-clause
| 66,852,054,886,861,020
| 35.845989
| 79
| 0.604017
| false
| 4.03313
| false
| false
| false
|
yoazmenda/Hearthstone_deck_builder
|
run_games.py
|
1
|
1724
|
import json
from hearthbreaker.agents.basic_agents import RandomAgent
from hearthbreaker.cards.heroes import hero_for_class
from hearthbreaker.constants import CHARACTER_CLASS
from hearthbreaker.engine import Game, Deck, card_lookup
from hearthbreaker.cards import *
import timeit
def load_deck(filename):
cards = []
character_class = CHARACTER_CLASS.MAGE
with open(filename, "r") as deck_file:
contents = deck_file.read()
items = contents.splitlines()
for line in items[0:]:
parts = line.split(" ", 1)
count = int(parts[0])
for i in range(0, count):
card = card_lookup(parts[1])
if card.character_class != CHARACTER_CLASS.ALL:
character_class = card.character_class
cards.append(card)
if len(cards) > 30:
pass
return Deck(cards, hero_for_class(character_class))
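# Deck files read by load_deck above are plain text with one "<count> <card name>"
# entry per line, e.g. (hypothetical deck contents):
#   2 Flame Imp
#   1 Doomguard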
def do_stuff():
_count = 0
def play_game():
nonlocal _count
_count += 1
new_game = game.copy()
try:
new_game.start()
except Exception as e:
print(json.dumps(new_game.__to_json__(), default=lambda o: o.__to_json__(), indent=1))
print(new_game._all_cards_played)
raise e
#winner
#print(new_game.players[0].hero.dead)
del new_game
if _count % 1000 == 0:
print("---- game #{} ----".format(_count))
deck1 = load_deck("zoo.hsdeck")
deck2 = load_deck("zoo.hsdeck")
game = Game([deck1, deck2], [RandomAgent(), RandomAgent()])
print(timeit.timeit(play_game, 'gc.enable()', number=2000))
|
mit
| -2,154,089,804,845,221,400
| 27.733333
| 98
| 0.563805
| false
| 3.723542
| false
| false
| false
|
donlorenzo/AdvancedConfigParser
|
src/AdvancedConfigParser.py
|
1
|
17728
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010, 2014 Lorenz Quack
# This code is released under the MIT License:
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
u"""
AdvancedConfigParser
parse config files written in a .ini-file like style.
In addition to ini files this module has the following advanced features:
* arbitrarily nested subsections
* various (nested) types including int, float, str, list, dict
* various calculations in values
* refer to other keys in values
Example:
global_var = True
[Section_1]
pi = 3.141
[[Sub_Sec_1]]
tau = 2 * pi
[whatever]
foo = [Section_1.pi, Section_1.Sub_Sec_1.tau, global_var]
bar = max(foo)
baz = foo if Section_1.pi < 2**2 < Section_1.Sub_Sec_1.tau/2 or True else bar
Configuration can be loaded from strings (parse_string()),
files (parse_file()) or file-like objects (parse_stream()).
Access to the sections and options is done by attribute access:
>>> config = AdvancedConfigParser.parse_string("filename")
>>> print(config.global_var)
>>> print(config.Section_1.pi + config.whatever.bar)
"""
try:
import __builtin__ as builtins
except ImportError:
import builtins
import io
import re
import ast
import operator
from ast_to_src import ast_to_src
def parse_file(filename):
with open(filename) as f:
return parse_stream(f)
def parse_string(s):
return parse_stream(io.StringIO(s))
def parse_stream(stream):
"""
    parse the stream into a hierarchical tree of (sub-)sections and options.
return the root/global section.
"""
root = current_section = Section()
current_section._acp_name = "<global>"
current_nesting_level = 0
line = 0
while True:
buf = ""
tmp = stream.readline()
line += 1
if tmp == "":
break
buf += tmp
stripped_buf = buf.strip()
# preserve empty lines
if not stripped_buf:
current_section._acp_add_empty_line()
# ignore comments
elif stripped_buf.startswith("#"):
current_section._acp_add_comment(stripped_buf)
# handle section header
elif stripped_buf.startswith("["):
result = re.match(r"(\[+)([^\d\W]\w*)(\]+)", stripped_buf)
if result is None:
msg = "malformed section header in line {line}:\n{tmp}"
raise SyntaxError(msg.format(**locals()))
if len(result.group(1)) != len(result.group(3)):
msg = "section braket mismatch in line {line}:\n{tmp}"
raise SyntaxError(msg.format(**locals()))
level = min(len(result.group(1)), len(result.group(3)))
if level > current_nesting_level + 1:
msg = "wrong section nesting in line {line}"
raise SyntaxError(msg.format(**locals()))
while current_nesting_level >= level:
current_section = current_section._acp_parent
current_nesting_level -= 1
section_name = ast.parse(result.group(2)).body[0].value.id
if section_name in list(current_section._acp_section_names()):
msg = 'duplicate section "{section_name}".'.format(**locals())
raise SyntaxError(msg)
new_section = Section()
new_section._acp_name = section_name
current_section._acp_add_child(new_section)
current_section = new_section
current_nesting_level += 1
# handle options
else:
node = None
while node is None and tmp != "":
try:
node = ast.parse(stripped_buf)
except SyntaxError:
tmp = stream.readline()
buf += tmp
stripped_buf = buf.strip()
node = node.body[0]
assert isinstance(node, ast.Assign)
option_name = node.targets[0].id
if option_name in list(current_section._acp_option_names()):
msg = ('duplicate option "{option_name}" in '
'section "{current_section._acp_name}".')
raise SyntaxError(msg.format(**locals()))
new_option = Option()
new_option._acp_name = option_name
new_option._acp_value = node.value
current_section._acp_add_child(new_option)
return root
class Section(object):
"""
Section objects allow access to their sub-sections and options via
attribute access and subscript.
new sections and options may be added via "_acp_add_child()".
"""
def __init__(self):
self.__dict__["_acp_name"] = ""
self.__dict__["_acp_parent"] = None
self.__dict__["_acp_order"] = []
self.__dict__["_acp_nesting_level"] = 0
def __str__(self):
return '<Section "{self._acp_name}">'.format(**locals())
__repr__ = __str__
def __setattr__(self, attr, val):
obj = object.__getattribute__(self, attr)
if isinstance(obj, Option):
obj._acp_value = val
else:
super(Section, self).__setattr__(attr, val)
def __getattribute__(self, attr, raw=False):
obj = super(Section, self).__getattribute__(attr)
if isinstance(obj, Option) and not raw:
return obj._acp_value
else:
return obj
def __getitem__(self, key):
try:
return getattr(self, key)
except AttributeError as e:
raise KeyError(str(e))
def _acp_add_child(self, child):
child._acp_nesting_level = self._acp_nesting_level + 1
if child._acp_parent is None:
child._acp_parent = self
if child._acp_name in self.__dict__:
msg = "duplicate object: {child_name}"
raise SyntaxError(msg.format(child_name=child._acp_name))
self.__dict__[child._acp_name] = child
self.__dict__["_acp_order"].append(child._acp_name)
def _acp_add_empty_line(self):
self.__dict__["_acp_order"].append("\n")
def _acp_add_comment(self, comment):
self.__dict__["_acp_order"].append(comment)
def _acp_sections(self):
for section in (section for section in self.__dict__.values()
if isinstance(section, Section)):
yield section
def _acp_section_names(self):
for section_name in (sn for (sn, s) in self.__dict__.items()
if isinstance(s, Section)):
yield section_name
def _acp_options(self):
for option in (option for option in self.__dict__.values()
if isinstance(option, Option)):
yield option
def _acp_option_names(self):
for option_name in (o_name for o_name, option in self.__dict__.items()
if isinstance(option, Option)):
yield option_name
def _acp_children(self):
for child in (child for child in self.__dict__.values()
if isinstance(child, (Section, Option))):
yield child
def dump(self):
return self.pretty_print(do_indent=False)
def pretty_print(self, indent=0, do_indent=True):
if self._acp_name != "<global>":
template = "{indentation}{left}{section_name}{right}\n"
s = template.format(indentation=" " * indent,
left="[" * self._acp_nesting_level,
right="]" * self._acp_nesting_level,
section_name=self._acp_name)
if do_indent:
indent += 1
else:
s = ""
for child_name in self._acp_order:
if child_name == "\n":
s += "\n"
elif child_name.strip().startswith("#"):
s += "{indent}{comment}\n".format(indent=" " * indent,
comment=child_name)
else:
child = getattr(self, child_name)
if isinstance(child, Section):
s += child.pretty_print(indent)
else:
child_raw = self._acp_get_raw_option(child_name)
template = "{indentation}{option_name} = {option_raw}\n"
s += template.format(indentation=" " * indent,
option_name=child_name,
option_raw=child_raw)
return s
def _acp_get_raw_option(self, option_name):
return self.__getattribute__(option_name, True)._acp_raw_value
class LazyEval(object):
"""
evaluates the ast nodes lazy when used as a descriptor.
when we find that all involved ast-nodes are static we cache the result.
"""
def __init__(self):
self.cache = {}
def __get__(self, instance, owner):
# see if we already cached the result from a previous evaluation
if instance in self.cache:
return self.cache[instance]
# dynamically evaluate the ast-nodes
val, has_refs = self._acp_eval(instance._acp_parent,
instance._acp_ast_node)
# if the ast-nodes have no external references cache the result
if not has_refs:
self.cache[instance] = val
return val
def __set__(self, instance, value):
# if value is a ast-node invalidate the cache
if isinstance(value, ast.AST):
instance._acp_ast_node = value
try:
del self.cache[instance]
except KeyError:
pass
# else it is a static value which can be put directly into the cache
else:
self.cache[instance] = value
def _acp_eval(self, parent, node):
"""
dynamically and recursively evaluate the ast-nodes.
returns a 2-tuple. first is the actual value, second a bool indicating
if this ast-node has external dependencies and should not be cached.
"""
# first try simple conversion of literals
try:
return ast.literal_eval(node), False
except (SyntaxError, ValueError):
pass
# handle external references
if isinstance(node, (ast.Name, ast.Attribute)):
ref = ""
while isinstance(node, ast.Attribute):
ref = "." + node.attr + ref
node = node.value
ref = node.id + ref
return self._acp_resolve_reference(ref, parent), True
# handle lists, tuples and dicts
elif isinstance(node, (ast.List, ast.Tuple, ast.Dict)):
vals = []
has_refs = False
for child_node in ast.iter_child_nodes(node):
tmp = self._acp_eval(parent, child_node)
if not tmp:
continue
vals.append(tmp[0])
has_refs = tmp[1]
if isinstance(node, ast.List):
return list(vals), has_refs
elif isinstance(node, ast.Tuple):
return tuple(vals), has_refs
return vals, has_refs
# handle the following math operators +, -, *, /, //, %, **, |, &, ^
elif isinstance(node, ast.BinOp):
lhs, lhs_has_refs = self._acp_eval(parent, node.left)
rhs, rhs_has_refs = self._acp_eval(parent, node.right)
ops = {ast.Add: operator.add, ast.Sub: operator.sub,
ast.Mult: operator.mul, ast.Div: operator.truediv,
ast.FloorDiv: operator.floordiv, ast.Mod: operator.mod,
ast.Pow: operator.pow, ast.LShift: operator.lshift,
ast.RShift: operator.rshift, ast.BitOr: operator.or_,
ast.BitXor: operator.xor, ast.BitAnd: operator.and_,}
if node.op.__class__ in ops:
return (ops[node.op.__class__](lhs, rhs),
lhs_has_refs | rhs_has_refs)
else:
msg = 'op "{op_name}" not supported yet'
raise SyntaxError(msg.format(op_name=str(node.op.__class__)))
# handle calls to some selected builtin functions
elif isinstance(node, ast.Call):
if node.func.id in ("abs", "all", "any", "bin", "bool", "chr",
"complex", "dict", "divmod", "enumerate",
"float", "hex", "int", "len", "list", "max",
"min", "oct", "ord", "pow", "range", "reversed",
"round", "set", "sorted", "str", "sum", "tuple",
"type", "unichr", "zip", ):
has_refs = False
args = []
for arg_node in node.args:
arg, temp_has_refs = self._acp_eval(parent, arg_node)
args.append(arg)
has_refs |= temp_has_refs
kwargs = {}
for keyword_node in node.keywords:
kwargs[keyword_node.arg], temp_has_refs = self._acp_eval(parent, keyword_node.value)
has_refs |= temp_has_refs
return (builtins.__dict__[node.func.id](*args, **kwargs),
has_refs)
# handle ternary if operator
elif isinstance(node, ast.IfExp):
test, test_has_refs = self._acp_eval(parent, node.test)
if test:
result, has_refs = self._acp_eval(parent, node.body)
else:
result, has_refs = self._acp_eval(parent, node.orelse)
return result, has_refs | test_has_refs
# handle compares
elif isinstance(node, ast.Compare):
astOp2FuncOp = {ast.Eq: operator.eq, ast.NotEq: operator.ne,
ast.Lt: operator.lt, ast.LtE: operator.le,
ast.Gt: operator.gt, ast.GtE: operator.ge,
ast.Is: operator.is_, ast.IsNot: operator.is_not,
# don't use contains because arguments are reversed
ast.In: lambda a, b: a in b,
ast.NotIn: lambda a, b: a not in b}
left, left_has_refs = self._acp_eval(parent, node.left)
has_refs = left_has_refs
for ast_op, ast_right in zip(node.ops, node.comparators):
right, right_has_refs = self._acp_eval(parent, ast_right)
has_refs |= right_has_refs
op = astOp2FuncOp[ast_op.__class__]
if op(left, right):
left = right
else:
return False, has_refs
return True, has_refs
# handle boolean operators
elif isinstance(node, ast.BoolOp):
has_refs = False
if node.op.__class__ == ast.And:
for value in node.values:
v, value_has_refs = self._acp_eval(parent, value)
has_refs |= value_has_refs
if not v:
return False, has_refs
return True, has_refs
elif node.op.__class__ == ast.Or:
for value in node.values:
v, value_has_refs = self._acp_eval(parent, value)
has_refs |= value_has_refs
if v:
return True, has_refs
return False, has_refs
raise RuntimeError("unreachable")
# not sure what this is about...
elif isinstance(node, ast.Load):
pass
else:
raise RuntimeError("unhandled node: " + str(node))
@classmethod
def _acp_resolve_reference(cls, ref, parent):
"""
resolves external references by walking up the tree
until we find a complete match
"""
attrs = ref.split(".")
while parent is not None:
try:
obj = parent
for attr in attrs:
obj = getattr(obj, attr)
return obj
except (KeyError, AttributeError):
parent = parent._acp_parent
raise AttributeError(ref)
class Option(object):
def __init__(self):
self._acp_name = ""
self._acp_parent = None
self._acp_has_refs = True
self._acp_nesting_level = 0
self._acp_ast_node = None
def _acp_get_raw_value(self):
return ast_to_src(self._acp_ast_node)
_acp_value = LazyEval()
_acp_raw_value = property(_acp_get_raw_value)
def __str__(self):
return '<Option {self._acp_name}>'.format(**locals())
__repr__ = __str__
|
mit
| -2,125,530,160,059,667,500
| 38.838202
| 104
| 0.542137
| false
| 4.147871
| true
| false
| false
|
rmanoni/mi-instrument
|
mi/instrument/kut/ek60/ooicore/driver.py
|
1
|
39253
|
"""
@package mi.instrument.kut.ek60.ooicore.driver
@file /mi/instrument/kut/ek60/ooicore/driver.py
@author Richard Han
@brief Driver for the ooicore
Release notes:
This Driver supports the Kongsberg UnderWater Technology's EK60 Instrument.
"""
__author__ = 'Richard Han & Craig Risien'
__license__ = 'Apache 2.0'
import ftplib
import json
import tempfile
import urllib2
import yaml
from mi.core.common import BaseEnum
from mi.core.exceptions import InstrumentParameterException, InstrumentException, SampleException
from mi.core.exceptions import InstrumentConnectionException
from mi.core.instrument.data_particle import DataParticle, CommonDataParticleType, DataParticleKey
from mi.core.instrument.driver_dict import DriverDictKey
from mi.core.instrument.instrument_driver import SingleConnectionInstrumentDriver
from mi.core.instrument.instrument_driver import DriverEvent
from mi.core.instrument.instrument_driver import DriverAsyncEvent
from mi.core.instrument.instrument_driver import DriverProtocolState
from mi.core.instrument.instrument_driver import DriverParameter
from mi.core.instrument.instrument_driver import ResourceAgentState
from mi.core.instrument.instrument_fsm import ThreadSafeFSM
from mi.core.instrument.instrument_protocol import CommandResponseInstrumentProtocol
from mi.core.instrument.protocol_param_dict import ParameterDictType
from mi.core.log import get_logger
from mi.core.log import get_logging_metaclass
log = get_logger()
# newline.
NEWLINE = '\r\n'
# Default Instrument's IP Address
DEFAULT_HOST = "128.193.64.201"
YAML_FILE_NAME = "driver_schedule.yaml"
DEFAULT_PORT = "80"
USER_NAME = "ooi"
PASSWORD = "994ef22"
DEFAULT_CONFIG = {
'file_prefix': "Driver DEFAULT CONFIG_PREFIX",
'file_path': "DEFAULT_FILE_PATH", # relative to filesystem_root/data
    'max_file_size': 288,  # in bytes (50 MB would be 50 * 1024 * 1024 = 52428800)
'intervals': [{
'name': "default",
'type': "constant",
'start_at': "00:00",
'duration': "00:15:00",
'repeat_every': "01:00",
'stop_repeating_at': "23:55",
'interval': 1000,
'max_range': 80,
'frequency': {
38000: {
'mode': 'active',
'power': 100,
'pulse_length': 256
},
120000: {
'mode': 'active',
'power': 100,
'pulse_length': 64
},
200000: {
'mode': 'active',
'power': 120,
'pulse_length': 64
}
}
}]
}
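# DEFAULT_CONFIG above is serialized with yaml.dump() (see Parameter.SCHEDULE in
# _build_param_dict below) and shipped to the instrument as the driver schedule
# file; in YAML block form it looks roughly like this abridged sketch (key order
# may vary):
#   file_path: DEFAULT_FILE_PATH
#   file_prefix: Driver DEFAULT CONFIG_PREFIX
#   max_file_size: 288
#   intervals:
#   - name: default
#     type: constant
#     ...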
###
# Driver Constant Definitions
###
# String constants
CONNECTED = "connected"
CURRENT_RAW_FILENAME = "current_raw_filename"
CURRENT_RAW_FILESIZE = "current_raw_filesize"
CURRENT_RUNNING_INTERVAL = "current_running_interval"
CURRENT_UTC_TIME = "current_utc_time"
DURATION = "duration"
ER60_CHANNELS = "er60_channels"
ER60_STATUS = "er60_status"
EXECUTABLE = "executable"
FILE_PATH = "file_path"
FILE_PREFIX = "file_prefix"
FREQUENCY = "frequency"
FREQ_120K = "120000"
FREQ_200K = "200000"
FREQ_38K = "38000"
FS_ROOT = "fs_root"
GPTS_ENABLED = "gpts_enabled"
HOST = "host"
INTERVAL = "interval"
INTERVALS = "intervals"
RAW_OUTPUT = "raw_output"
MAX_FILE_SIZE = "max_file_size"
MAX_RANGE = "max_range"
MODE = "mode"
NAME = "name"
NEXT_SCHEDULED_INTERVAL = "next_scheduled_interval"
PID = "pid"
PORT = "port"
POWER = "power"
PULSE_LENGTH = "pulse_length"
SAMPLE_INTERVAL = "sample_interval"
SAMPLE_RANGE = "sample_range"
SAVE_INDEX = "save_index"
SAVE_BOTTOM = "save_bottom"
SAVE_RAW = "save_raw"
SCHEDULE = "schedule"
SCHEDULE_FILENAME = "schedule_filename"
SCHEDULED_INTERVALS_REMAINING = "scheduled_intervals_remaining"
START_AT = "start_at"
STOP_REPEATING_AT = "stop_repeating_at"
TYPE = "type"
class DataParticleType(BaseEnum):
"""
Data particle types produced by this driver
"""
RAW = CommonDataParticleType.RAW
ZPLSC_STATUS = 'zplsc_status'
class ProtocolState(BaseEnum):
"""
Instrument protocol states
"""
UNKNOWN = DriverProtocolState.UNKNOWN
COMMAND = DriverProtocolState.COMMAND
AUTOSAMPLE = DriverProtocolState.AUTOSAMPLE
class ProtocolEvent(BaseEnum):
"""
Protocol events
"""
ENTER = DriverEvent.ENTER
EXIT = DriverEvent.EXIT
GET = DriverEvent.GET
SET = DriverEvent.SET
DISCOVER = DriverEvent.DISCOVER
START_AUTOSAMPLE = DriverEvent.START_AUTOSAMPLE
STOP_AUTOSAMPLE = DriverEvent.STOP_AUTOSAMPLE
ACQUIRE_STATUS = DriverEvent.ACQUIRE_STATUS
class Capability(BaseEnum):
"""
Protocol events that should be exposed to users (subset of above).
"""
START_AUTOSAMPLE = ProtocolEvent.START_AUTOSAMPLE
STOP_AUTOSAMPLE = ProtocolEvent.STOP_AUTOSAMPLE
ACQUIRE_STATUS = ProtocolEvent.ACQUIRE_STATUS
GET = ProtocolEvent.GET
SET = ProtocolEvent.SET
class Parameter(DriverParameter):
"""
Device specific parameters.
"""
SCHEDULE = "schedule"
FTP_IP_ADDRESS = "ftp_ip_address"
FTP_USERNAME = "ftp_username"
FTP_PASSWORD = "ftp_password"
FTP_PORT = "ftp_port"
class Prompt(BaseEnum):
"""
Device i/o prompts..
"""
class Command(BaseEnum):
"""
Instrument command strings
"""
ACQUIRE_STATUS = 'acquire_status'
START_AUTOSAMPLE = 'start_autosample'
STOP_AUTOSAMPLE = 'stop_autosample'
GET = 'get_param'
SET = 'set_param'
###############################################################################
# Data Particles
###############################################################################
class ZPLSCStatusParticleKey(BaseEnum):
ZPLSC_CONNECTED = "zplsc_connected" # Connected to a running ER 60 instance
ZPLSC_ACTIVE_38K_MODE = "zplsc_active_38k_mode" # 38K Transducer transmit mode
ZPLSC_ACTIVE_38K_POWER = "zplsc_active_38k_power" # 38K Transducer transmit power in W
ZPLSC_ACTIVE_38K_PULSE_LENGTH = "zplsc_active_38k_pulse_length" # 38K Transducer transmit pulse length in seconds
ZPLSC_ACTIVE_38K_SAMPLE_INTERVAL = "zplsc_active_38k_sample_interval" # Sample interval in seconds
ZPLSC_ACTIVE_120K_MODE = "zplsc_active_120k_mode" # 120K Transducer transmit mode
ZPLSC_ACTIVE_120K_POWER = "zplsc_active_120k_power" # 120K Transducer transmit power in W
ZPLSC_ACTIVE_120K_PULSE_LENGTH = "zplsc_active_120k_pulse_length" # 120K Transducer Transmit pulse length in seconds
ZPLSC_ACTIVE_120K_SAMPLE_INTERVAL = "zplsc_active_120k_sample_interval" # 120K Sample Interval
ZPLSC_ACTIVE_200K_MODE = "zplsc_active_200k_mode" # 200K Transducer transmit mode
ZPLSC_ACTIVE_200K_POWER = "zplsc_active_200k_power" # 200K Transducer transmit power in W
ZPLSC_ACTIVE_200K_PULSE_LENGTH = "zplsc_active_200k_pulse_length" # 200K Transducer transmit pulse length in seconds
ZPLSC_ACTIVE_200K_SAMPLE_INTERVAL = "zplsc_active_200k_sample_interval" # 200K Transducer sample interval
ZPLSC_CURRENT_UTC_TIME = "zplsc_current_utc_time" # Current UTC Time
ZPLSC_EXECUTABLE = "zplsc_executable" # Executable used to launch ER60
ZPLSC_FS_ROOT = "zplsc_fs_root" # Root directory where data/logs/configs are stored
ZPLSC_NEXT_SCHEDULED_INTERVAL = "zplsc_next_scheduled_interval" # UTC time of next scheduled interval
ZPLSC_HOST = "zplsc_host" # Host IP Address
ZPLSC_PID = "zplsc_pid" # PID of running ER60 process
ZPLSC_PORT = "zplsc_port" # Host port number
ZPLSC_CURRENT_RAW_FILENAME = "zplsc_current_raw_filename" # File name of the current .raw file
ZPLSC_CURRENT_RAW_FILESIZE = "zplsc_current_raw_filesize" # File size of current .raw file
ZPLSC_FILE_PATH = "zplsc_file_path" # File storage path
ZPLSC_FILE_PREFIX = "zplsc_file_prefix" # Current file prefix
ZPLSC_MAX_FILE_SIZE = "zplsc_max_file_size" # Maximum file size
ZPLSC_SAMPLE_RANGE = "zplsc_sample_range" # Recording range
ZPLSC_SAVE_BOTTOM = "zplsc_save_bottom" # Save bottom file
ZPLSC_SAVE_INDEX = "zplsc_save_index" # Save index file
ZPLSC_SAVE_RAW = "zplsc_save_raw" # Save raw file
ZPLSC_SCHEDULED_INTERVALS_REMAINING = "zplsc_scheduled_intervals_remaining" # Number of intervals remaining in running schedule
ZPLSC_GPTS_ENABLED = "zplsc_gpts_enabled" # GPTs enabled
ZPLSC_SCHEDULE_FILENAME = "zplsc_schedule_filename" # Filename for .yaml schedule file
class ZPLSCStatusParticle(DataParticle):
"""
Routines for parsing raw data into a status particle structure. Override
the building of values, and the rest should come along for free.
Sample:
{'connected': True,
'er60_channels': {'GPT 38 kHz 00907207b7b1 6-1 OOI.38|200': {'frequency': 38000,
'mode': 'active',
'power': 100.0,
'pulse_length': 0.000256,
'sample_interval': 6.4e-05},
'GPT 120 kHz 00907207b7dc 1-1 ES120-7CD': {'frequency': 120000,
'mode': 'active',
'power': 100.0,
'pulse_length': 6.4e-05,
'sample_interval': 1.6e-05},
'GPT 200 kHz 00907207b7b1 6-2 OOI38|200': {'frequency': 200000,
'mode': 'active',
'power': 120.0,
'pulse_length': 6.4e-05,
'sample_interval': 1.6e-05}},
'er60_status': {'current_running_interval': None,
'current_utc_time': '2014-07-08 22:34:18.667000',
'executable': 'c:/users/ooi/desktop/er60.lnk',
'fs_root': 'D:/',
'host': '157.237.15.100',
'next_scheduled_interval': None,
'pid': 1864,
'port': 56635,
'raw_output': {'current_raw_filename': 'OOI-D20140707-T214500.raw',
'current_raw_filesize': None,
'file_path': 'D:\\data\\QCT_1',
'file_prefix': 'OOI',
'max_file_size': 52428800,
'sample_range': 220.0,
'save_bottom': True,
'save_index': True,
'save_raw': True},
'scheduled_intervals_remaining': 0},
'gpts_enabled': False,
'schedule': {},
'schedule_filename': 'qct_configuration_example_1.yaml'}
"""
__metaclass__ = get_logging_metaclass(log_level='trace')
_data_particle_type = DataParticleType.ZPLSC_STATUS
def _encode_value(self, name, value, encoding_function):
"""
Encode a value using the encoding function, if it fails store the error in a queue
Override to handle None values.
"""
encoded_val = None
if value is not None:
try:
encoded_val = encoding_function(value)
except Exception:
log.error("Data particle error encoding. Name:%s Value:%s", name, value)
self._encoding_errors.append({name: value})
return {DataParticleKey.VALUE_ID: name,
DataParticleKey.VALUE: encoded_val}
def _build_parsed_values(self):
"""
Parse ZPLSC Status response and return the ZPLSC Status particles
@throws SampleException If there is a problem with sample
"""
try:
log.debug("status raw_data = %s", self.raw_data)
config = self.raw_data
if not isinstance(config, dict):
raise SampleException("ZPLSC status data is not a dictionary" % self.raw_data)
active_200k_mode = None
active_200k_power = None
active_200k_pulse_length = None
active_200k_sample_interval = None
active_120k_mode = None
active_120k_power = None
active_120k_pulse_length = None
active_120k_sample_interval = None
active_38k_mode = None
active_38k_power = None
active_38k_pulse_length = None
active_38k_sample_interval = None
connected = config.get(CONNECTED)
er60_channels = config.get(ER60_CHANNELS)
if er60_channels is not None:
for key in er60_channels:
if '200 kHz' in key:
active_200k_mode = er60_channels[key].get(MODE)
active_200k_power = er60_channels[key].get(POWER)
active_200k_pulse_length = er60_channels[key].get(PULSE_LENGTH)
active_200k_sample_interval = er60_channels[key].get(SAMPLE_INTERVAL)
elif '120 kHz' in key:
active_120k_mode = er60_channels[key].get(MODE)
active_120k_power = er60_channels[key].get(POWER)
active_120k_pulse_length = er60_channels[key].get(PULSE_LENGTH)
active_120k_sample_interval = er60_channels[key].get(SAMPLE_INTERVAL)
elif '38 kHz' in key:
active_38k_mode = er60_channels[key].get(MODE)
active_38k_power = er60_channels[key].get(POWER)
active_38k_pulse_length = er60_channels[key].get(PULSE_LENGTH)
active_38k_sample_interval = er60_channels[key].get(SAMPLE_INTERVAL)
current_utc_time = None
executable = None
fs_root = None
next_scheduled_interval = 'None'
host = None
pid = '0'
port = None
current_raw_filename = None
current_raw_filesize = 0
file_path = None
file_prefix = None
max_file_size = None
sample_range = None
save_bottom = None
save_index = None
save_raw = None
scheduled_intervals_remaining = None
er60_status = config.get(ER60_STATUS)
if er60_status is not None:
current_utc_time = er60_status.get(CURRENT_UTC_TIME)
executable = er60_status.get(EXECUTABLE)
fs_root = er60_status.get(FS_ROOT)
if er60_status.get(NEXT_SCHEDULED_INTERVAL) is not None:
next_scheduled_interval = er60_status.get(NEXT_SCHEDULED_INTERVAL)
host = er60_status.get(HOST)
if er60_status.get(PID) is not None:
pid = er60_status.get(PID)
port = er60_status.get(PORT)
raw_output = er60_status.get(RAW_OUTPUT)
if raw_output is not None:
current_raw_filename = raw_output.get(CURRENT_RAW_FILENAME)
if raw_output.get(CURRENT_RAW_FILESIZE) is not None:
current_raw_filesize = raw_output.get(CURRENT_RAW_FILESIZE)
file_path = raw_output.get(FILE_PATH)
file_prefix = raw_output.get(FILE_PREFIX)
max_file_size = raw_output.get(MAX_FILE_SIZE)
sample_range = raw_output.get(SAMPLE_RANGE)
save_bottom = raw_output.get(SAVE_BOTTOM)
save_index = raw_output.get(SAVE_INDEX)
save_raw = raw_output.get(SAVE_RAW)
scheduled_intervals_remaining = er60_status.get(SCHEDULED_INTERVALS_REMAINING)
gpts_enabled = config.get(GPTS_ENABLED)
schedule_filename = config.get(SCHEDULE_FILENAME)
except KeyError:
raise SampleException("ValueError while converting ZPLSC Status: [%s]" % self.raw_data)
result = [
self._encode_value(ZPLSCStatusParticleKey.ZPLSC_CONNECTED, connected, int),
self._encode_value(ZPLSCStatusParticleKey.ZPLSC_ACTIVE_200K_MODE, active_200k_mode, str),
self._encode_value(ZPLSCStatusParticleKey.ZPLSC_ACTIVE_200K_POWER, active_200k_power, float),
self._encode_value(ZPLSCStatusParticleKey.ZPLSC_ACTIVE_200K_PULSE_LENGTH, active_200k_pulse_length, float),
self._encode_value(ZPLSCStatusParticleKey.ZPLSC_ACTIVE_200K_SAMPLE_INTERVAL, active_200k_sample_interval, float),
self._encode_value(ZPLSCStatusParticleKey.ZPLSC_ACTIVE_120K_MODE, active_120k_mode, str),
self._encode_value(ZPLSCStatusParticleKey.ZPLSC_ACTIVE_120K_POWER, active_120k_power, float),
self._encode_value(ZPLSCStatusParticleKey.ZPLSC_ACTIVE_120K_PULSE_LENGTH, active_120k_pulse_length, float),
self._encode_value(ZPLSCStatusParticleKey.ZPLSC_ACTIVE_120K_SAMPLE_INTERVAL, active_120k_sample_interval, float),
self._encode_value(ZPLSCStatusParticleKey.ZPLSC_ACTIVE_38K_MODE, active_38k_mode, str),
self._encode_value(ZPLSCStatusParticleKey.ZPLSC_ACTIVE_38K_POWER, active_38k_power, float),
self._encode_value(ZPLSCStatusParticleKey.ZPLSC_ACTIVE_38K_PULSE_LENGTH, active_38k_pulse_length, float),
self._encode_value(ZPLSCStatusParticleKey.ZPLSC_ACTIVE_38K_SAMPLE_INTERVAL, active_38k_sample_interval, float),
self._encode_value(ZPLSCStatusParticleKey.ZPLSC_CURRENT_UTC_TIME, current_utc_time, str),
self._encode_value(ZPLSCStatusParticleKey.ZPLSC_EXECUTABLE, executable, str),
self._encode_value(ZPLSCStatusParticleKey.ZPLSC_FS_ROOT, fs_root, str),
self._encode_value(ZPLSCStatusParticleKey.ZPLSC_NEXT_SCHEDULED_INTERVAL, next_scheduled_interval, str),
self._encode_value(ZPLSCStatusParticleKey.ZPLSC_HOST, host, str),
self._encode_value(ZPLSCStatusParticleKey.ZPLSC_PID, pid, str),
self._encode_value(ZPLSCStatusParticleKey.ZPLSC_PORT, port, int),
self._encode_value(ZPLSCStatusParticleKey.ZPLSC_CURRENT_RAW_FILENAME, current_raw_filename, str),
self._encode_value(ZPLSCStatusParticleKey.ZPLSC_CURRENT_RAW_FILESIZE, current_raw_filesize, int),
self._encode_value(ZPLSCStatusParticleKey.ZPLSC_FILE_PATH, file_path, str),
self._encode_value(ZPLSCStatusParticleKey.ZPLSC_FILE_PREFIX, file_prefix, str),
self._encode_value(ZPLSCStatusParticleKey.ZPLSC_MAX_FILE_SIZE, max_file_size, int),
self._encode_value(ZPLSCStatusParticleKey.ZPLSC_SAMPLE_RANGE, sample_range, float),
self._encode_value(ZPLSCStatusParticleKey.ZPLSC_SAVE_BOTTOM, save_bottom, int),
self._encode_value(ZPLSCStatusParticleKey.ZPLSC_SAVE_INDEX, save_index, int),
self._encode_value(ZPLSCStatusParticleKey.ZPLSC_SAVE_RAW, save_raw, int),
self._encode_value(ZPLSCStatusParticleKey.ZPLSC_SCHEDULED_INTERVALS_REMAINING, scheduled_intervals_remaining, int),
self._encode_value(ZPLSCStatusParticleKey.ZPLSC_GPTS_ENABLED, gpts_enabled, int),
self._encode_value(ZPLSCStatusParticleKey.ZPLSC_SCHEDULE_FILENAME, schedule_filename, str)
]
log.debug("build_parsed_value: %s", result)
return result
###############################################################################
# Driver
###############################################################################
class InstrumentDriver(SingleConnectionInstrumentDriver):
"""
InstrumentDriver subclass
Subclasses SingleConnectionInstrumentDriver with connection state machine.
"""
########################################################################
# Protocol builder.
########################################################################
def _build_protocol(self):
"""
Construct the driver protocol state machine.
"""
self._protocol = Protocol(Prompt, NEWLINE, self._driver_event)
###########################################################################
# Protocol
###########################################################################
class Protocol(CommandResponseInstrumentProtocol):
"""
Instrument protocol class
Subclasses CommandResponseInstrumentProtocol
"""
__metaclass__ = get_logging_metaclass(log_level='trace')
def __init__(self, prompts, newline, driver_event):
"""
Protocol constructor.
@param prompts A BaseEnum class containing instrument prompts.
@param newline The newline.
@param driver_event Driver process event callback.
"""
# Construct protocol superclass.
CommandResponseInstrumentProtocol.__init__(self, prompts, newline, driver_event)
# Build protocol state machine.
self._protocol_fsm = ThreadSafeFSM(ProtocolState, ProtocolEvent,
ProtocolEvent.ENTER, ProtocolEvent.EXIT)
# Add event handlers for protocol state machine.
self._protocol_fsm.add_handler(ProtocolState.UNKNOWN, ProtocolEvent.ENTER, self._handler_unknown_enter)
self._protocol_fsm.add_handler(ProtocolState.UNKNOWN, ProtocolEvent.DISCOVER, self._handler_unknown_discover)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.ENTER, self._handler_command_enter)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.START_AUTOSAMPLE, self._handler_command_autosample)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.ACQUIRE_STATUS, self._handler_command_acquire_status)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.GET, self._handler_command_get)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.SET, self._handler_command_set)
self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.STOP_AUTOSAMPLE, self._handler_autosample_stop)
# Construct the parameter dictionary containing device parameters,
# current parameter values, and set formatting functions.
self._build_driver_dict()
self._build_command_dict()
self._build_param_dict()
# Add sample handlers.
        # Start state machine in UNKNOWN state.
self._protocol_fsm.start(ProtocolState.UNKNOWN)
        # commands sent to device to be filtered in responses for telnet DA
self._sent_cmds = []
def _build_param_dict(self):
"""
Populate the parameter dictionary with parameters.
For each parameter key, add match string, match lambda function,
and value formatting function for set commands.
"""
self._param_dict.add(Parameter.SCHEDULE,
r'schedule:\s+(.*)',
lambda match: match.group(1),
str,
type=ParameterDictType.STRING,
display_name="Schedule",
description="Large block of text used to create the .yaml file defining the sampling schedule.",
startup_param=True,
default_value=yaml.dump(DEFAULT_CONFIG, default_flow_style=False))
self._param_dict.add(Parameter.FTP_IP_ADDRESS,
                             r'ftp address:\s+(\d+\.\d+\.\d+\.\d+)',
lambda match: match.group(1),
str,
type=ParameterDictType.STRING,
display_name="FTP IP Address",
description="IP address the driver uses to connect to the instrument FTP server.",
startup_param=True,
default_value=DEFAULT_HOST)
self._param_dict.add(Parameter.FTP_USERNAME,
r'username:(.*)',
lambda match: match.group(1),
str,
type=ParameterDictType.STRING,
display_name="FTP User Name",
description="Username used to connect to the FTP server.",
startup_param=True,
default_value=USER_NAME)
self._param_dict.add(Parameter.FTP_PASSWORD,
r'password:(.*)',
lambda match: match.group(1),
str,
type=ParameterDictType.STRING,
display_name="FTP Password",
description="Password used to connect to the FTP server.",
startup_param=True,
default_value=PASSWORD)
self._param_dict.add(Parameter.FTP_PORT,
r'port:(.*)',
lambda match: match.group(1),
str,
type=ParameterDictType.STRING,
display_name="FTP Port",
description="Location on the OOI infrastructure where .raw files and echogram images will be stored.",
startup_param=True,
default_value=DEFAULT_PORT)
def _build_driver_dict(self):
"""
Populate the driver dictionary with options
"""
self._driver_dict.add(DriverDictKey.VENDOR_SW_COMPATIBLE, True)
def _build_command_dict(self):
"""
Populate the command dictionary with command.
"""
self._cmd_dict.add(Capability.START_AUTOSAMPLE, display_name="Start Autosample")
self._cmd_dict.add(Capability.STOP_AUTOSAMPLE, display_name="Stop Autosample")
self._cmd_dict.add(Capability.ACQUIRE_STATUS, display_name="Acquire Status")
def _filter_capabilities(self, events):
"""
Return a list of currently available capabilities.
"""
return [x for x in events if Capability.has(x)]
########################################################################
# Unknown handlers.
########################################################################
def _handler_unknown_enter(self, *args, **kwargs):
"""
Enter unknown state.
"""
# Tell driver superclass to send a state change event.
# Superclass will query the state.
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
def _handler_unknown_exit(self, *args, **kwargs):
"""
Exit unknown state.
"""
pass
def _handler_unknown_discover(self, *args, **kwargs):
"""
Discover current state
@retval (next_state, next_agent_state)
"""
# Try to get the status to check if the instrument is alive
host = self._param_dict.get_config_value(Parameter.FTP_IP_ADDRESS)
port = self._param_dict.get_config_value(Parameter.FTP_PORT)
response = self._url_request(host, port, '/status.json')
if response is None:
error_msg = "_handler_unknown_discover: Unable to connect to host: %s" % host
log.error(error_msg)
raise InstrumentConnectionException(error_msg)
return ProtocolState.COMMAND, ResourceAgentState.IDLE
########################################################################
# Command handlers.
########################################################################
def _handler_command_enter(self, *args, **kwargs):
"""
Enter command state.
@throws InstrumentTimeoutException if the device cannot be woken.
@throws InstrumentProtocolException if the update commands and not recognized.
"""
self._init_params()
# Tell driver superclass to send a state change event.
# Superclass will query the state.
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
def _handler_command_exit(self, *args, **kwargs):
"""
Exit command state.
"""
pass
def _handler_command_get(self, *args, **kwargs):
"""
Get parameters while in the command state.
@param params List of the parameters to pass to the state
@retval returns (next_state, result) where result is a dict {}. No
agent state changes happening with Get, so no next_agent_state
@throw InstrumentParameterException for invalid parameter
"""
result_vals = {}
# Retrieve required parameter.
# Raise if no parameter provided, or not a dict.
try:
params = args[0]
except IndexError:
raise InstrumentParameterException('_handler_command_get requires a parameter dict.')
if Parameter.ALL in params:
log.debug("Parameter ALL in params")
params = Parameter.list()
params.remove(Parameter.ALL)
log.debug("_handler_command_get: params = %s", params)
if params is None or not isinstance(params, list):
raise InstrumentParameterException("GET parameter list not a list!")
# fill the return values from the update
for param in params:
if not Parameter.has(param):
raise InstrumentParameterException("Invalid parameter!")
result_vals[param] = self._param_dict.get(param)
self._param_dict.get_config_value(param)
result = result_vals
log.debug("Get finished, next_state: %s, result: %s", None, result)
return None, result
def _handler_command_set(self, *args, **kwargs):
"""
Set parameter
@retval next state, result
"""
startup = False
try:
params = args[0]
except IndexError:
raise InstrumentParameterException('_handler_command_set: command requires a parameter dict.')
try:
startup = args[1]
except IndexError:
pass
if not isinstance(params, dict):
raise InstrumentParameterException('Set parameters not a dict.')
# For each key, val in the params, set the param dictionary.
old_config = self._param_dict.get_config()
self._set_params(params, startup)
new_config = self._param_dict.get_config()
if old_config != new_config:
self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)
return None, None
def _set_params(self, *args, **kwargs):
"""
Issue commands to the instrument to set various parameters
"""
try:
params = args[0]
except IndexError:
raise InstrumentParameterException('Set command requires a parameter dict.')
# verify param is not readonly param
self._verify_not_readonly(*args, **kwargs)
for key, val in params.iteritems():
log.debug("KEY = %s VALUE = %s", key, val)
self._param_dict.set_value(key, val)
if key == Parameter.SCHEDULE:
self._ftp_schedule_file()
# Load the schedule file
host = self._param_dict.get(Parameter.FTP_IP_ADDRESS)
port = self._param_dict.get_config_value(Parameter.FTP_PORT)
log.debug("_set_params: stop the current schedule file")
self._url_request(host, port, '/stop_schedule', data={})
log.debug("_set_params: upload driver YAML file to host %s", host)
res = self._url_request(host, port, '/load_schedule', data=json.dumps({'filename': YAML_FILE_NAME}))
log.debug("_set_params: result from load = %s", res)
log.debug("set complete, update params")
def _ftp_schedule_file(self):
"""
Construct a YAML schedule file and
ftp the file to the Instrument server
"""
# Create a temporary file and write the schedule YAML information to the file
try:
config_file = tempfile.TemporaryFile()
log.debug("temporary file created")
if config_file is None or not isinstance(config_file, file):
raise InstrumentException("config_file is not a temp file!")
config_file.write(self._param_dict.get(Parameter.SCHEDULE))
config_file.seek(0)
log.debug("finished writing config file:\n%r", self._param_dict.get(Parameter.SCHEDULE))
except Exception as e:
log.error("Create schedule YAML file exception: %s", e)
raise e
# FTP the schedule file to the ZPLSC server
host = ''
try:
log.debug("Create a ftp session")
host = self._param_dict.get_config_value(Parameter.FTP_IP_ADDRESS)
log.debug("Got host ip address %s", host)
ftp_session = ftplib.FTP()
ftp_session.connect(host)
ftp_session.login(USER_NAME, PASSWORD)
log.debug("ftp session was created...")
ftp_session.set_pasv(False)
ftp_session.cwd("config")
ftp_session.storlines('STOR ' + YAML_FILE_NAME, config_file)
files = ftp_session.dir()
log.debug("*** Config yaml file sent: %s", files)
ftp_session.quit()
config_file.close()
except (ftplib.socket.error, ftplib.socket.gaierror), e:
log.error("ERROR: cannot reach FTP Host %s: %s ", host, e)
raise InstrumentException("ERROR: cannot reach FTP Host %s " % host)
log.debug("*** FTP %s to ftp host %s successfully", YAML_FILE_NAME, host)
def _url_request(self, host, port, page, data=None):
"""
Loads a schedule file previously uploaded to the instrument and sets it as
the active instrument configuration
"""
result = None
url = "https://%s:%d/%s" % (host, port, page)
try:
if data is not None:
log.debug("Request data: %s", data)
req = urllib2.Request(url, data=data, headers={'Content-Type': 'application/json'})
else:
log.debug("No request data")
req = urllib2.Request(url)
log.debug("Request url: %s", req.__dict__)
f = urllib2.urlopen(req, timeout=10)
res = f.read()
f.close()
except urllib2.HTTPError as e:
log.error("Failed to open url %s. %s", url, e)
return result
except urllib2.URLError as e:
log.error("Failed to open url %s. %s", url, e)
return result
try:
result = json.loads(res)
except ValueError:
log.error("Request from url %s is not in valid json format, returned: %s.", url, res)
return result
def _handler_command_autosample(self, *args, **kwargs):
"""
Start autosample mode
@retval next_state, (next_resource_state, result) tuple
"""
# FTP the driver schedule file to the instrument server
self._ftp_schedule_file()
# Stop the current running schedule file just in case one is running and
# load the driver schedule file
host = self._param_dict.get(Parameter.FTP_IP_ADDRESS)
port = self._param_dict.get_config_value(Parameter.FTP_PORT)
log.debug("_handler_command_autosample: stop the current schedule file")
self._url_request(host, port, '/stop_schedule', data={})
log.debug("_handler_command_autosample: upload driver YAML file to host %s", host)
res = self._url_request(host, port, '/load_schedule', data=json.dumps({'filename': YAML_FILE_NAME}))
log.debug(" result from load = %s", res)
if res.get('result') != 'OK':
raise InstrumentException('_handler_command_autosample: Load Instrument Schedule File Error.')
res = self._url_request(host, port, '/start_schedule', data={})
if res.get('result') != 'OK':
raise InstrumentException('_handler_command_autosample: Start Schedule File Error.')
return ProtocolState.AUTOSAMPLE, (ResourceAgentState.STREAMING, None)
def _handler_command_acquire_status(self, *args, **kwargs):
"""
Acquire status from the instrument
@retval next_state, (next_resource_state, result) tuple
"""
host = self._param_dict.get_config_value(Parameter.FTP_IP_ADDRESS)
port = self._param_dict.get_config_value(Parameter.FTP_PORT)
response = self._url_request(host, port, '/status.json')
if response:
log.debug("_handler_command_acquire_status: response from status = %r", response)
particle = ZPLSCStatusParticle(response, port_timestamp=self._param_dict.get_current_timestamp())
self._driver_event(DriverAsyncEvent.SAMPLE, particle.generate())
else:
log.error("_handler_command_acquire_status: Failed to acquire status from instrument.")
return None, (None, None)
########################################################################
# Autosample handlers
########################################################################
def _handler_autosample_enter(self, *args, **kwargs):
"""
Enter autosample mode
"""
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
def _handler_autosample_stop(self):
"""
Stop autosample mode
@retval next_state, (next_resource_state, result) tuple
"""
host = self._param_dict.get_config_value(Parameter.FTP_IP_ADDRESS)
port = self._param_dict.get_config_value(Parameter.FTP_PORT)
log.debug("_handler_autosample_stop: stop the current schedule file")
        res = self._url_request(host, port, '/stop_schedule', data=json.dumps({}))
log.debug("handler_autosample_stop: stop schedule returns %r", res)
return ProtocolState.COMMAND, (ResourceAgentState.COMMAND, None)
| license: bsd-2-clause | hash: 1,710,098,018,871,872,300 | line_mean: 42.567148 | line_max: 132 | alpha_frac: 0.572542 | autogenerated: false | ratio: 4.178518 | config_test: true | has_no_keywords: false | few_assignments: false |
| repo_name: EmbodiedCognition/pagoda | path: pagoda/physics.py | copies: 1 | size: 47156 |
'''This module contains convenience wrappers for ODE objects.'''
from __future__ import division
import collections
import numpy as np
import ode
BodyState = collections.namedtuple(
'BodyState', 'name position quaternion linear_velocity angular_velocity')
class Registrar(type):
'''A metaclass that builds a registry of its subclasses.'''
def __init__(cls, name, bases, dct):
if not hasattr(cls, '_registry'):
cls._registry = {}
else:
key = name.lower()
for i in range(3, len(name) + 1):
cls._registry[key[:i]] = cls
super(Registrar, cls).__init__(name, bases, dct)
def build(cls, key, *args, **kwargs):
return cls._registry[key.lower()](*args, **kwargs)
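# Editor's note (not part of the original module): thanks to the Registrar
# metaclass, the Body and Joint subclasses defined below can be looked up by
# name prefix, so Body.build('box', ...), Body.build('sph', ...) or
# Joint.build('hinge', ...) resolve to Box, Sphere and Hinge respectively; any
# prefix of at least three characters of the class name works as a key.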
class Body(Registrar(str('Base'), (), {})):
'''This class wraps things that participate in the ODE physics simulation.
This class basically provides lots of Python-specific properties that call
the equivalent ODE getters and setters for things like position, rotation,
etc.
'''
def __init__(self, name, world, density=1000., mass=None, **shape):
self.name = name
self.world = world
self.shape = shape
m = ode.Mass()
self.init_mass(m, density, mass)
self.ode_body = ode.Body(world.ode_world)
self.ode_body.setMass(m)
self.ode_geom = getattr(ode, 'Geom%s' % self.__class__.__name__)(
world.ode_space, **shape)
self.ode_geom.setBody(self.ode_body)
def __str__(self):
return '{0.__class__.__name__} {0.name} at {1}'.format(
self, self.position.round(3))
@property
def mass(self):
'''The ODE mass object for this body.'''
return self.ode_body.getMass()
@property
def state(self):
'''The state of this body includes:
- name of the body (str)
- position (3-tuple)
- quaternion (4-tuple)
- linear velocity (3-tuple)
- angular velocity (3-tuple)
'''
return BodyState(self.name,
tuple(self.position),
tuple(self.quaternion),
tuple(self.linear_velocity),
tuple(self.angular_velocity))
@state.setter
def state(self, state):
'''Set the state of this body.
Parameters
----------
state : BodyState tuple
The desired state of the body.
'''
assert self.name == state.name, \
'state name "{}" != body name "{}"'.format(state.name, self.name)
self.position = state.position
self.quaternion = state.quaternion
self.linear_velocity = state.linear_velocity
self.angular_velocity = state.angular_velocity
@property
def position(self):
'''The (x, y, z) coordinates of the center of this body.'''
return np.array(self.ode_body.getPosition())
@position.setter
def position(self, position):
'''Set the (x, y, z) coordinates of the center of this body.
Parameters
----------
position : 3-tuple of float
The coordinates of the desired center of this body.
'''
self.ode_body.setPosition(tuple(position))
@property
def rotation(self):
'''The rotation matrix for this body.'''
return np.array(self.ode_body.getRotation()).reshape((3, 3))
@rotation.setter
def rotation(self, rotation):
'''Set the rotation of this body using a rotation matrix.
Parameters
----------
rotation : sequence of 9 floats
The desired rotation matrix for this body.
'''
if isinstance(rotation, np.ndarray):
rotation = rotation.ravel()
self.ode_body.setRotation(tuple(rotation))
@property
def quaternion(self):
'''The (w, x, y, z) rotation quaternion for this body.'''
return np.array(self.ode_body.getQuaternion())
@quaternion.setter
def quaternion(self, quaternion):
self.ode_body.setQuaternion(tuple(quaternion))
@property
def linear_velocity(self):
'''Current linear velocity of this body (in world coordinates).'''
return np.array(self.ode_body.getLinearVel())
@linear_velocity.setter
def linear_velocity(self, velocity):
'''Set the linear velocity for this body.
Parameters
----------
velocity : 3-tuple of float
The desired velocity for this body, in world coordinates.
'''
self.ode_body.setLinearVel(tuple(velocity))
@property
def angular_velocity(self):
'''Current angular velocity of this body (in world coordinates).'''
return np.array(self.ode_body.getAngularVel())
@angular_velocity.setter
def angular_velocity(self, velocity):
'''Set the angular velocity for this body.
Parameters
----------
velocity : 3-tuple of float
The desired angular velocity for this body, in world coordinates.
'''
self.ode_body.setAngularVel(tuple(velocity))
@property
def force(self):
'''Current net force acting on this body (in world coordinates).'''
return np.array(self.ode_body.getForce())
@force.setter
def force(self, force):
'''Set the force acting on this body.
Parameters
----------
force : 3-tuple of float
The desired force acting on this body, in world coordinates.
'''
self.ode_body.setForce(tuple(force))
@property
def torque(self):
'''Current net torque acting on this body (in world coordinates).'''
return np.array(self.ode_body.getTorque())
@torque.setter
def torque(self, torque):
'''Set the torque acting on this body.
Parameters
----------
torque : 3-tuple of float
The desired torque acting on this body, in world coordinates.
'''
self.ode_body.setTorque(tuple(torque))
@property
def is_kinematic(self):
'''True iff this body is kinematic.'''
return self.ode_body.isKinematic()
@is_kinematic.setter
def is_kinematic(self, is_kinematic):
'''Set the kinematic/dynamic attribute for this body.
        In pagoda, kinematic bodies have infinite mass and do not interact with
        other bodies via collisions (see World.on_collision).
Parameters
----------
is_kinematic : bool
If True, this body will be set to kinematic. If False, it will be
set to dynamic.
'''
if is_kinematic:
self.ode_body.setKinematic()
else:
self.ode_body.setDynamic()
@property
def follows_gravity(self):
'''True iff this body follows gravity.'''
return self.ode_body.getGravityMode()
@follows_gravity.setter
def follows_gravity(self, follows_gravity):
'''Set whether this body follows gravity.
Parameters
----------
follows_gravity : bool
This body will follow gravity iff this parameter is True.
'''
self.ode_body.setGravityMode(follows_gravity)
def rotate_to_body(self, x):
'''Rotate the given vector to the same orientation as this body.
Parameters
----------
x : 3-tuple of float
A point in three dimensions.
Returns
-------
xrot : 3-tuple of float
The same point after rotation into the orientation of this body.
'''
return np.dot(x, self.rotation)
def body_to_world(self, position):
'''Convert a body-relative offset to world coordinates.
Parameters
----------
position : 3-tuple of float
A tuple giving body-relative offsets.
Returns
-------
position : 3-tuple of float
A tuple giving the world coordinates of the given offset.
'''
return np.array(self.ode_body.getRelPointPos(tuple(position)))
def world_to_body(self, position):
'''Convert a point in world coordinates to a body-relative offset.
Parameters
----------
position : 3-tuple of float
A world coordinates position.
Returns
-------
offset : 3-tuple of float
A tuple giving the body-relative offset of the given position.
'''
return np.array(self.ode_body.getPosRelPoint(tuple(position)))
def relative_offset_to_world(self, offset):
'''Convert a relative body offset to world coordinates.
Parameters
----------
offset : 3-tuple of float
The offset of the desired point, given as a relative fraction of the
size of this body. For example, offset (0, 0, 0) is the center of
the body, while (0.5, -0.2, 0.1) describes a point halfway from the
center towards the maximum x-extent of the body, 20% of the way from
the center towards the minimum y-extent, and 10% of the way from the
center towards the maximum z-extent.
Returns
-------
position : 3-tuple of float
A position in world coordinates of the given body offset.
'''
return np.array(self.body_to_world(offset * self.dimensions / 2))
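    # Editor's note (not part of the original module): for a box with lengths
    # (1, 2, 4), an offset of (0.5, -0.2, 0.1) maps to the body-frame point
    # (0.25, -0.2, 0.2), which body_to_world() then rotates and translates into
    # world coordinates.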
def add_force(self, force, relative=False, position=None, relative_position=None):
'''Add a force to this body.
Parameters
----------
force : 3-tuple of float
A vector giving the forces along each world or body coordinate axis.
relative : bool, optional
If False, the force values are assumed to be given in the world
coordinate frame. If True, they are assumed to be given in the
body-relative coordinate frame. Defaults to False.
position : 3-tuple of float, optional
If given, apply the force at this location in world coordinates.
Defaults to the current position of the body.
relative_position : 3-tuple of float, optional
If given, apply the force at this relative location on the body. If
given, this method ignores the ``position`` parameter.
'''
b = self.ode_body
if relative_position is not None:
op = b.addRelForceAtRelPos if relative else b.addForceAtRelPos
op(force, relative_position)
elif position is not None:
op = b.addRelForceAtPos if relative else b.addForceAtPos
op(force, position)
else:
op = b.addRelForce if relative else b.addForce
op(force)
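    # Editor's note (not part of the original module): typical call patterns --
    #   body.add_force((0, 0, 10))                              # world frame, at the center of mass
    #   body.add_force((0, 0, 10), relative=True)               # body frame
    #   body.add_force((0, 0, 10), relative_position=(0, 0, 1)) # at a body-frame point 1 unit above the center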
def add_torque(self, torque, relative=False):
'''Add a torque to this body.
Parameters
----------
force : 3-tuple of float
A vector giving the torque along each world or body coordinate axis.
relative : bool, optional
If False, the torque values are assumed to be given in the world
coordinate frame. If True, they are assumed to be given in the
body-relative coordinate frame. Defaults to False.
'''
op = self.ode_body.addRelTorque if relative else self.ode_body.addTorque
op(torque)
def join_to(self, joint, other_body=None, **kwargs):
'''Connect this body to another one using a joint.
This method creates a joint to fasten this body to the other one. See
:func:`World.join`.
Parameters
----------
joint : str
The type of joint to use when connecting these bodies.
other_body : :class:`Body` or str, optional
The other body to join with this one. If not given, connects this
body to the world.
'''
self.world.join(joint, self, other_body, **kwargs)
def connect_to(self, joint, other_body, offset=(0, 0, 0), other_offset=(0, 0, 0),
**kwargs):
'''Move another body next to this one and join them together.
This method will move the ``other_body`` so that the anchor points for
the joint coincide. It then creates a joint to fasten the two bodies
together. See :func:`World.move_next_to` and :func:`World.join`.
Parameters
----------
joint : str
The type of joint to use when connecting these bodies.
other_body : :class:`Body` or str
The other body to join with this one.
offset : 3-tuple of float, optional
The body-relative offset where the anchor for the joint should be
placed. Defaults to (0, 0, 0). See :func:`World.move_next_to` for a
description of how offsets are specified.
other_offset : 3-tuple of float, optional
The offset on the second body where the joint anchor should be
placed. Defaults to (0, 0, 0). Like ``offset``, this is given as an
offset relative to the size and shape of ``other_body``.
'''
anchor = self.world.move_next_to(self, other_body, offset, other_offset)
self.world.join(joint, self, other_body, anchor=anchor, **kwargs)
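def _example_connect_two_boxes():
    # Editor's illustrative sketch (not part of the original module): create two
    # unit boxes in a fresh world and fasten them with a hinge whose anchor sits
    # where their faces touch. Names and sizes here are arbitrary assumptions.
    world = World()
    a = world.create_body('box', lengths=(1, 1, 1))
    b = world.create_body('box', lengths=(1, 1, 1))
    # move b so its -z face meets a's +z face, then join them with a hinge;
    # joint axes and stops can be set afterwards via world.get_joint(...)
    a.connect_to('hinge', b, offset=(0, 0, 1), other_offset=(0, 0, -1))
    return world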
class Box(Body):
@property
def lengths(self):
return self.shape['lengths']
@property
def dimensions(self):
return np.array(self.lengths).squeeze()
@property
def volume(self):
return np.prod(self.lengths)
def init_mass(self, m, density, mass):
if mass:
density = mass / self.volume
m.setBox(density, *self.lengths)
class Sphere(Body):
@property
def radius(self):
return self.shape['radius']
@property
def dimensions(self):
d = 2 * self.radius
return np.array([d, d, d]).squeeze()
@property
def volume(self):
return 4 / 3 * np.pi * self.radius ** 3
def init_mass(self, m, density, mass):
if mass:
density = mass / self.volume
m.setSphere(density, self.radius)
class Cylinder(Body):
@property
def radius(self):
return self.shape['radius']
@property
def length(self):
return self.shape['length']
@property
def dimensions(self):
d = 2 * self.radius
return np.array([d, d, self.length]).squeeze()
@property
def volume(self):
return self.length * np.pi * self.radius ** 2
def init_mass(self, m, density, mass):
if mass:
density = mass / self.volume
m.setCylinder(density, 3, self.radius, self.length)
class Capsule(Body):
@property
def radius(self):
return self.shape['radius']
@property
def length(self):
return self.shape['length']
@property
def dimensions(self):
d = 2 * self.radius
return np.array([d, d, d + self.length]).squeeze()
@property
def volume(self):
return 4 / 3 * np.pi * self.radius ** 3 + \
self.length * np.pi * self.radius ** 2
def init_mass(self, m, density, mass):
if mass:
density = mass / self.volume
m.setCapsule(density, 3, self.radius, self.length)
def _get_params(target, param, dof):
'''Get the given param from each of the DOFs for a joint.'''
return [target.getParam(getattr(ode, 'Param{}{}'.format(param, s)))
for s in ['', '2', '3'][:dof]]
def _set_params(target, param, values, dof):
'''Set the given param for each of the DOFs for a joint.'''
if not isinstance(values, (list, tuple, np.ndarray)):
values = [values] * dof
assert dof == len(values)
for s, value in zip(['', '2', '3'][:dof], values):
target.setParam(getattr(ode, 'Param{}{}'.format(param, s)), value)
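# Editor's note (not part of the original module): for a 3-DOF target,
# _set_params(obj, 'LoStop', -1, 3) sets ode.ParamLoStop, ode.ParamLoStop2 and
# ode.ParamLoStop3 to -1, while passing a 3-element sequence sets one value per
# degree of freedom; _get_params reads them back in the same order.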
class Joint(Registrar(str('Base'), (), {})):
'''Base class for joints connecting two bodies.
In ODE, :class:`Body` objects represent mass/inertia properties, while
:class:`Joint` and :class:`Motor` objects represent mathematical constraints
that govern how specific pairs of bodies interact. For example, a
:class:`BallJoint` that connects two bodies will force the anchor point for
those two bodies to remain in the same location in world coordinates -- any
linear force that displaces one of the bodies will also cause a force to be
applied to the second body, because of the constraint imposed by the ball
joint. As another example, a :class:`Slider` that connects two bodies allows
those two bodies to displace relative to one another along a single axis,
but not to rotate with respect to one another -- any torque applied to one
body will also cause a torque to be applied to the other body.
Constraints can be applied to angular degrees of freedom (e.g.,
:class:`AMotor`), linear degrees of freedom (e.g., :class:`BallJoint`,
:class:`LMotor`), or both (e.g., :class:`PistonJoint`).
Both joints and motors apply constraints to pairs of bodies, but they are
quite different in many ways and so are represented using specific
subclasses. This superclass is just a mixin to avoid repeating the getters
and setters that are common between motors and joints.
'''
ADOF = 0
LDOF = 0
@property
def feedback(self):
'''Feedback buffer (list of 3-tuples) for this ODE motor/joint.'''
return self.ode_obj.getFeedback()
@property
def positions(self):
'''List of positions for linear degrees of freedom.'''
return [self.ode_obj.getPosition(i) for i in range(self.LDOF)]
@property
def position_rates(self):
'''List of position rates for linear degrees of freedom.'''
return [self.ode_obj.getPositionRate(i) for i in range(self.LDOF)]
@property
def angles(self):
'''List of angles for rotational degrees of freedom.'''
return [self.ode_obj.getAngle(i) for i in range(self.ADOF)]
@property
def angle_rates(self):
'''List of angle rates for rotational degrees of freedom.'''
return [self.ode_obj.getAngleRate(i) for i in range(self.ADOF)]
@property
def axes(self):
'''List of axes for this object's degrees of freedom.'''
return [np.array(self.ode_obj.getAxis(i))
for i in range(self.ADOF or self.LDOF)]
@axes.setter
def axes(self, axes):
'''Set the axes for this object's degrees of freedom.
Parameters
----------
axes : list of axes specifications
A list of axis values to set. This list must have the same number of
elements as the degrees of freedom of the underlying ODE object.
Each element can be
(a) None, which has no effect on the corresponding axis, or
(b) three floats specifying the axis to set.
'''
assert self.ADOF == len(axes) or self.LDOF == len(axes)
for i, axis in enumerate(axes):
if axis is not None:
self.ode_obj.setAxis(i, 0, axis)
@property
def lo_stops(self):
'''List of lo stop values for this object's degrees of freedom.'''
return _get_params(self.ode_obj, 'LoStop', self.ADOF + self.LDOF)
@lo_stops.setter
def lo_stops(self, lo_stops):
'''Set the lo stop values for this object's degrees of freedom.
Parameters
----------
lo_stops : float or sequence of float
A lo stop value to set on all degrees of freedom, or a list
containing one such value for each degree of freedom. For rotational
degrees of freedom, these values must be in radians.
'''
_set_params(self.ode_obj, 'LoStop', lo_stops, self.ADOF + self.LDOF)
@property
def hi_stops(self):
'''List of hi stop values for this object's degrees of freedom.'''
return _get_params(self.ode_obj, 'HiStop', self.ADOF + self.LDOF)
@hi_stops.setter
def hi_stops(self, hi_stops):
'''Set the hi stop values for this object's degrees of freedom.
Parameters
----------
hi_stops : float or sequence of float
A hi stop value to set on all degrees of freedom, or a list
containing one such value for each degree of freedom. For rotational
degrees of freedom, these values must be in radians.
'''
_set_params(self.ode_obj, 'HiStop', hi_stops, self.ADOF + self.LDOF)
@property
def velocities(self):
'''List of target velocity values for rotational degrees of freedom.'''
return _get_params(self.ode_obj, 'Vel', self.ADOF + self.LDOF)
@velocities.setter
def velocities(self, velocities):
'''Set the target velocities for this object's degrees of freedom.
Parameters
----------
velocities : float or sequence of float
A target velocity value to set on all degrees of freedom, or a list
containing one such value for each degree of freedom. For rotational
degrees of freedom, these values must be in radians / second.
'''
_set_params(self.ode_obj, 'Vel', velocities, self.ADOF + self.LDOF)
@property
def max_forces(self):
'''List of max force values for rotational degrees of freedom.'''
return _get_params(self.ode_obj, 'FMax', self.ADOF + self.LDOF)
@max_forces.setter
def max_forces(self, max_forces):
'''Set the maximum forces for this object's degrees of freedom.
Parameters
----------
max_forces : float or sequence of float
A maximum force value to set on all degrees of freedom, or a list
containing one such value for each degree of freedom.
'''
_set_params(self.ode_obj, 'FMax', max_forces, self.ADOF + self.LDOF)
@property
def erps(self):
'''List of ERP values for this object's degrees of freedom.'''
return _get_params(self.ode_obj, 'ERP', self.ADOF + self.LDOF)
@erps.setter
def erps(self, erps):
'''Set the ERP values for this object's degrees of freedom.
Parameters
----------
erps : float or sequence of float
An ERP value to set on all degrees of freedom, or a list
containing one such value for each degree of freedom.
'''
_set_params(self.ode_obj, 'ERP', erps, self.ADOF + self.LDOF)
@property
def cfms(self):
'''List of CFM values for this object's degrees of freedom.'''
return _get_params(self.ode_obj, 'CFM', self.ADOF + self.LDOF)
@cfms.setter
def cfms(self, cfms):
'''Set the CFM values for this object's degrees of freedom.
Parameters
----------
cfms : float or sequence of float
A CFM value to set on all degrees of freedom, or a list
containing one such value for each degree of freedom.
'''
_set_params(self.ode_obj, 'CFM', cfms, self.ADOF + self.LDOF)
@property
def stop_cfms(self):
'''List of lo/hi stop CFM values.'''
return _get_params(self.ode_obj, 'StopCFM', self.ADOF + self.LDOF)
@stop_cfms.setter
def stop_cfms(self, stop_cfms):
'''Set the CFM values for this object's DOF limits.
Parameters
----------
stop_cfms : float or sequence of float
A CFM value to set on all degrees of freedom limits, or a list
containing one such value for each degree of freedom limit.
'''
_set_params(self.ode_obj, 'StopCFM', stop_cfms, self.ADOF + self.LDOF)
@property
def stop_erps(self):
'''List of lo/hi stop ERP values.'''
return _get_params(self.ode_obj, 'StopERP', self.ADOF + self.LDOF)
@stop_erps.setter
def stop_erps(self, stop_erps):
'''Set the ERP values for this object's DOF limits.
Parameters
----------
stop_erps : float or sequence of float
An ERP value to set on all degrees of freedom limits, or a list
containing one such value for each degree of freedom limit.
'''
_set_params(self.ode_obj, 'StopERP', stop_erps, self.ADOF + self.LDOF)
def enable_feedback(self):
'''Enable feedback on this ODE object.'''
self.ode_obj.setFeedback(True)
def disable_feedback(self):
'''Disable feedback on this ODE object.'''
self.ode_obj.setFeedback(False)
class Dynamic(Joint):
'''This class wraps an ODE motor -- either an LMotor or an AMotor.
Parameters
----------
name : str
A name for this object in the world.
world : :class:`World`
A world object to which this motor belongs.
body_a : :class:`Body`
A first body connected to this joint.
body_b : :class:`Body`, optional
A second body connected to this joint. If not given, the joint will
connect the first body to the world.
feedback : bool, optional
Feedback will be enabled on this motor iff this is True. Defaults to
False.
dof : int, optional
Number of degrees of freedom in this motor. Defaults to 3.
jointgroup : ode.JointGroup, optional
A joint group to which this motor belongs. Defaults to the default joint
group in the world.
'''
def __init__(self, name, world, body_a, body_b=None, feedback=False, dof=3,
jointgroup=None):
self.name = name
self.ode_obj = self.MOTOR_FACTORY(world.ode_world, jointgroup=jointgroup)
self.ode_obj.attach(body_a.ode_body, body_b.ode_body if body_b else None)
self.ode_obj.setNumAxes(dof)
self.cfms = 1e-8
if feedback:
self.enable_feedback()
else:
self.disable_feedback()
class AMotor(Dynamic):
'''An angular motor applies torques to change an angle in the physics world.
AMotors can be created in "user" mode---in which case the user must supply
all axis and angle values---or, for 3-DOF motors, in "euler" mode---in which
case the first and last axes must be specified, and ODE computes the middle
axis automatically.
'''
MOTOR_FACTORY = ode.AMotor
def __init__(self, *args, **kwargs):
mode = kwargs.pop('mode', 'user')
if isinstance(mode, str):
mode = ode.AMotorEuler if mode.lower() == 'euler' else ode.AMotorUser
super(AMotor, self).__init__(*args, **kwargs)
self.ode_obj.setMode(mode)
@property
def ADOF(self):
'''Number of angular degrees of freedom for this motor.'''
return self.ode_obj.getNumAxes()
@property
def axes(self):
'''List of axes for this object's degrees of freedom.'''
return [np.array(self.ode_obj.getAxis(i)) for i in range(self.ADOF)]
@axes.setter
def axes(self, axes):
'''Set the axes for this object's degrees of freedom.
Parameters
----------
axes : list of axis parameters
A list of axis values to set. This list must have the same number of
elements as the degrees of freedom of the underlying ODE object.
Each element can be
(a) None, which has no effect on the corresponding axis, or
(b) three floats specifying the axis to set, or
(c) a dictionary with an "axis" key specifying the axis to set and
an optional "rel" key (defaults to 0) specifying the relative
body to set the axis on.
'''
assert len(axes) == self.ADOF
for i, ax in enumerate(axes):
if ax is None:
continue
if not isinstance(ax, dict):
ax = dict(axis=ax)
self.ode_obj.setAxis(i, ax.get('rel', 0), ax['axis'])
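    # Editor's note (not part of the original module): in euler mode only the
    # first and third axes are supplied, each anchored to one of the joined
    # bodies, e.g.
    #   amotor.axes = [dict(rel=1, axis=(1, 0, 0)), None, dict(rel=2, axis=(0, 0, 1))]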
def add_torques(self, torques):
'''Add the given torques along this motor's axes.
Parameters
----------
torques : sequence of float
A sequence of torque values to apply to this motor's axes.
'''
self.ode_obj.addTorques(*torques)
class LMotor(Dynamic):
'''An LMotor applies forces to change a position in the physics world.'''
MOTOR_FACTORY = ode.LMotor
@property
def LDOF(self):
'''Number of linear degrees of freedom for this motor.'''
return self.ode_obj.getNumAxes()
class Kinematic(Joint):
'''This class wraps kinematic ODE joints with some Python properties.
Parameters
----------
name : str
Name of the joint to create. This is only to make the joint discoverable
in the world.
world : :class:`World`
Wrapper for the world in which this joint exists.
body_a : :class:`Body`
Wrapper for the first body that this joint connects.
body_b : :class:`Body`, optional
Wrapper for the second body that this joint connects. If this is None,
the joint will connect ``body_a`` to the ``world``.
anchor : 3-tuple of floats, optional
Anchor in world coordinates for the joint. Optional for :class:`Fixed`
joint.
feedback : bool, optional
If this is True, a force feedback structure will be enabled for this
joint, which will make it possible to record the forces that this joint
exerts on its two bodies. By default, no structure will be allocated.
jointgroup : ODE joint group, optional
Add the joint to this group. Defaults to the default world joint group.
'''
def __init__(self, name, world, body_a, body_b=None, anchor=None,
feedback=False, jointgroup=None, amotor=True, lmotor=True):
self.name = name
build = getattr(ode, '{}Joint'.format(self.__class__.__name__))
self.ode_obj = build(world.ode_world, jointgroup=jointgroup)
self.ode_obj.attach(body_a.ode_body, body_b.ode_body if body_b else None)
if anchor is not None:
self.ode_obj.setAnchor(tuple(anchor))
self.ode_obj.setParam(ode.ParamCFM, 0)
self.amotor = None
if self.ADOF > 0 and amotor:
self.amotor = AMotor(name=name + ':amotor',
world=world,
body_a=body_a,
body_b=body_b,
feedback=feedback,
jointgroup=jointgroup,
dof=self.ADOF,
mode='euler' if self.ADOF == 3 else 'user')
self.lmotor = None
if self.LDOF > 0 and lmotor:
self.lmotor = LMotor(name=name + ':lmotor',
world=world,
body_a=body_a,
body_b=body_b,
feedback=feedback,
jointgroup=jointgroup,
dof=self.LDOF)
if feedback:
self.enable_feedback()
else:
self.disable_feedback()
def __str__(self):
return self.name
@property
def anchor(self):
'''3-tuple specifying location of this joint's anchor.'''
return np.array(self.ode_obj.getAnchor())
@property
def anchor2(self):
'''3-tuple specifying location of the anchor on the second body.'''
return np.array(self.ode_obj.getAnchor2())
def add_torques(self, *torques):
'''Add the given torques along this joint's axes.
Parameters
----------
torques : sequence of float
A sequence of torque values to apply to this motor's axes.
'''
self.amotor.add_torques(*torques)
class Fixed(Kinematic):
ADOF = 0
LDOF = 0
class Slider(Kinematic):
ADOF = 0
LDOF = 1
@property
def positions(self):
'''List of positions for this joint's linear degrees of freedom.'''
return [self.ode_obj.getPosition()]
@property
def position_rates(self):
'''List of position rates for this joint's degrees of freedom.'''
return [self.ode_obj.getPositionRate()]
@property
def axes(self):
'''Axis of displacement for this joint.'''
return [np.array(self.ode_obj.getAxis())]
@axes.setter
def axes(self, axes):
'''Set the linear axis of displacement for this joint.
Parameters
----------
axes : list containing one 3-tuple of floats
A list of the axes for this joint. For a slider joint, which has one
degree of freedom, this must contain one 3-tuple specifying the X,
Y, and Z axis for the joint.
'''
self.lmotor.axes = [axes[0]]
self.ode_obj.setAxis(tuple(axes[0]))
class Hinge(Kinematic):
ADOF = 1
LDOF = 0
@property
def angles(self):
'''List of angles for this joint's rotational degrees of freedom.'''
return [self.ode_obj.getAngle()]
@property
def angle_rates(self):
'''List of angle rates for this joint's degrees of freedom.'''
return [self.ode_obj.getAngleRate()]
@property
def axes(self):
'''Axis of rotation for this joint.'''
return [np.array(self.ode_obj.getAxis())]
@axes.setter
def axes(self, axes):
'''Set the angular axis of rotation for this joint.
Parameters
----------
axes : list containing one 3-tuple of floats
A list of the axes for this joint. For a hinge joint, which has one
degree of freedom, this must contain one 3-tuple specifying the X,
Y, and Z axis for the joint.
'''
self.amotor.axes = [axes[0]]
self.ode_obj.setAxis(tuple(axes[0]))
class Piston(Kinematic):
ADOF = 1
LDOF = 1
@property
def axes(self):
'''Axis of rotation and displacement for this joint.'''
return [np.array(self.ode_obj.getAxis())]
@axes.setter
def axes(self, axes):
self.amotor.axes = [axes[0]]
self.lmotor.axes = [axes[0]]
self.ode_obj.setAxis(axes[0])
class Universal(Kinematic):
ADOF = 2
LDOF = 0
@property
def axes(self):
'''A list of axes of rotation for this joint.'''
return [np.array(self.ode_obj.getAxis1()),
np.array(self.ode_obj.getAxis2())]
@axes.setter
def axes(self, axes):
self.amotor.axes = [axes[0], axes[1]]
setters = [self.ode_obj.setAxis1, self.ode_obj.setAxis2]
for axis, setter in zip(axes, setters):
if axis is not None:
setter(tuple(axis))
@property
def angles(self):
'''A list of two angles for this joint's degrees of freedom.'''
return [self.ode_obj.getAngle1(), self.ode_obj.getAngle2()]
@property
def angle_rates(self):
'''A list of two angle rates for this joint's degrees of freedom.'''
return [self.ode_obj.getAngle1Rate(), self.ode_obj.getAngle2Rate()]
class Ball(Kinematic):
ADOF = 3
LDOF = 0
def __init__(self, name, *args, **kwargs):
super(Ball, self).__init__(name, *args, **kwargs)
# we augment ball joints with an additional motor that allows us to set
# rotation limits.
keys = 'name world body_a body_b feedback dof jointgroup'.split()
self.alimit = AMotor(name + ':alimit', *args, dof=self.ADOF, mode='euler',
**{k: v for k, v in kwargs.items() if k in keys})
@property
def angles(self):
return self.alimit.angles
@property
def angle_rates(self):
return self.alimit.angle_rates
@property
def axes(self):
return self.alimit.axes
@axes.setter
def axes(self, axes):
if len(axes) == 2:
axes = dict(rel=1, axis=axes[0]), None, dict(rel=2, axis=axes[1])
self.amotor.axes = axes
self.alimit.axes = axes
@property
def lo_stops(self):
return self.alimit.lo_stops
@lo_stops.setter
def lo_stops(self, lo_stops):
self.alimit.lo_stops = lo_stops
@property
def hi_stops(self):
return self.alimit.hi_stops
@hi_stops.setter
def hi_stops(self, hi_stops):
self.alimit.hi_stops = hi_stops
def make_quaternion(theta, *axis):
'''Given an angle and an axis, create a quaternion.'''
x, y, z = axis
r = np.sqrt(x * x + y * y + z * z)
st = np.sin(theta / 2.)
ct = np.cos(theta / 2.)
return [x * st / r, y * st / r, z * st / r, ct]
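def _example_quaternion():
    # Editor's illustrative sketch (not part of the original module): a rotation
    # of pi/2 about the +z axis. The return value packs the scaled axis
    # components first and cos(theta/2) last, roughly [0, 0, 0.7071, 0.7071].
    return make_quaternion(np.pi / 2, 0, 0, 1)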
def center_of_mass(bodies):
'''Given a set of bodies, compute their center of mass in world coordinates.
'''
    x = np.zeros(3)
t = 0.
for b in bodies:
m = b.mass
x += b.body_to_world(m.c) * m.mass
t += m.mass
return x / t
class World(object):
'''A wrapper for an ODE World object, for running in a simulator.'''
def __init__(self, dt=1. / 60, max_angular_speed=20):
self.ode_world = ode.World()
self.ode_world.setMaxAngularSpeed(max_angular_speed)
self.ode_space = ode.QuadTreeSpace((0, 0, 0), (100, 100, 20), 10)
self.ode_floor = ode.GeomPlane(self.ode_space, (0, 0, 1), 0)
self.ode_contactgroup = ode.JointGroup()
self.frame_no = 0
self.dt = dt
self.elasticity = 0.1
self.friction = 2000
self.gravity = 0, 0, -9.81
self.cfm = 1e-6
self.erp = 0.7
self._bodies = {}
self._joints = {}
@property
def gravity(self):
'''Current gravity vector in the world.'''
return self.ode_world.getGravity()
@gravity.setter
def gravity(self, gravity):
'''Set the gravity vector in the world.
Parameters
----------
gravity : 3-tuple of float
The vector where gravity should point.
'''
return self.ode_world.setGravity(gravity)
@property
def cfm(self):
'''Current global CFM value.'''
return self.ode_world.getCFM()
@cfm.setter
def cfm(self, cfm):
'''Set the global CFM value.
Parameters
----------
cfm : float
The desired global CFM value.
'''
return self.ode_world.setCFM(cfm)
@property
def erp(self):
'''Current global ERP value.'''
return self.ode_world.getERP()
@erp.setter
def erp(self, erp):
'''Set the global ERP value.
Parameters
----------
erp : float
The desired global ERP value.
'''
return self.ode_world.setERP(erp)
@property
def bodies(self):
'''Sequence of all bodies in the world, sorted by name.'''
for k in sorted(self._bodies):
yield self._bodies[k]
@property
def joints(self):
'''Sequence of all joints in the world, sorted by name.'''
for k in sorted(self._joints):
yield self._joints[k]
def get_body(self, key):
'''Get a body by key.
Parameters
----------
key : str, None, or :class:`Body`
The key for looking up a body. If this is None or a :class:`Body`
instance, the key itself will be returned.
Returns
-------
body : :class:`Body`
The body in the world with the given key.
'''
return self._bodies.get(key, key)
def get_joint(self, key):
'''Get a joint by key.
Parameters
----------
key : str
The key for a joint to look up.
Returns
-------
joint : :class:`Joint`
The joint in the world with the given key, or None if there is no
such joint.
'''
return self._joints.get(key, None)
def create_body(self, shape, name=None, **kwargs):
'''Create a new body.
Parameters
----------
shape : str
The "shape" of the body to be created. This should name a type of
body object, e.g., "box" or "cap".
name : str, optional
The name to use for this body. If not given, a default name will be
constructed of the form "{shape}{# of objects in the world}".
Returns
-------
body : :class:`Body`
The created body object.
'''
shape = shape.lower()
if name is None:
for i in range(1 + len(self._bodies)):
name = '{}{}'.format(shape, i)
if name not in self._bodies:
break
self._bodies[name] = Body.build(shape, name, self, **kwargs)
return self._bodies[name]
def join(self, shape, body_a, body_b=None, name=None, **kwargs):
'''Create a new joint that connects two bodies together.
Parameters
----------
shape : str
The "shape" of the joint to use for joining together two bodies.
This should name a type of joint, such as "ball" or "piston".
body_a : str or :class:`Body`
The first body to join together with this joint. If a string is
given, it will be used as the name of a body to look up in the
world.
body_b : str or :class:`Body`, optional
If given, identifies the second body to join together with
``body_a``. If not given, ``body_a`` is joined to the world.
name : str, optional
If given, use this name for the created joint. If not given, a name
will be constructed of the form
"{body_a.name}^{shape}^{body_b.name}".
Returns
-------
joint : :class:`Joint`
The joint object that was created.
'''
ba = self.get_body(body_a)
bb = self.get_body(body_b)
shape = shape.lower()
if name is None:
name = '{}^{}^{}'.format(ba.name, shape, bb.name if bb else '')
self._joints[name] = Joint.build(
shape, name, self, body_a=ba, body_b=bb, **kwargs)
return self._joints[name]
def move_next_to(self, body_a, body_b, offset_a, offset_b):
'''Move one body to be near another one.
After moving, the location described by ``offset_a`` on ``body_a`` will
be coincident with the location described by ``offset_b`` on ``body_b``.
Parameters
----------
body_a : str or :class:`Body`
The body to use as a reference for moving the other body. If this is
a string, it is treated as the name of a body to look up in the
world.
body_b : str or :class:`Body`
The body to move next to ``body_a``. If this is a string, it is
treated as the name of a body to look up in the world.
offset_a : 3-tuple of float
The offset of the anchor point, given as a relative fraction of the
size of ``body_a``. See :func:`Body.relative_offset_to_world`.
offset_b : 3-tuple of float
The offset of the anchor point, given as a relative fraction of the
size of ``body_b``.
Returns
-------
anchor : 3-tuple of float
The location of the shared point, which is often useful to use as a
joint anchor.
'''
ba = self.get_body(body_a)
bb = self.get_body(body_b)
if ba is None:
return bb.relative_offset_to_world(offset_b)
if bb is None:
return ba.relative_offset_to_world(offset_a)
anchor = ba.relative_offset_to_world(offset_a)
offset = bb.relative_offset_to_world(offset_b)
bb.position = bb.position + anchor - offset
return anchor
def get_body_states(self):
'''Return the complete state of all bodies in the world.
Returns
-------
states : list of state information tuples
A list of body state information for each body in the world. See
:func:`Body.state`.
'''
return [b.state for b in self.bodies]
def set_body_states(self, states):
'''Set the states of some bodies in the world.
Parameters
----------
states : sequence of states
A complete state tuple for one or more bodies in the world. See
:func:`get_body_states`.
'''
for state in states:
self.get_body(state.name).state = state
def step(self, substeps=2):
'''Step the world forward by one frame.
Parameters
----------
substeps : int, optional
Split the step into this many sub-steps. This helps to prevent the
time delta for an update from being too large.
'''
self.frame_no += 1
dt = self.dt / substeps
for _ in range(substeps):
self.ode_contactgroup.empty()
self.ode_space.collide(None, self.on_collision)
self.ode_world.step(dt)
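    # Editor's note (not part of the original module): a typical driver loop --
    #   world = World(dt=1. / 120)
    #   ...create bodies and joints...
    #   while not world.needs_reset():
    #       world.step(substeps=4)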
def needs_reset(self):
'''Return True iff the world needs to be reset.'''
return False
def reset(self):
'''Reset the state of the world.'''
pass
def on_key_press(self, key, modifiers, keymap):
'''Handle an otherwise unhandled keypress event (from a GUI).'''
if key == keymap.ENTER:
self.reset()
return True
def are_connected(self, body_a, body_b):
'''Determine whether the given bodies are currently connected.
Parameters
----------
body_a : str or :class:`Body`
One body to test for connectedness. If this is a string, it is
treated as the name of a body to look up.
body_b : str or :class:`Body`
One body to test for connectedness. If this is a string, it is
treated as the name of a body to look up.
Returns
-------
connected : bool
Return True iff the two bodies are connected.
'''
return bool(ode.areConnected(
self.get_body(body_a).ode_body,
self.get_body(body_b).ode_body))
def on_collision(self, args, geom_a, geom_b):
'''Callback function for the collide() method.
Parameters
----------
args : None
Arguments passed when the callback was registered. Not used.
geom_a : ODE geometry
The geometry object of one of the bodies that has collided.
geom_b : ODE geometry
The geometry object of one of the bodies that has collided.
'''
body_a = geom_a.getBody()
body_b = geom_b.getBody()
if ode.areConnected(body_a, body_b) or \
(body_a and body_a.isKinematic()) or \
(body_b and body_b.isKinematic()):
return
for c in ode.collide(geom_a, geom_b):
c.setBounce(self.elasticity)
c.setMu(self.friction)
ode.ContactJoint(self.ode_world, self.ode_contactgroup, c).attach(
geom_a.getBody(), geom_b.getBody())
| license: mit | hash: -2,953,832,453,207,946,000 | line_mean: 32.349364 | line_max: 86 | alpha_frac: 0.58192 | autogenerated: false | ratio: 4.007479 | config_test: false | has_no_keywords: false | few_assignments: false |
| repo_name: IQSS/geoconnect | path: gc_apps/gis_shapefiles/views_02_visualize.py | copies: 1 | size: 4711 |
from __future__ import print_function
import logging
from django.http import HttpResponse
from django.views.generic import View
from django.template.loader import render_to_string
from django.conf import settings
from gc_apps.gis_shapefiles.models import ShapefileInfo
from gc_apps.worldmap_layers.models import WorldMapLayerInfo
from gc_apps.worldmap_connect.send_shapefile_service import SendShapefileService
from gc_apps.geo_utils.geoconnect_step_names import GEOCONNECT_STEP_KEY, STEP2_STYLE,\
PANEL_TITLE_MAP_DATA_FILE, PANEL_TITLE_STYLE_MAP
from shared_dataverse_information.layer_classification.forms import\
ClassifyLayerForm, ATTRIBUTE_VALUE_DELIMITER
from gc_apps.geo_utils.message_helper_json import MessageHelperJSON
from gc_apps.gis_tabular.views import build_map_html
LOGGER = logging.getLogger(__name__)
from gc_apps.geo_utils.msg_util import msg, msgt
"""
Handle AJAX requests to Visualize a Layer
- Upon successful visualization, several pieces of the page are updated, including:
- page title
- breadcrumb
- main content panel
"""
def render_ajax_basic_err_msg(err_note, shapefile_info=None):
"""Convenience method for returning an error message via AJAX"""
    d = {'DATAVERSE_SERVER_URL': settings.DATAVERSE_SERVER_URL,
         'ERR_NOTE': err_note,
         'shapefile_info': shapefile_info,
         }
return render_to_string('gis_shapefiles/view_02_ajax_basic_err.html', d)
class ViewAjaxVisualizeShapefile(View):
"""
Given the md5 of a ShapefileInfo, attempt to visualize the file on WorldMap
Return a JSON response
"""
def get(self, request, shp_md5):
"""Use the SendShapefileService to create a map from a shapefile.
- SendShapefileService takes care of details starting with retrieving
the ShapefileInfo object
"""
# OK if shp_md5 is None, SendShapefileService creates error message
#
send_shp_service = SendShapefileService(**dict(shp_md5=shp_md5))
# Send the shapefile to WorldMap
#
success = send_shp_service.send_shapefile_to_worldmap()
# -----------------------------------
# Did it work? NOPE! Failed along the way!
# -----------------------------------
if not success:
err_note = ('Sorry! The shapefile mapping did not work.'
'<br /><span class="small">{0}</span>').format(\
'<br />'.join(send_shp_service.err_msgs))
LOGGER.error(err_note)
err_note_html = render_ajax_basic_err_msg(err_note,\
send_shp_service.shapefile_info)
json_msg = MessageHelperJSON.get_json_fail_msg(err_note_html, dict(id_main_panel_content=err_note_html))
return HttpResponse(json_msg, content_type="application/json", status=200)
# -----------------------------------
# Yes! We have a new map layer
# -----------------------------------
        worldmap_shapefile_layerinfo = send_shp_service.get_worldmap_layerinfo()
        assert worldmap_shapefile_layerinfo is not None,\
            "Failure in SendShapefileService! Said success but no worldmap_layerinfo (WorldMapShapefileLayerInfo)"
        shapefile_info = worldmap_shapefile_layerinfo.get_gis_data_info()
# -----------------------------------------
# Build the Map HTML to replace the form
# -----------------------------------------
map_html, user_message_html = build_map_html(request, worldmap_shapefile_layerinfo)
if map_html is None: # Failed! Send an error
LOGGER.error("Failed to create map HTML using WorldMapShapefileLayerInfo: %s (%d)",\
worldmap_shapefile_layerinfo, worldmap_shapefile_layerinfo.id)
user_msg = 'Sorry! Failed to create map. Please try again. (code: s3)'
json_msg = MessageHelperJSON.get_json_fail_msg(user_msg)
return HttpResponse(json_msg, content_type="application/json", status=200)
# -----------------------------------------
# Looks good. In the JSON response, send
# back the map HTML
# -----------------------------------------
data_dict = dict(\
map_html=map_html,
user_message_html=user_message_html,
id_main_panel_title=PANEL_TITLE_STYLE_MAP,
message='Success! The shapefile was successfully mapped!')
json_msg = MessageHelperJSON.get_json_success_msg("great job", data_dict=data_dict)
return HttpResponse(json_msg, content_type="application/json", status=200)
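# Editor's note (not part of the original module): the AJAX caller receives the
# data_dict built above wrapped in MessageHelperJSON's envelope; the exact
# top-level keys depend on that helper, but the payload carries map_html,
# user_message_html, id_main_panel_title and a user-facing message, which the
# page uses to swap in the styled map panel.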
| license: apache-2.0 | hash: 1,538,729,477,637,049,300 | line_mean: 37.300813 | line_max: 116 | alpha_frac: 0.612397 | autogenerated: false | ratio: 3.968829 | config_test: false | has_no_keywords: false | few_assignments: false |
| repo_name: nickmilon/milonpy | path: milonpy/utils/basic2.py | copies: 1 | size: 14147 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#######################################################
'''
module: utilities.basic2
Created: Aug 21, 2012
author: nickmilon
Description: Simple utilities (2) and Vars - Very Limited IMPORTS HERE !
'''
#######################################################
from sys import stdout
from datetime import datetime , timedelta
from basic import FMT_dtGen, FMT_tGen, color_txt, color_switch_txt, dictDot
from time import sleep, time, mktime
import re
from random import random
def re_is_sameLen(txt,rexp):return len(txt)==len(rexp.findall(txt))
def re_is_same(txt,rexp):return txt==u''.join(rexp.findall(txt))
def re_diff(txt,rexp):return ''.join(list(set([c for c in txt]) - set(rexp.findall(txt))))
#re_gr=re.compile(ur'[\u03AC-\u03CE]|[;\s]', re.IGNORECASE| re.VERBOSE| re.UNICODE |re.MULTILINE)
def lst_randomize(lst):
"returns list in random order"
return [i[1] for i in [[random(),i] for i in sorted(lst)] ]
def time_seconds_since_epoch(dt=None):
if dt is None:dt=datetime.utcnow()
return mktime(dt.timetuple())+1e-6*dt.microsecond
def autoRetry(exceptionOrTuple,retries=3,sleepSeconds=1, BackOfFactor=1,loggerFun=None):
""" exceptionOrTuple= exception or tuple of exceptions,BackOfFactor=factor to back off on each retry loggerFun i.e. logger.info """
def wrapper(func):
def fun_call(*args, **kwargs):
tries = 0
while tries < retries:
try:
return func(*args, **kwargs)
except exceptionOrTuple, e:
tries += 1
if loggerFun:loggerFun("exception [%s] e=[%s] handled tries :%d sleeping[%f]" % (exceptionOrTuple ,e,tries,sleepSeconds * tries * BackOfFactor) )
sleep(sleepSeconds * tries * BackOfFactor) #* tries)
raise
return fun_call
return wrapper
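# Editor's illustrative sketch (not part of the original module): a flaky I/O
# helper wrapped with autoRetry; the path is a hypothetical name used only for
# illustration, and real callers decorate their own network or file helpers.
@autoRetry(IOError, retries=5, sleepSeconds=0.5, BackOfFactor=2)
def _example_read_flaky_file(path='/tmp/flaky.txt'):
    with open(path) as fin:
        return fin.read()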
def parseJSfunFromFile(filepath,functionName):
"""
    helper function to get a js function string from a file containing js functions.
    The function must be named starting in the first column and the file must end with //eof//
"""
with open( filepath) as fin:
r=re.search("(^.*?)(?P<fun>function\s+?%s.*?)(^fun|//eof//)" % functionName,fin.read(),re.MULTILINE|re.DOTALL)
return r.group('fun').strip() if r else False
def stdout_switchColor(color):
stdout.write (color_switch_txt(color))
def stdout_write_flush(txt,stAfter="\r",color=None):
if color:txt= color_txt(color,txt)
stdout.write("%s%s" %(txt,stAfter) )
stdout.flush()
class timeElapsed(object):
""" overwrite str_dtimes str_deltas to return "" to exclude this form output string
@todo: logging handler
"""
def __init__(self, cnt_max=1,name_str=""):
self.name_str=name_str
self.cnt_max= cnt_max
self.dt_start=datetime.utcnow()
self.dt_last=self.dt_start
self.dt_current=self.dt_start
self.cnt=0
self.cnt_last=0
self.cnt_last_dif=0
self.perc_done=0.0
self.time_remaining=0
self.time_elapsed_since_start=timedelta(0)
self.time_elapsed_since_last=timedelta(0)
self.time_remaining =timedelta(0)
self.units=['sec','min','hour']
self.set_cnt_max(cnt_max)
def set_cnt_max(self,val):
self.cnt_max=val
self.frmt_str="%s%d%s" %("%",len(str(val)),"d" )
def set_auto_unit(self,velocity,unit_idx=0):
if velocity < 1 and unit_idx < 2:
velocity=velocity * 60
unit_idx+=1
return self.set_auto_unit(velocity, unit_idx)
else:
return velocity, self.units[unit_idx]
def frmt_max(self,val):
return self.frmt_str % val
def update(self,cur_val,getStr=True,):
cur_val=float(cur_val)
if cur_val > self.cnt_max:self.set_cnt_max(self.cnt_max+int(cur_val/10))
self.dt_current=datetime.utcnow()
self.time_elapsed_since_start = self.dt_current- self.dt_start
self.time_elapsed_since_last=self.dt_current- self.dt_last
self.cnt_last_dif=self.cnt_last-cur_val
self.perc_done=cur_val/self.cnt_max
self.time_remaining =timedelta(seconds=int ( self.time_elapsed_since_start.total_seconds() * ( (1-self.perc_done)/self.perc_done)))
self.cnt=cur_val
self.v_start= self.cnt/self.time_elapsed_since_start.total_seconds()
self.v_last= self.cnt_last_dif/self.time_elapsed_since_last.total_seconds()
self.dt_last=self.dt_current
self.cnt_last=cur_val
return self.toStr() if getStr else True
def update_last(self,cur_val,getStr=True):
self.cnt_max=cur_val
return self.update(cur_val,getStr)
def str_counters(self):
return u"|%s of %s" %(self.frmt_max(self.cnt), self.frmt_max(self.cnt_max))
def str_dtimes(self):
return u"⌚ %s %s %s" % (self.dt_start.strftime(FMT_dtGen),self.dt_current.strftime(FMT_tGen), (self.dt_current+self.time_remaining).strftime(FMT_tGen))
def str_tdeltas(self):
return u"⌛ %s %s %s" %(self._str_tdelta(self.time_elapsed_since_start),self._str_tdelta(self.time_elapsed_since_last), self._str_tdelta(self.time_remaining) )
@staticmethod
def _str_tdelta(tdelta):
str_td=str(tdelta)
tmp=str_td.find(".")
if tmp !=-1 : str_td= str_td[:tmp]
return u"%8s" % str_td
def toStr(self):
return u"[%s:%6.2f%%%s%s%s]" %(self.name_str,100* self.perc_done, self.str_counters(),
self.str_tdeltas(),self.str_dtimes() )
class SubToEvent(object):
    ''' lightweight Event handler modeled after Peter Thatcher's http://www.valuedlessons.com/2008/04/events-in-python.html
usage:
watcher = SubToEvent()
def log_docs(doc):print doc
watcher += log_docs
watcher += lambda x:str(x)
    watcher('some doc')      # fire all handlers
    watcher -= log_docs      # unsubscribe a handler
'''
def __init__(self,channelName=''):
self.channelName=channelName
self.handlers = set()
def handle(self, handler):
self.handlers.add(handler)
return self
def unhandle(self, handler):
try:
self.handlers.remove(handler)
except:
raise ValueError("No_such_handler")
return self
def fire(self, *args, **kargs):
for handler in self.handlers:
handler(*args, **kargs)
def fireTopic(self,topic=None,verb=None,payload=None):
self.fire ((self.channelName,topic,verb,payload))
def getHandlerCount(self):
return len(self.handlers)
__iadd__ = handle
__isub__ = unhandle
__call__ = fire
__len__ = getHandlerCount
class multiOrderedDict(object):
'''
    an ordered list of [key, value] pairs with dict-style access; duplicate keys
    are allowed, but deletes only remove the first matching key
'''
def __init__(self,lst):
self.lstDic=lst
def __getitem__ (self,key):
return self._getOrSetDictItem(key)
def __setitem__(self, key, val):
return self._getOrSetDictItem(key,True,val)
def __delitem__ (self, key):
return self._getOrSetDictItem(key,delete=True)
def get(self,key,orVal=None):
try:
return self[key]
except KeyError:
return orVal
def keys(self):
return[i[0] for i in self.lstDic if self.isKey(i[0])]
def values(self):
return [self[i] for i in self.keys()]
def isKey(self,k):
return True
def _getOrSetDictItem (self,key,setVal=False,newVal=None,multi=False,delete=False):
idx=[]
for n,i in enumerate(self.lstDic):
if i[0]==key and self.isKey(i[0]):
idx.append(n)
if setVal:self.lstDic[n]=[i[0],newVal]
if not multi: break
if len(idx)>0:
if delete:
self.lstDic.pop(idx[0]) #can't be multi
return None
rt= [self.lstDic[i][1:] for i in idx ]
if multi:
return rt
else:
return rt[0][0]
else:
if setVal:
self.lstDic.append([key,newVal])
return newVal
else:
raise KeyError (key)
def toDict(self):
return dict(zip(self.keys(),self.values()))
def toString(self):
return str(self.toDict())
__str__ = toString
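def _example_multiOrderedDict():
    # Editor's illustrative sketch (not part of the original module): duplicate
    # keys are preserved in order and plain lookups return the first match.
    d = multiOrderedDict([['a', 1], ['b', 2], ['a', 3]])
    assert d['a'] == 1
    assert d.keys() == ['a', 'b', 'a']
    d['c'] = 4                    # unknown keys are appended
    assert d.toDict() == {'a': 1, 'b': 2, 'c': 4}
    return d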
class confFileDict(multiOrderedDict):
def __init__(self,path,skipBlanks=True,skipRemarks=True):
self.path=path
with open(self.path) as fin:
rlines=fin.readlines()
if skipBlanks:rlines=[i for i in rlines if not i=='\n']
if skipRemarks:rlines=[i for i in rlines if not i.startswith("#")]
lstDic=[ map(lambda x: x.strip(), i.split("=") ) for i in rlines]
super(confFileDict, self).__init__(lstDic)
def isKey(self,key):
return key !='' and not key.startswith("#")
def toStr(self):
s=''
for i in self.lstDic:
s+= "=".join(i)+'\n'
return s.rstrip()
def toFile(self,path=None):
if not path:path=self.path
with open(path, 'w') as fl:
fl.write(self.toStr)
def PrintTiming(func):
    """set up a decorator function for timing"""
    def wrapper(*args, **kargs):
        # this module imports the time() function (not the time module),
        # so call it directly and pull strftime/gmtime in locally
        from time import strftime, gmtime
        t1 = time()
        res = func(*args, **kargs)
        tel = time() - t1
        timeformated = strftime("%H:%M:%S", gmtime(tel))
        print '-'*5 + '%s took %0.3f ms' % (func.func_name + str(kargs) + str(args), tel*1000.0) + '|' + timeformated + '|' + '-'*10
        return res
    return wrapper
def totalsVertical(orgD,resD,funct,initFunc):
    '''Apply funct to resD dict values by orgD values, creating keys in resD if they do not exist
    useful for vertical percentages and totals
    attention: it is destructive, replacing resD contents with the results
    i.e. to increment resD values by orgD values: totalsVertical(orgDict, resultsDict, lambda x, y: x + y, lambda x: 0)
    to find percentage of org: totalsVertical(res, dorg[0].value, lambda x, y: 100 * y / x if x != 0 else None, None)
'''
for k in orgD.keys():
if isinstance(orgD[k],dict):
if resD.get(k):
totalsVertical(orgD[k],resD[k],funct,initFunc)
else:
if initFunc:
resD[k]=totalsVertical(orgD[k],dictDot({}),funct,initFunc)
else: continue
elif isinstance(orgD[k],(float,int)):
if resD.get(k,False) is False :
if initFunc:
resD[k]=initFunc(orgD[k])
else:
continue
resD[k] = funct(orgD[k],resD[k])
else:
if initFunc:resD[k]=orgD[k]
return resD
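def _example_totalsVertical():
    # Editor's illustrative sketch (not part of the original module): accumulate
    # orgD into an initially empty resD twice, preserving the nested structure.
    orgD = {'a': 2, 'b': {'c': 3}}
    resD = {}
    totalsVertical(orgD, resD, lambda x, y: x + y, lambda x: 0)
    totalsVertical(orgD, resD, lambda x, y: x + y, lambda x: 0)
    # resD is now {'a': 4, 'b': {'c': 6}}
    return resD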
def totalsVertSimple(orgD,resD,funct):
''' simplified and faster version of totalsVertical assumes all key/values of orgD are present in resD
'''
for k in orgD.keys():
if isinstance(orgD[k],dict):totalsVertSimple(orgD[k],resD[k],funct)
elif isinstance(orgD[k],(float,int)):orgD[k]=funct(orgD[k],resD[k])
return orgD
def totalsHorizontal(value,a_dict,funct=lambda x,y:100*x/y):
for k in a_dict.keys():
if isinstance(a_dict[k],dict):totalsHorizontal(value,a_dict[k])
elif isinstance(a_dict[k],(float,int)):a_dict[k]=funct(a_dict[k],value)
return a_dict
class TextWrapper(object):
''' http://jrgraphix.net/r/Unicode/ '''
elipsis=u"\u2026" # "…"
numbers=u"₀₁₂₃₄₅₆₇₈₉₊₋₌₍₎"
def __init__(self, maxLen=140,minLen=100, contChr=u'⎘',inclNumbers=True,strFlag=u'',strFirst=u'',strRest=u'',strAll=u''):
self.contChr=contChr
self.inlNumbers=inclNumbers
self.strFlag=strFlag
self.strFirst=strFirst
self.strRest=strRest
self.strAll=strAll
self.maxLen=maxLen
self.minLen=minLen
def compineStr(self,s,cnt,totalCnt=None):
return "%s%s%s%s%s" %(self.strFlag,self.formatNumOfTotal(cnt+1,totalCnt) if self.inlNumbers else u'', self.strAll, self.strFirst if cnt==0 else self.strRest,s)
def splits(self,astr):
n=self.maxLen-1- len(self.contChr)
minLen=self.minLen
cnt=0
s=self.compineStr(astr, cnt)
while len(s) > n:
cnt+=1
rf=s[0:n].rfind(u'\n',minLen)
if rf == -1:rf=s[0:n].rfind(u'.',minLen)
if rf == -1:rf=s[0:n].rfind(u' ',minLen)
spltn = rf+1 if rf !=-1 else n
#print "(%3d) %3d %3d %3d [%s]" %(cnt, rf,n,spltn,s[0:n])
rt=s[:spltn].rstrip()
remainingStr=s[spltn:]
if self.contChr !=u'':
if len(remainingStr)>1:rt+=self.contChr
else:
rt+=remainingStr
remainingStr=u''
yield rt
s=self.compineStr(remainingStr, cnt) if remainingStr !=u'' else u''
yield s
def formatNumOfTotal(self,cnt, totalCnt=None):
return u"%s∕%s" %(self.formatNum(cnt),u'??' if totalCnt is None else self.formatNum(totalCnt)) #'∕' is not '/' but math '\u2215'
def formatNum(self,num):
header=map(int,str(num))
rt=[self.numbers[i] for i in header]
return ''.join(rt)
def format(self,text):
rt=[]
for i in self.splits(text):
if i !=u'':rt.append(i)
if self.inlNumbers:
rt2=[]
maxCnt=len(rt)
for cnt,vl in enumerate(rt):
old= self.formatNumOfTotal(cnt+1,None)
new= u'' if maxCnt == 1 else self.formatNumOfTotal(cnt+1,maxCnt)
if new !=u'':new += u' '* (len(old)-len(new))
rt2.append(vl.replace(old, new , 1))
return rt2
return rt
################## tests
def test_timeElapsed(x):
et=timeElapsed(x,"foo")
for i in range(1,x):
sleep(1)
print et.update(i, True)
print et.update_last(i)
###################
| license: apache-2.0 | hash: -7,230,937,652,603,792,000 | line_mean: 37.752747 | line_max: 168 | alpha_frac: 0.57072 | autogenerated: false | ratio: 3.306376 | config_test: false | has_no_keywords: false | few_assignments: false |
| repo_name: AnthonyCheetham/naco_ispy | path: data_handling_scripts/queue_cal_analysis.py | copies: 1 | size: 1931 |
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 11 10:41:32 2016
Program to run through the calibrations folders and queue all data for analysis
It isn't yet smart enough to check which ones are done already
@author: cheetham
"""
import naco_ispy,subprocess,os,argparse,glob
parser = argparse.ArgumentParser(description='This program queues up all unprocessed NACO ISPY calibration data for analysis.')
parser.add_argument('-dry_run', dest="dry_run",action='store_const',const=True,
default=False, help='Dont actually queue the analysis, but print the commands it will do')
parser.add_argument('--num', action="store", dest="num", type=int, default=-1,
help='Maximum number of datasets to process')
# Get the input arguments
args = parser.parse_args()
num = args.num
data_folder = '/data/NACO/'
# db_filename = '/data/NACO/calib_table.dat'
# data_folder='/Users/cheetham/data/naco_data/GTO/'
#db_filename='/Users/cheetham/data/data_archive/GTO/obs_table.dat'
dry_run = args.dry_run
# First, load the target database
# calib_db = naco_ispy.databases.calib_table(filename=db_filename, data_folder=data_folder)
scripts_directory = os.path.expanduser('~/code/naco_ispy/processing_scripts/')
# Instead of using the database, use glob to find all folders
all_folders = glob.glob(data_folder+'Calib/*/')
# Loop through the targets in the database
# num defaults to -1, which should mean "process everything" rather than drop the last folder
for targ_ix, targ_folder in enumerate(all_folders[0:num] if num > 0 else all_folders):
# Check what we want to process
process_script = scripts_directory+'naco_calibrations.slurm'
# The command to run:
cmd = "echo 'bash "+process_script+"' | at -q b now"
# Change to the right directory
os.chdir(targ_folder)
if dry_run:
print('Queueing analysis for '+targ_folder)
print(' '+cmd)
else:
# Execute the processing command
subprocess.call(cmd,shell=True)
|
gpl-3.0
| -3,123,554,130,894,556,000
| 32.310345
| 127
| 0.684102
| false
| 3.523723
| false
| false
| false
|
Maronato/SpottedBot
|
custom_auth/facebook_methods.py
|
1
|
4677
|
import facebook
from django.conf import settings
from django.shortcuts import reverse
from urllib.parse import urlencode, quote, unquote
from django.contrib.auth import login
from django.contrib import messages
app_id = settings.SOCIAL_FACEBOOK_KEY
app_secret = settings.SOCIAL_FACEBOOK_SECRET
def get_graph():
"""Get App Graph Object.
returns a graph object containing an app token from the registered facebook app
"""
graph = facebook.GraphAPI(version='3.1')
graph.access_token = graph.get_app_access_token(app_id, app_secret)
return graph
def canv_url(request):
"""Return Canvas URL.
Generates the canvas_url used by facebook to redirect after auth
"""
    # Facebook expects an https redirect URI, so https is used regardless of
    # the protocol of the current request
    return 'https://' + request.get_host() + reverse('social_login:facebook_login_response')
def auth_url(request):
"""Auth URL.
Returns the facebook auth url using the current app's domain
"""
canvas_url = canv_url(request)
# Permissions set by user. Default is none
perms = settings.SOCIAL_FACEBOOK_PERMISSIONS
url = "https://www.facebook.com/dialog/oauth?"
# Payload
kvps = {'client_id': app_id, 'redirect_uri': canvas_url}
# Add 'next' as state if provided
next_param = f"next_url={quote(request.GET.get('next', ''))}"
# Add 'redirected' as state if provided
redirected_param = f"redirected={request.GET.get('redirected', '')}"
if request.GET.get('next', False):
kvps['state'] = next_param
redirected_param = f',{redirected_param}'
if request.GET.get('redirected', False):
kvps['state'] = kvps.get('state', '') + redirected_param
# Format permissions if needed
if perms:
kvps['scope'] = ",".join(perms)
# Return the url
return url + urlencode(kvps)
def debug_token(token):
"""Debug Token.
Returns debug string from token
"""
return get_graph().debug_access_token(token, app_id, app_secret)
def login_successful(code, request):
"""Login Successful.
Process successful login by creating or updating an user using Facebook's response
"""
canvas_url = canv_url(request)
graph = get_graph()
# Get token info from user
try:
token_info = graph.get_access_token_from_code(code, canvas_url, app_id, app_secret)
except facebook.GraphAPIError:
# For some reason, the auth code has already been used, redirect to login again
return 'auth code used'
# Extract token from token info
access_token = token_info['access_token']
# Debug the token, as per documentation
debug = debug_token(access_token)['data']
# Get the user's scope ID from debug data
social_id = debug['user_id']
token_expires = debug.get('expires_at') - debug.get('issued_at')
if debug.get('expires_at') == 0:
token_expires = 99999999
scopes = debug.get('scopes', [])
# Get some user info like name and url
extra_data = graph.get_object(str(social_id) + '/?fields=name,first_name,last_name,link')
name = extra_data['name']
first_name = extra_data['first_name']
last_name = extra_data['last_name']
link = extra_data.get('link', '')
# Call FacebookUser's method to create or update based on social_id, that returns an facebookuser object
from .models import FacebookUser
new = FacebookUser.create_or_update(social_id, access_token, token_expires, first_name, last_name, name, link, scopes)
# Try to login the user
if new.user.is_active:
login(request, new.user)
messages.add_message(request, messages.SUCCESS, 'Olá, ' + first_name + '!')
else:
messages.add_message(request, messages.ERROR, 'Essa conta foi desativada!')
return request
def login_canceled(request):
# If the user has canceled the login process, or something else happened, do nothing and display error message
messages.add_message(request, messages.ERROR, 'Oops! Algo de errado aconteceu :( Se isso se repetir, fale conosco!')
return request
def decode_state_data(state):
if not state:
return {}
parts = state.split(',')
data = {}
for part in parts:
p = part.split('=')
data[p[0]] = unquote(p[1])
return data
def code_already_used_url(next_url, redirected):
state = {}
if next_url:
state['next'] = next_url
state['redirected'] = int(redirected) + 1 if redirected else 0
return reverse('social_login:facebook_login') + '?' + urlencode(state)
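# Round-trip sketch of the comma-separated state format used above (values are
# illustrative only):
#   decode_state_data("next_url=/spotted/,redirected=1")
#       -> {'next_url': '/spotted/', 'redirected': '1'}
# which is the shape of state that auth_url() packs into the OAuth redirect.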
|
agpl-3.0
| 7,889,079,061,397,053,000
| 30.38255
| 122
| 0.663388
| false
| 3.676101
| false
| false
| false
|
LegoStormtroopr/canard
|
SQBLWidgets/sqblUI/statementText.py
|
1
|
1584
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui/statementText.ui'
#
# Created: Sat Jul 25 12:17:11 2015
# by: PyQt4 UI code generator 4.9.1
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(400, 300)
self.verticalLayout = QtGui.QVBoxLayout(Form)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.label = QtGui.QLabel(Form)
self.label.setWordWrap(True)
self.label.setObjectName(_fromUtf8("label"))
self.verticalLayout.addWidget(self.label)
self.statementText = QtGui.QTextEdit(Form)
self.statementText.setStyleSheet(_fromUtf8("margin-left:8px;"))
self.statementText.setObjectName(_fromUtf8("statementText"))
self.verticalLayout.addWidget(self.statementText)
self.label.setBuddy(self.statementText)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(QtGui.QApplication.translate("Form", "Form", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("Form", "<html><head/><body><p><span style=\" font-weight:600;\">Statement Text</span> - <small>The text shown to a respondent.</small></p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
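# Usage sketch (assumes a running QApplication; `sys` import not shown in this
# generated file):
#   app = QtGui.QApplication(sys.argv)
#   form = QtGui.QWidget()
#   ui = Ui_Form()
#   ui.setupUi(form)
#   form.show()
#   sys.exit(app.exec_())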
|
gpl-3.0
| -4,289,232,995,782,310,000
| 39.615385
| 250
| 0.696338
| false
| 3.789474
| false
| false
| false
|
samueldeng/crosslan
|
headquarter/iptman.py
|
1
|
2445
|
import iptc
import logging
logging.basicConfig(format='[%(levelname)s]\t%(asctime)s\t%(message)s', datefmt='%m/%d/%Y %I:%M:%S %p',
level=logging.DEBUG)
log = logging.getLogger("iptman")
class IptMan():
def __init__(self):
pass
@staticmethod
def insert_rule(port):
try:
# New Rule.
rule = iptc.Rule()
rule.protocol = "tcp"
# Add match to the rule.
match = iptc.Match(rule, "tcp")
match.sport = str(port)
rule.add_match(match)
# Add target to the rule.
target = iptc.Target(rule, "ACCEPT")
rule.target = target
# Insert rule to the OUTPUT chain in filter Table.
output_chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), "OUTPUT")
output_chain.insert_rule(rule)
        except Exception as e:
raise e
@staticmethod
def delete_rule(port):
try:
filter_table = iptc.Table(iptc.Table.FILTER)
output_chain = iptc.Chain(filter_table, "OUTPUT")
rule_del = None
for rule in output_chain.rules:
sport = str(rule.matches[0].parameters["sport"])
if sport == str(port):
rule_del = rule
break
if rule_del is not None:
output_chain.delete_rule(rule_del)
        except Exception as e:
raise e
@staticmethod
def get_rule_counter(port):
try:
filter_table = iptc.Table(iptc.Table.FILTER)
filter_table.refresh()
output_chain = iptc.Chain(filter_table, "OUTPUT")
bytes_counts = None
for rule in output_chain.rules:
sport = str(rule.matches[0].parameters["sport"])
# log.debug(rule.get_counters())
if sport == str(port):
counter = rule.get_counters()
packets = counter[0]
bytes_counts = counter[1]
log.debug("packet #:" + str(packets))
log.debug("bytes #:" + str(bytes_counts))
break
if bytes_counts is None:
raise Exception("NotFoundPort")
return bytes_counts
        except Exception as e:
raise e
def unit_test():
pass
if __name__ == "__main__":
unit_test()
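# Usage sketch (illustrative port number; requires root privileges and the
# python-iptc package):
#   IptMan.insert_rule(8080)       # accept outgoing tcp traffic with source port 8080
#   IptMan.get_rule_counter(8080)  # bytes matched by that rule so far
#   IptMan.delete_rule(8080)       # remove the rule again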
|
gpl-2.0
| 5,179,356,324,944,784,000
| 27.44186
| 103
| 0.503476
| false
| 4.179487
| false
| false
| false
|
google-research/falken
|
service/generated_flatbuffers/tflite/LessEqualOptions.py
|
1
|
2218
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: tflite
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class LessEqualOptions(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsLessEqualOptions(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = LessEqualOptions()
x.Init(buf, n + offset)
return x
@classmethod
def LessEqualOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
# LessEqualOptions
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
def LessEqualOptionsStart(builder): builder.StartObject(0)
def LessEqualOptionsEnd(builder): return builder.EndObject()
class LessEqualOptionsT(object):
# LessEqualOptionsT
def __init__(self):
pass
@classmethod
def InitFromBuf(cls, buf, pos):
lessEqualOptions = LessEqualOptions()
lessEqualOptions.Init(buf, pos)
return cls.InitFromObj(lessEqualOptions)
@classmethod
def InitFromObj(cls, lessEqualOptions):
x = LessEqualOptionsT()
x._UnPack(lessEqualOptions)
return x
# LessEqualOptionsT
def _UnPack(self, lessEqualOptions):
if lessEqualOptions is None:
return
# LessEqualOptionsT
def Pack(self, builder):
LessEqualOptionsStart(builder)
lessEqualOptions = LessEqualOptionsEnd(builder)
return lessEqualOptions
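# Usage sketch (hypothetical round trip; the table defines no fields, so this
# only exercises the generated pack/unpack plumbing):
#   builder = flatbuffers.Builder(0)
#   builder.Finish(LessEqualOptionsT().Pack(builder))
#   root = LessEqualOptions.GetRootAsLessEqualOptions(builder.Output(), 0)
#   obj = LessEqualOptionsT.InitFromObj(root)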
|
apache-2.0
| 3,640,308,164,800,651,300
| 29.805556
| 114
| 0.708747
| false
| 3.857391
| false
| false
| false
|
Southpaw-TACTIC/Team
|
src/python/Lib/site-packages/PySide/examples/itemviews/addressbook/addresswidget.py
|
1
|
10279
|
#!/usr/bin/python
"""**************************************************************************
**
** Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
** All rights reserved.
** Contact: Nokia Corporation (qt-info@nokia.com)
**
** This file is part of the examples of the Qt Toolkit.
**
** You may use this file under the terms of the BSD license as follows:
**
** "Redistribution and use in source and binary forms, with or without
** modification, are permitted provided that the following conditions are
** met:
** * Redistributions of source code must retain the above copyright
** notice, this list of conditions and the following disclaimer.
** * Redistributions in binary form must reproduce the above copyright
** notice, this list of conditions and the following disclaimer in
** the documentation and/or other materials provided with the
** distribution.
** * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
** the names of its contributors may be used to endorse or promote
** products derived from this software without specific prior written
** permission.
**
** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
** OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
** LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
** DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
** OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
**
*****************************************************************************
** February 2011
** - addressbook example ported to PySide by Arun Srinivasan
** <rulfzid@gmail.com>
**************************************************************************"""
try:
    import cPickle as pickle
except ImportError:
import pickle
from PySide.QtCore import (Qt, Signal, QRegExp, QModelIndex)
from PySide.QtGui import (QWidget, QTabWidget, QItemSelectionModel,
QMessageBox, QTableView, QSortFilterProxyModel,
QAbstractItemView, QItemSelection)
from tablemodel import TableModel
from newaddresstab import NewAddressTab
from adddialogwidget import AddDialogWidget
class AddressWidget(QTabWidget):
""" The central widget of the application. Most of the addressbook's
functionality is contained in this class.
"""
selectionChanged = Signal(QItemSelection)
def __init__(self, parent=None):
""" Initialize the AddressWidget. """
super(AddressWidget, self).__init__(parent)
self.tableModel = TableModel()
self.newAddressTab = NewAddressTab()
self.newAddressTab.sendDetails.connect(self.addEntry)
self.addTab(self.newAddressTab, "Address Book")
self.setupTabs()
def addEntry(self, name=None, address=None):
""" Add an entry to the addressbook. """
if name is None and address is None:
addDialog = AddDialogWidget()
if addDialog.exec_():
name = addDialog.name
address = addDialog.address
address = {"name": name, "address": address}
addresses = self.tableModel.addresses[:]
# The QT docs for this example state that what we're doing here
# is checking if the entered name already exists. What they
# (and we here) are actually doing is checking if the whole
# name/address pair exists already - ok for the purposes of this
# example, but obviously not how a real addressbook application
# should behave.
try:
addresses.remove(address)
QMessageBox.information(self, "Duplicate Name",
"The name \"%s\" already exists." % name)
except ValueError:
# The address didn't already exist, so let's add it to the model.
# Step 1: create the row
self.tableModel.insertRows(0)
# Step 2: get the index of the newly created row and use it.
# to set the name
ix = self.tableModel.index(0, 0, QModelIndex())
self.tableModel.setData(ix, address["name"], Qt.EditRole)
# Step 3: lather, rinse, repeat for the address.
ix = self.tableModel.index(0, 1, QModelIndex())
self.tableModel.setData(ix, address["address"], Qt.EditRole)
# Remove the newAddressTab, as we now have at least one
# address in the model.
self.removeTab(self.indexOf(self.newAddressTab))
# The screenshot for the QT example shows nicely formatted
# multiline cells, but the actual application doesn't behave
# quite so nicely, at least on Ubuntu. Here we resize the newly
# created row so that multiline addresses look reasonable.
tableView = self.currentWidget()
tableView.resizeRowToContents(ix.row())
def editEntry(self):
""" Edit an entry in the addressbook. """
tableView = self.currentWidget()
proxyModel = tableView.model()
selectionModel = tableView.selectionModel()
# Get the name and address of the currently selected row.
indexes = selectionModel.selectedRows()
for index in indexes:
row = proxyModel.mapToSource(index).row()
ix = self.tableModel.index(row, 0, QModelIndex())
name = self.tableModel.data(ix, Qt.DisplayRole)
ix = self.tableModel.index(row, 1, QModelIndex())
address = self.tableModel.data(ix, Qt.DisplayRole)
# Open an addDialogWidget, and only allow the user to edit the address.
addDialog = AddDialogWidget()
addDialog.setWindowTitle("Edit a Contact")
addDialog.nameText.setReadOnly(True)
addDialog.nameText.setText(name)
addDialog.addressText.setText(address)
# If the address is different, add it to the model.
if addDialog.exec_():
newAddress = addDialog.address
if newAddress != address:
ix = self.tableModel.index(row, 1, QModelIndex())
self.tableModel.setData(ix, newAddress, Qt.EditRole)
def removeEntry(self):
""" Remove an entry from the addressbook. """
tableView = self.currentWidget()
proxyModel = tableView.model()
selectionModel = tableView.selectionModel()
# Just like editEntry, but this time remove the selected row.
indexes = selectionModel.selectedRows()
for index in indexes:
row = proxyModel.mapToSource(index).row()
self.tableModel.removeRows(row)
# If we've removed the last address in the model, display the
# newAddressTab
if self.tableModel.rowCount() == 0:
self.insertTab(0, self.newAddressTab, "Address Book")
def setupTabs(self):
""" Setup the various tabs in the AddressWidget. """
groups = ["ABC", "DEF", "GHI", "JKL", "MNO", "PQR", "STU", "VW", "XYZ"]
for group in groups:
proxyModel = QSortFilterProxyModel(self)
proxyModel.setSourceModel(self.tableModel)
proxyModel.setDynamicSortFilter(True)
tableView = QTableView()
tableView.setModel(proxyModel)
tableView.setSortingEnabled(True)
tableView.setSelectionBehavior(QAbstractItemView.SelectRows)
tableView.horizontalHeader().setStretchLastSection(True)
tableView.verticalHeader().hide()
tableView.setEditTriggers(QAbstractItemView.NoEditTriggers)
tableView.setSelectionMode(QAbstractItemView.SingleSelection)
# This here be the magic: we use the group name (e.g. "ABC") to
# build the regex for the QSortFilterProxyModel for the group's
# tab. The regex will end up looking like "^[ABC].*", only
# allowing this tab to display items where the name starts with
# "A", "B", or "C". Notice that we set it to be case-insensitive.
reFilter = "^[%s].*" % group
proxyModel.setFilterRegExp(QRegExp(reFilter, Qt.CaseInsensitive))
proxyModel.setFilterKeyColumn(0) # Filter on the "name" column
proxyModel.sort(0, Qt.AscendingOrder)
tableView.selectionModel().selectionChanged.connect(self.selectionChanged)
self.addTab(tableView, group)
# Note: the QT example uses a QDataStream for the saving and loading.
# Here we're using a python dictionary to store the addresses, which
# can't be streamed using QDataStream, so we just use cpickle for this
# example.
    def readFromFile(self, filename):
        """ Read contacts in from a file. """
        try:
            f = open(filename, "rb")
        except IOError:
            QMessageBox.information(self, "Address Book",
                    "Unable to open file: %s" % filename)
            return
        try:
            addresses = pickle.load(f)
        finally:
            f.close()
        if len(addresses) == 0:
            QMessageBox.information(self, "Address Book",
                    "No contacts in file: %s" % filename)
        else:
            for address in addresses:
                self.addEntry(address["name"], address["address"])
    def writeToFile(self, filename):
        """ Save all contacts in the model to a file. """
        try:
            f = open(filename, "wb")
        except IOError:
            QMessageBox.information(self, "Address Book",
                    "Unable to open file: %s" % filename)
            return
        try:
            pickle.dump(self.tableModel.addresses, f)
        finally:
            f.close()
if __name__ == "__main__":
import sys
from PySide.QtGui import QApplication
app = QApplication(sys.argv)
addressWidget = AddressWidget()
addressWidget.show()
sys.exit(app.exec_())
|
epl-1.0
| -137,049,467,144,635,650
| 40.447581
| 86
| 0.628369
| false
| 4.394613
| false
| false
| false
|
googleapis/python-grafeas
|
grafeas/grafeas_v1/services/grafeas/transports/grpc_asyncio.py
|
1
|
26599
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1 # type: ignore
from google.api_core import grpc_helpers_async # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import packaging.version
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.protobuf import empty_pb2 # type: ignore
from grafeas.grafeas_v1.types import grafeas
from .base import GrafeasTransport, DEFAULT_CLIENT_INFO
from .grpc import GrafeasGrpcTransport
class GrafeasGrpcAsyncIOTransport(GrafeasTransport):
"""gRPC AsyncIO backend transport for Grafeas.
`Grafeas <https://grafeas.io>`__ API.
Retrieves analysis results of Cloud components such as Docker
container images.
Analysis results are stored as a series of occurrences. An
``Occurrence`` contains information about a specific analysis
instance on a resource. An occurrence refers to a ``Note``. A note
contains details describing the analysis and is generally stored in
a separate project, called a ``Provider``. Multiple occurrences can
refer to the same note.
For example, an SSL vulnerability could affect multiple images. In
this case, there would be one note for the vulnerability and an
occurrence for each image with the vulnerability referring to that
note.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
def __init__(
self,
*,
host: str = "",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=True,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def get_occurrence(
self,
) -> Callable[[grafeas.GetOccurrenceRequest], Awaitable[grafeas.Occurrence]]:
r"""Return a callable for the get occurrence method over gRPC.
Gets the specified occurrence.
Returns:
Callable[[~.GetOccurrenceRequest],
Awaitable[~.Occurrence]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_occurrence" not in self._stubs:
self._stubs["get_occurrence"] = self.grpc_channel.unary_unary(
"/grafeas.v1.Grafeas/GetOccurrence",
request_serializer=grafeas.GetOccurrenceRequest.serialize,
response_deserializer=grafeas.Occurrence.deserialize,
)
return self._stubs["get_occurrence"]
@property
def list_occurrences(
self,
) -> Callable[
[grafeas.ListOccurrencesRequest], Awaitable[grafeas.ListOccurrencesResponse]
]:
r"""Return a callable for the list occurrences method over gRPC.
Lists occurrences for the specified project.
Returns:
Callable[[~.ListOccurrencesRequest],
Awaitable[~.ListOccurrencesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_occurrences" not in self._stubs:
self._stubs["list_occurrences"] = self.grpc_channel.unary_unary(
"/grafeas.v1.Grafeas/ListOccurrences",
request_serializer=grafeas.ListOccurrencesRequest.serialize,
response_deserializer=grafeas.ListOccurrencesResponse.deserialize,
)
return self._stubs["list_occurrences"]
@property
def delete_occurrence(
self,
) -> Callable[[grafeas.DeleteOccurrenceRequest], Awaitable[empty_pb2.Empty]]:
r"""Return a callable for the delete occurrence method over gRPC.
Deletes the specified occurrence. For example, use
this method to delete an occurrence when the occurrence
is no longer applicable for the given resource.
Returns:
Callable[[~.DeleteOccurrenceRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_occurrence" not in self._stubs:
self._stubs["delete_occurrence"] = self.grpc_channel.unary_unary(
"/grafeas.v1.Grafeas/DeleteOccurrence",
request_serializer=grafeas.DeleteOccurrenceRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["delete_occurrence"]
@property
def create_occurrence(
self,
) -> Callable[[grafeas.CreateOccurrenceRequest], Awaitable[grafeas.Occurrence]]:
r"""Return a callable for the create occurrence method over gRPC.
Creates a new occurrence.
Returns:
Callable[[~.CreateOccurrenceRequest],
Awaitable[~.Occurrence]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_occurrence" not in self._stubs:
self._stubs["create_occurrence"] = self.grpc_channel.unary_unary(
"/grafeas.v1.Grafeas/CreateOccurrence",
request_serializer=grafeas.CreateOccurrenceRequest.serialize,
response_deserializer=grafeas.Occurrence.deserialize,
)
return self._stubs["create_occurrence"]
@property
def batch_create_occurrences(
self,
) -> Callable[
[grafeas.BatchCreateOccurrencesRequest],
Awaitable[grafeas.BatchCreateOccurrencesResponse],
]:
r"""Return a callable for the batch create occurrences method over gRPC.
Creates new occurrences in batch.
Returns:
Callable[[~.BatchCreateOccurrencesRequest],
Awaitable[~.BatchCreateOccurrencesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "batch_create_occurrences" not in self._stubs:
self._stubs["batch_create_occurrences"] = self.grpc_channel.unary_unary(
"/grafeas.v1.Grafeas/BatchCreateOccurrences",
request_serializer=grafeas.BatchCreateOccurrencesRequest.serialize,
response_deserializer=grafeas.BatchCreateOccurrencesResponse.deserialize,
)
return self._stubs["batch_create_occurrences"]
@property
def update_occurrence(
self,
) -> Callable[[grafeas.UpdateOccurrenceRequest], Awaitable[grafeas.Occurrence]]:
r"""Return a callable for the update occurrence method over gRPC.
Updates the specified occurrence.
Returns:
Callable[[~.UpdateOccurrenceRequest],
Awaitable[~.Occurrence]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_occurrence" not in self._stubs:
self._stubs["update_occurrence"] = self.grpc_channel.unary_unary(
"/grafeas.v1.Grafeas/UpdateOccurrence",
request_serializer=grafeas.UpdateOccurrenceRequest.serialize,
response_deserializer=grafeas.Occurrence.deserialize,
)
return self._stubs["update_occurrence"]
@property
def get_occurrence_note(
self,
) -> Callable[[grafeas.GetOccurrenceNoteRequest], Awaitable[grafeas.Note]]:
r"""Return a callable for the get occurrence note method over gRPC.
Gets the note attached to the specified occurrence.
Consumer projects can use this method to get a note that
belongs to a provider project.
Returns:
Callable[[~.GetOccurrenceNoteRequest],
Awaitable[~.Note]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_occurrence_note" not in self._stubs:
self._stubs["get_occurrence_note"] = self.grpc_channel.unary_unary(
"/grafeas.v1.Grafeas/GetOccurrenceNote",
request_serializer=grafeas.GetOccurrenceNoteRequest.serialize,
response_deserializer=grafeas.Note.deserialize,
)
return self._stubs["get_occurrence_note"]
@property
def get_note(self) -> Callable[[grafeas.GetNoteRequest], Awaitable[grafeas.Note]]:
r"""Return a callable for the get note method over gRPC.
Gets the specified note.
Returns:
Callable[[~.GetNoteRequest],
Awaitable[~.Note]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_note" not in self._stubs:
self._stubs["get_note"] = self.grpc_channel.unary_unary(
"/grafeas.v1.Grafeas/GetNote",
request_serializer=grafeas.GetNoteRequest.serialize,
response_deserializer=grafeas.Note.deserialize,
)
return self._stubs["get_note"]
@property
def list_notes(
self,
) -> Callable[[grafeas.ListNotesRequest], Awaitable[grafeas.ListNotesResponse]]:
r"""Return a callable for the list notes method over gRPC.
Lists notes for the specified project.
Returns:
Callable[[~.ListNotesRequest],
Awaitable[~.ListNotesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_notes" not in self._stubs:
self._stubs["list_notes"] = self.grpc_channel.unary_unary(
"/grafeas.v1.Grafeas/ListNotes",
request_serializer=grafeas.ListNotesRequest.serialize,
response_deserializer=grafeas.ListNotesResponse.deserialize,
)
return self._stubs["list_notes"]
@property
def delete_note(
self,
) -> Callable[[grafeas.DeleteNoteRequest], Awaitable[empty_pb2.Empty]]:
r"""Return a callable for the delete note method over gRPC.
Deletes the specified note.
Returns:
Callable[[~.DeleteNoteRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_note" not in self._stubs:
self._stubs["delete_note"] = self.grpc_channel.unary_unary(
"/grafeas.v1.Grafeas/DeleteNote",
request_serializer=grafeas.DeleteNoteRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["delete_note"]
@property
def create_note(
self,
) -> Callable[[grafeas.CreateNoteRequest], Awaitable[grafeas.Note]]:
r"""Return a callable for the create note method over gRPC.
Creates a new note.
Returns:
Callable[[~.CreateNoteRequest],
Awaitable[~.Note]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_note" not in self._stubs:
self._stubs["create_note"] = self.grpc_channel.unary_unary(
"/grafeas.v1.Grafeas/CreateNote",
request_serializer=grafeas.CreateNoteRequest.serialize,
response_deserializer=grafeas.Note.deserialize,
)
return self._stubs["create_note"]
@property
def batch_create_notes(
self,
) -> Callable[
[grafeas.BatchCreateNotesRequest], Awaitable[grafeas.BatchCreateNotesResponse]
]:
r"""Return a callable for the batch create notes method over gRPC.
Creates new notes in batch.
Returns:
Callable[[~.BatchCreateNotesRequest],
Awaitable[~.BatchCreateNotesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "batch_create_notes" not in self._stubs:
self._stubs["batch_create_notes"] = self.grpc_channel.unary_unary(
"/grafeas.v1.Grafeas/BatchCreateNotes",
request_serializer=grafeas.BatchCreateNotesRequest.serialize,
response_deserializer=grafeas.BatchCreateNotesResponse.deserialize,
)
return self._stubs["batch_create_notes"]
@property
def update_note(
self,
) -> Callable[[grafeas.UpdateNoteRequest], Awaitable[grafeas.Note]]:
r"""Return a callable for the update note method over gRPC.
Updates the specified note.
Returns:
Callable[[~.UpdateNoteRequest],
Awaitable[~.Note]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_note" not in self._stubs:
self._stubs["update_note"] = self.grpc_channel.unary_unary(
"/grafeas.v1.Grafeas/UpdateNote",
request_serializer=grafeas.UpdateNoteRequest.serialize,
response_deserializer=grafeas.Note.deserialize,
)
return self._stubs["update_note"]
@property
def list_note_occurrences(
self,
) -> Callable[
[grafeas.ListNoteOccurrencesRequest],
Awaitable[grafeas.ListNoteOccurrencesResponse],
]:
r"""Return a callable for the list note occurrences method over gRPC.
Lists occurrences referencing the specified note.
Provider projects can use this method to get all
occurrences across consumer projects referencing the
specified note.
Returns:
Callable[[~.ListNoteOccurrencesRequest],
Awaitable[~.ListNoteOccurrencesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_note_occurrences" not in self._stubs:
self._stubs["list_note_occurrences"] = self.grpc_channel.unary_unary(
"/grafeas.v1.Grafeas/ListNoteOccurrences",
request_serializer=grafeas.ListNoteOccurrencesRequest.serialize,
response_deserializer=grafeas.ListNoteOccurrencesResponse.deserialize,
)
return self._stubs["list_note_occurrences"]
__all__ = ("GrafeasGrpcAsyncIOTransport",)
|
apache-2.0
| -3,522,779,740,541,284,000
| 41.626603
| 89
| 0.617956
| false
| 4.548393
| false
| false
| false
|
terranum-ch/GraphLink
|
graphlink/ui/gkui_node_manager.py
|
1
|
2716
|
#!/usr/bin/python
import os
import wx
from ..core.gk_node import GKNode
from .gkui_node_dlg import GKUINodeEditDialog
class GKUINodeManager(object):
def __init__(self, parentframe, listctrl):
self.m_listctrl = listctrl
assert (self.m_listctrl is not None), "listctrl is None!"
self.m_parent_frame = parentframe
self.m_nodes = []
self.m_node_paths = []
def add_node_path(self, nodepath):
"""specify search path for nodes"""
if nodepath not in self.m_node_paths:
self.m_node_paths.append(nodepath)
def has_node_paths(self):
"""return True if some nodes path are defined"""
if len(self.m_node_paths) == 0:
return False
return True
def add_node_to_list(self, node):
"""add node to the internal list if it isn't already present"""
if node not in self.m_nodes:
self.m_nodes.append(node)
def get_node_count(self):
"""get the number of nodes"""
return len(self.m_nodes)
def reload_path(self):
"""clear the list ctrl and parse the node paths"""
for path in self.m_node_paths:
if os.path.exists(path) is False:
wx.LogError("{} didn't exist!".format(path))
else:
for myfile in os.listdir(path):
if myfile.endswith(".gkn"): # node files
                        node = GKNode()
                        node_file = os.path.join(path, myfile)
                        if node.load_from_file(node_file) is False:
                            wx.LogWarning("Error loading: {}".format(node_file))
                        else:
                            self.add_node_to_list(node)
# reload the node list
self.reload_list()
def reload_list(self):
"""reload the node list"""
self.m_listctrl.DeleteAllItems()
for index, node in enumerate(self.m_nodes):
self.m_listctrl.Append([index + 1, node.m_name])
def add_node_dialog(self):
"""display the add node dialog"""
mynode = GKNode()
myDlg = GKUINodeEditDialog(self.m_parent_frame, mynode)
if myDlg.ShowModal() == wx.ID_SAVE:
self.add_node_to_list(mynode)
self.reload_list()
def edit_node_dialog(self):
"""display the edit node dialog"""
my_node_index = self.m_listctrl.GetFirstSelected()
if my_node_index == -1:
wx.LogWarning("Nothing selected, select à node first!")
return False
my_node = self.m_nodes[my_node_index]
assert(my_node)
myDlg = GKUINodeEditDialog(self.m_parent_frame, my_node)
if myDlg.ShowModal() == wx.ID_SAVE:
self.reload_list()
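# Usage sketch (hypothetical frame, list control and path):
#   manager = GKUINodeManager(parentframe=frame, listctrl=node_list)
#   manager.add_node_path("/path/to/nodes")
#   manager.reload_path()       # load every *.gkn file and refresh the list
#   manager.add_node_dialog()   # interactively create and register a new node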
|
apache-2.0
| -1,177,501,102,955,131,400
| 31.710843
| 77
| 0.559116
| false
| 3.71409
| false
| false
| false
|
chrplr/AIP2015
|
resources/python-scripts/dualscope.py
|
1
|
28502
|
#!/usr/bin/env python
"""
Oscilloscope + spectrum analyser in Python.
------------------------------------------------------------
Copyright (C) 2008, Roger Fearick, University of Cape Town
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
------------------------------------------------------------
Version 0.7c
Dependencies:
numpy -- numerics, fft
PyQt4, PyQwt5 -- gui, graphics
pyaudio -- sound card -- Enthought unstable branch!
This code provides an oscilloscope and spectrum analyzer using
the PC sound card as input.
The interface, based on qwt, uses a familiar 'knob based' layout
so that it approximates an analogue scope.
Two traces are provided with input via the sound card "line in" jack.
Traces can be averaged to reduce the influence of noise.
The cross-correlation between the inputs can be computed.
The spectrum analyser has both log (dB) scale and linear scale.
A cross hair status display permits the reading of values off the screen.
Printing is provided.
"""
# dualscope6.py derived from dualscopy5.py 11/8/05
# adds autocorrelation
# Update for Qt4: 4-11/10/2007 rwf
# dualscope7.py: use pyaudio 27/2/08 rwf
import sys
from PyQt4 import Qt
from PyQt4 import Qwt5 as Qwt
from numpy import *
import numpy.fft as FFT
import pyaudio
import icons # part of this package -- toolbar icons
# audio setup
CHUNK = 8192 # input buffer size in frames
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 48000 # depends on sound card: 96000 might be possible
# scope configuration
BOTHLR=0
LEFT=1
RIGHT=2
soundbuffersize=CHUNK
samplerate=float(RATE)
scopeheight=350
LRchannel=BOTHLR
PENWIDTH=2
# status messages
freezeInfo = 'Freeze: Press mouse button and drag'
cursorInfo = 'Cursor Pos: Press mouse button in plot region'
# utility classes
class LogKnob(Qwt.QwtKnob):
"""
Provide knob with log scale
"""
def __init__(self, *args):
apply(Qwt.QwtKnob.__init__, (self,) + args)
self.setScaleEngine(Qwt.QwtLog10ScaleEngine())
def setRange(self,minR,maxR):
self.setScale(minR,maxR)
Qwt.QwtKnob.setRange(self, log10(minR), log10(maxR), 0.333333)
def setValue(self,val):
Qwt.QwtKnob.setValue(self,log10(val))
class LblKnob:
"""
Provide knob with a label
"""
def __init__(self, wgt, x,y, name, logscale=0):
if logscale:
self.knob=LogKnob(wgt)
else:
self.knob=Qwt.QwtKnob(wgt)
color=Qt.QColor(200,200,210)
self.knob.palette().setColor(Qt.QPalette.Active,
Qt.QPalette.Button,
color )
self.lbl=Qt.QLabel(name, wgt)
self.knob.setGeometry(x, y, 140, 100)
# oooh, eliminate this ...
if name[0]=='o': self.knob.setKnobWidth(40)
self.lbl.setGeometry(x, y+90, 140, 15)
self.lbl.setAlignment(Qt.Qt.AlignCenter)
def setRange(self,*args):
apply(self.knob.setRange, args)
def setValue(self,*args):
apply(self.knob.setValue, args)
def setScaleMaxMajor(self,*args):
apply(self.knob.setScaleMaxMajor, args)
class Scope(Qwt.QwtPlot):
"""
Oscilloscope display widget
"""
def __init__(self, *args):
apply(Qwt.QwtPlot.__init__, (self,) + args)
self.setTitle('Scope');
self.setCanvasBackground(Qt.Qt.white)
# grid
self.grid = Qwt.QwtPlotGrid()
self.grid.enableXMin(True)
self.grid.setMajPen(Qt.QPen(Qt.Qt.gray, 0, Qt.Qt.SolidLine))
self.grid.attach(self)
# axes
self.enableAxis(Qwt.QwtPlot.yRight);
self.setAxisTitle(Qwt.QwtPlot.xBottom, 'Time [s]');
self.setAxisTitle(Qwt.QwtPlot.yLeft, 'Amplitude [V]');
self.setAxisMaxMajor(Qwt.QwtPlot.xBottom, 10);
self.setAxisMaxMinor(Qwt.QwtPlot.xBottom, 0);
self.setAxisScaleEngine(Qwt.QwtPlot.yRight, Qwt.QwtLinearScaleEngine());
self.setAxisMaxMajor(Qwt.QwtPlot.yLeft, 10);
self.setAxisMaxMinor(Qwt.QwtPlot.yLeft, 0);
self.setAxisMaxMajor(Qwt.QwtPlot.yRight, 10);
self.setAxisMaxMinor(Qwt.QwtPlot.yRight, 0);
# curves for scope traces: 2 first so 1 is on top
self.curve2 = Qwt.QwtPlotCurve('Trace2')
self.curve2.setPen(Qt.QPen(Qt.Qt.magenta,PENWIDTH))
self.curve2.setYAxis(Qwt.QwtPlot.yRight)
self.curve2.attach(self)
self.curve1 = Qwt.QwtPlotCurve('Trace1')
self.curve1.setPen(Qt.QPen(Qt.Qt.blue,PENWIDTH))
self.curve1.setYAxis(Qwt.QwtPlot.yLeft)
self.curve1.attach(self)
# default settings
self.triggerval=0.0
self.maxamp=1.0
self.maxamp2=1.0
self.freeze=0
self.average=0
self.autocorrelation=0
self.avcount=0
self.datastream = None
self.offset1=0.0
self.offset2=0.0
# set data
# NumPy: f, g, a and p are arrays!
self.dt=1.0/samplerate
self.f = arange(0.0, 1.0, self.dt)
self.a1 = 0.0*self.f
self.a2 = 0.0*self.f
self.curve1.setData(self.f, self.a1)
self.curve2.setData(self.f, self.a2)
# start self.timerEvent() callbacks running
self.startTimer(100)
# plot
self.replot()
# convenience methods for knob callbacks
def setMaxAmp(self, val):
self.maxamp=val
def setMaxAmp2(self, val):
self.maxamp2=val
def setMaxTime(self, val):
self.maxtime=val
def setOffset1(self, val):
self.offset1=val
def setOffset2(self, val):
self.offset2=val
def setTriggerLevel(self, val):
self.triggerval=val
# plot scope traces
def setDisplay(self):
l=len(self.a1)
if LRchannel==BOTHLR:
self.curve1.setData(self.f[0:l], self.a1[:l]+self.offset1*self.maxamp)
self.curve2.setData(self.f[0:l], self.a2[:l]+self.offset2*self.maxamp2)
elif LRchannel==RIGHT:
self.curve1.setData([0.0,0.0], [0.0,0.0])
self.curve2.setData(self.f[0:l], self.a2[:l]+self.offset2*self.maxamp2)
elif LRchannel==LEFT:
self.curve1.setData(self.f[0:l], self.a1[:l]+self.offset1*self.maxamp)
self.curve2.setData([0.0,0.0], [0.0,0.0])
self.replot()
def getValue(self, index):
return self.f[index],self.a[index]
def setAverage(self, state):
self.average = state
self.avcount=0
def setAutoc(self, state):
self.autocorrelation = state
self.avcount=0
def setFreeze(self, freeze):
self.freeze = 1-self.freeze
def setDatastream(self, datastream):
self.datastream = datastream
# timer callback that does the work
def timerEvent(self,e): # Scope
if self.datastream == None: return
x=self.datastream.read(CHUNK)
if self.freeze==1 or self.avcount>16: return
X=fromstring(x,dtype='h')
if len(X) == 0: return
P=array(X,dtype='d')/32768.0
val=self.triggerval*self.maxamp
i=0
R=P[0::2]
L=P[1::2]
if self.autocorrelation:
lenX=len(R)
if lenX == 0: return
if lenX!=soundbuffersize:
print lenX
window=blackman(lenX)
A1=FFT.fft(R*window) #lenX
A2=FFT.fft(L*window) #lenX
B2=(A1*conjugate(A2))/10.0
R=FFT.ifft(B2).real
else: # normal scope
# set trigger levels
for i in range(len(R)-1):
if R[i]<val and R[i+1]>=val: break
if i > len(R)-2: i=0
R=R[i:]
L=L[i:]
if self.average == 0:
self.a1=R
self.a2=L
else:
self.avcount+=1
if self.avcount==1:
self.sumR=R
self.sumL=L
else:
lp=min(len(R),len(self.sumR))
self.sumR=self.sumR[:lp]+R[:lp]
self.sumL=self.sumL[:lp]+L[:lp]
self.a1=self.sumR/self.avcount
self.a2=self.sumL/self.avcount
self.setDisplay()
inittime=0.01
initamp=0.1
class ScopeFrame(Qt.QFrame):
"""
Oscilloscope widget --- contains controls + display
"""
def __init__(self, *args):
apply(Qt.QFrame.__init__, (self,) + args)
# the following: setPal.. doesn't seem to work on Win
try:
self.setPaletteBackgroundColor( QColor(240,240,245))
except: pass
knobpos=scopeheight+30
self.setFixedSize(700, scopeheight+150)
self.freezeState = 0
self.knbLevel = LblKnob(self,560,50,"Trigger level")
self.knbTime = LblKnob(self,560, 220,"Time", 1)
self.knbSignal = LblKnob(self,150, knobpos, "Signal1",1)
self.knbSignal2 = LblKnob(self,450, knobpos, "Signal2",1)
self.knbOffset1=LblKnob(self,10, knobpos,"offset1")
self.knbOffset2=LblKnob(self,310, knobpos,"offset2")
self.knbTime.setRange(0.0001, 1.0)
self.knbTime.setValue(0.01)
self.knbSignal.setRange(0.0001, 1.0)
self.knbSignal.setValue(0.1)
self.knbSignal2.setRange(0.0001, 1.0)
self.knbSignal2.setValue(0.1)
self.knbOffset2.setRange(-1.0, 1.0, 0.001)
self.knbOffset2.setValue(0.0)
self.knbOffset1.setRange(-1.0, 1.0, 0.001)
self.knbOffset1.setValue(0.0)
self.knbLevel.setRange(-1.0, 1.0, 0.001)
self.knbLevel.setValue(0.1)
self.knbLevel.setScaleMaxMajor(10)
self.plot = Scope(self)
self.plot.setGeometry(10, 10, 550, scopeheight)
self.picker = Qwt.QwtPlotPicker(
Qwt.QwtPlot.xBottom,
Qwt.QwtPlot.yLeft,
Qwt.QwtPicker.PointSelection | Qwt.QwtPicker.DragSelection,
Qwt.QwtPlotPicker.CrossRubberBand,
Qwt.QwtPicker.ActiveOnly, #AlwaysOn,
self.plot.canvas())
self.picker.setRubberBandPen(Qt.QPen(Qt.Qt.green))
self.picker.setTrackerPen(Qt.QPen(Qt.Qt.cyan))
self.connect(self.knbTime.knob, Qt.SIGNAL("valueChanged(double)"),
self.setTimebase)
self.knbTime.setValue(0.01)
self.connect(self.knbSignal.knob, Qt.SIGNAL("valueChanged(double)"),
self.setAmplitude)
self.connect(self.knbSignal2.knob, Qt.SIGNAL("valueChanged(double)"),
self.setAmplitude2)
self.knbSignal.setValue(0.1)
self.connect(self.knbLevel.knob, Qt.SIGNAL("valueChanged(double)"),
self.setTriggerlevel)
self.connect(self.knbOffset1.knob, Qt.SIGNAL("valueChanged(double)"),
self.plot.setOffset1)
self.connect(self.knbOffset2.knob, Qt.SIGNAL("valueChanged(double)"),
self.plot.setOffset2)
self.knbLevel.setValue(0.1)
self.plot.setAxisScale( Qwt.QwtPlot.xBottom, 0.0, 10.0*inittime)
self.plot.setAxisScale( Qwt.QwtPlot.yLeft, -5.0*initamp, 5.0*initamp)
self.plot.setAxisScale( Qwt.QwtPlot.yRight, -5.0*initamp, 5.0*initamp)
self.plot.show()
def _calcKnobVal(self,val):
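        # Quantise the (log10) knob value onto a 1-2-5 sequence per decade:
        # e.g. values just above log10(2) snap to 2*10**ival, values near the
        # top of a decade snap to 10*10**ival.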
ival=floor(val)
frac=val-ival
if frac >=0.9:
frac=1.0
elif frac>=0.66:
frac=log10(5.0)
elif frac>=log10(2.0):
frac=log10(2.0)
else: frac=0.0
dt=10**frac*10**ival
return dt
def setTimebase(self, val):
dt=self._calcKnobVal(val)
self.plot.setAxisScale( Qwt.QwtPlot.xBottom, 0.0, 10.0*dt)
self.plot.replot()
def setAmplitude(self, val):
dt=self._calcKnobVal(val)
self.plot.setAxisScale( Qwt.QwtPlot.yLeft, -5.0*dt, 5.0*dt)
self.plot.setMaxAmp( 5.0*dt )
self.plot.replot()
def setAmplitude2(self, val):
dt=self._calcKnobVal(val)
self.plot.setAxisScale( Qwt.QwtPlot.yRight, -5.0*dt, 5.0*dt)
self.plot.setMaxAmp2( 5.0*dt )
self.plot.replot()
def setTriggerlevel(self, val):
self.plot.setTriggerLevel(val)
self.plot.setDisplay()
#--------------------------------------------------------------------
class FScope(Qwt.QwtPlot):
"""
Power spectrum display widget
"""
def __init__(self, *args):
apply(Qwt.QwtPlot.__init__, (self,) + args)
self.setTitle('Power spectrum');
self.setCanvasBackground(Qt.Qt.white)
# grid
self.grid = Qwt.QwtPlotGrid()
self.grid.enableXMin(True)
self.grid.setMajPen(Qt.QPen(Qt.Qt.gray, 0, Qt.Qt.SolidLine));
self.grid.attach(self)
# axes
self.setAxisTitle(Qwt.QwtPlot.xBottom, 'Frequency [Hz]');
self.setAxisTitle(Qwt.QwtPlot.yLeft, 'Power [dB]');
self.setAxisMaxMajor(Qwt.QwtPlot.xBottom, 10);
self.setAxisMaxMinor(Qwt.QwtPlot.xBottom, 0);
self.setAxisMaxMajor(Qwt.QwtPlot.yLeft, 10);
self.setAxisMaxMinor(Qwt.QwtPlot.yLeft, 0);
# curves
self.curve2 = Qwt.QwtPlotCurve('PSTrace2')
self.curve2.setPen(Qt.QPen(Qt.Qt.magenta,PENWIDTH))
self.curve2.setYAxis(Qwt.QwtPlot.yLeft)
self.curve2.attach(self)
self.curve1 = Qwt.QwtPlotCurve('PSTrace1')
self.curve1.setPen(Qt.QPen(Qt.Qt.blue,PENWIDTH))
self.curve1.setYAxis(Qwt.QwtPlot.yLeft)
self.curve1.attach(self)
self.triggerval=0.0
self.maxamp=1.0
self.freeze=0
self.average=0
self.avcount=0
self.logy=1
self.datastream=None
self.dt=1.0/samplerate
self.df=1.0/(soundbuffersize*self.dt)
self.f = arange(0.0, samplerate, self.df)
self.a = 0.0*self.f
self.p = 0.0*self.f
self.curve1.setData(self.f, self.a)
self.setAxisScale( Qwt.QwtPlot.xBottom, 0.0, 10.0*initfreq)
self.setAxisScale( Qwt.QwtPlot.yLeft, -120.0, 0.0)
self.startTimer(100)
self.replot()
def resetBuffer(self):
self.df=1.0/(soundbuffersize*self.dt)
        self.f = arange(0.0, 20000.0, self.df)
        self.a = 0.0*self.f
        self.p = 0.0*self.f
        self.curve1.setData(self.f, self.a)
def setMaxAmp(self, val):
if val>0.6:
self.setAxisScale( Qwt.QwtPlot.yLeft, -120.0, 0.0)
self.logy=1
else:
self.setAxisScale( Qwt.QwtPlot.yLeft, 0.0, 10.0*val)
self.logy=0
self.maxamp=val
def setMaxTime(self, val):
self.maxtime=val
def setTriggerLevel(self, val):
self.triggerval=val
def setDisplay(self):
n=soundbuffersize/2
if LRchannel==BOTHLR:
self.curve1.setData(self.f[0:n], self.a[:n])
self.curve2.setData(self.f[0:n], self.a2[:n])
elif LRchannel==RIGHT:
self.curve1.setData([0.0,0.0], [0.0,0.0])
self.curve2.setData(self.f[0:n], self.a2[:n])
elif LRchannel==LEFT:
self.curve1.setData(self.f[0:n], self.a[:n])
self.curve2.setData([0.0,0.0], [0.0,0.0])
self.replot()
def getValue(self, index):
return self.f[index],self.a[index]
def setAverage(self, state):
self.average = state
self.avcount=0
def setFreeze(self, freeze):
self.freeze = 1-self.freeze
def setDatastream(self, datastream):
self.datastream = datastream
def timerEvent(self,e): # FFT
if self.datastream == None: return
x=self.datastream.read(CHUNK)
if self.freeze==1: return
X=fromstring(x,dtype='h')
if len(X) == 0: return
P=array(X,dtype='d')/32768.0
val=self.triggerval*self.maxamp
i=0
R=P[0::2]
L=P[1::2]
lenX=len(R)
if lenX == 0: return
if lenX!=(CHUNK): print 'size fail',lenX
window=blackman(lenX)
sumw=sum(window*window)
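        # Normalise by the window power so the spectrum estimate is (roughly)
        # independent of the window choice; the factors applied to sumw below
        # fold in the one-sided spectrum and the sample rate.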
A=FFT.fft(R*window) #lenX
B=(A*conjugate(A)).real
A=FFT.fft(L*window) #lenX
B2=(A*conjugate(A)).real
sumw*=2.0 # sym about Nyquist (*4); use rms (/2)
sumw/=self.dt # sample rate
B=B/sumw
B2=B2/sumw
if self.logy:
P1=log10(B)*10.0+20.0#60.0
P2=log10(B2)*10.0+20.0#60.0
else:
P1=B
P2=B2
if self.average == 0:
self.a=P1
self.a2=P2
else:
self.avcount+=1
if self.avcount==1:
self.sumP1=P1
self.sumP2=P2
else:
self.sumP1=self.sumP1+P1
self.sumP2=self.sumP2+P2
self.a=self.sumP1/self.avcount
self.a2=self.sumP2/self.avcount
self.setDisplay()
initfreq=100.0
class FScopeFrame(Qt.QFrame):
"""
Power spectrum widget --- contains controls + display
"""
def __init__(self , *args):
apply(Qt.QFrame.__init__, (self,) + args)
knobpos=scopeheight+30
        # the following: setPal.. doesn't seem to work on Win
try:
self.setPaletteBackgroundColor( QColor(240,240,245))
except: pass
self.setFixedSize(700, scopeheight+150)
self.freezeState = 0
self.knbSignal = LblKnob(self,160, knobpos, "Signal",1)
self.knbTime = LblKnob(self,310, knobpos,"Frequency", 1)
self.knbTime.setRange(1.0, 2000.0)
self.knbSignal.setRange(0.0000001, 1.0)
self.plot = FScope(self)
self.plot.setGeometry(10, 10, 500, scopeheight)
self.picker = Qwt.QwtPlotPicker(
Qwt.QwtPlot.xBottom,
Qwt.QwtPlot.yLeft,
Qwt.QwtPicker.PointSelection | Qwt.QwtPicker.DragSelection,
Qwt.QwtPlotPicker.CrossRubberBand,
Qwt.QwtPicker.ActiveOnly, #AlwaysOn,
self.plot.canvas())
self.picker.setRubberBandPen(Qt.QPen(Qt.Qt.green))
self.picker.setTrackerPen(Qt.QPen(Qt.Qt.cyan))
self.connect(self.knbTime.knob, Qt.SIGNAL("valueChanged(double)"),
self.setTimebase)
self.knbTime.setValue(1000.0)
self.connect(self.knbSignal.knob, Qt.SIGNAL("valueChanged(double)"),
self.setAmplitude)
self.knbSignal.setValue(1.0)
self.plot.show()
def _calcKnobVal(self,val):
ival=floor(val)
frac=val-ival
if frac >=0.9:
frac=1.0
elif frac>=0.66:
frac=log10(5.0)
elif frac>=log10(2.0):
frac=log10(2.0)
else: frac=0.0
dt=10**frac*10**ival
return dt
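    # Illustrative note (not part of the original widget): _calcKnobVal snaps
    # the raw knob value onto a 1-2-5 sequence per decade. For example a raw
    # value of 2.8 gives ival=2 and frac=0.8, so dt = 10**log10(5) * 10**2 =
    # 500.0; anything past x.9 rolls over to the next decade.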
def setTimebase(self, val):
dt=self._calcKnobVal(val)
self.plot.setAxisScale( Qwt.QwtPlot.xBottom, 0.0, 10.0*dt)
self.plot.replot()
def setAmplitude(self, val):
dt=self._calcKnobVal(val)
self.plot.setMaxAmp( dt )
self.plot.replot()
#---------------------------------------------------------------------
class FScopeDemo(Qt.QMainWindow):
"""
Application container widget
Contains scope and power spectrum analyser in tabbed windows.
Enables switching between the two.
Handles toolbar and status.
"""
def __init__(self, *args):
apply(Qt.QMainWindow.__init__, (self,) + args)
self.freezeState = 0
self.changeState = 0
self.averageState = 0
self.autocState = 0
self.scope = ScopeFrame(self)
self.current = self.scope
self.pwspec = FScopeFrame(self)
self.pwspec.hide()
self.stack=Qt.QTabWidget(self)
self.stack.addTab(self.scope,"scope")
self.stack.addTab(self.pwspec,"fft")
self.setCentralWidget(self.stack)
toolBar = Qt.QToolBar(self)
self.addToolBar(toolBar)
sb=self.statusBar()
sbfont=Qt.QFont("Helvetica",12)
sb.setFont(sbfont)
self.btnFreeze = Qt.QToolButton(toolBar)
self.btnFreeze.setText("Freeze")
self.btnFreeze.setIcon(Qt.QIcon(Qt.QPixmap(icons.stopicon)))
self.btnFreeze.setCheckable(True)
self.btnFreeze.setToolButtonStyle(Qt.Qt.ToolButtonTextUnderIcon)
toolBar.addWidget(self.btnFreeze)
self.btnPrint = Qt.QToolButton(toolBar)
self.btnPrint.setText("Print")
self.btnPrint.setIcon(Qt.QIcon(Qt.QPixmap(icons.print_xpm)))
self.btnPrint.setToolButtonStyle(Qt.Qt.ToolButtonTextUnderIcon)
toolBar.addWidget(self.btnPrint)
self.btnMode = Qt.QToolButton(toolBar)
self.btnMode.setText("fft")
self.btnMode.setIcon(Qt.QIcon(Qt.QPixmap(icons.pwspec)))
self.btnMode.setCheckable(True)
self.btnMode.setToolButtonStyle(Qt.Qt.ToolButtonTextUnderIcon)
toolBar.addWidget(self.btnMode)
self.btnAvge = Qt.QToolButton(toolBar)
self.btnAvge.setText("average")
self.btnAvge.setIcon(Qt.QIcon(Qt.QPixmap(icons.avge)))
self.btnAvge.setCheckable(True)
self.btnAvge.setToolButtonStyle(Qt.Qt.ToolButtonTextUnderIcon)
toolBar.addWidget(self.btnAvge)
self.btnAutoc = Qt.QToolButton(toolBar)
self.btnAutoc.setText("correlate")
self.btnAutoc.setIcon(Qt.QIcon(Qt.QPixmap(icons.avge)))
self.btnAutoc.setCheckable(True)
self.btnAutoc.setToolButtonStyle(Qt.Qt.ToolButtonTextUnderIcon)
toolBar.addWidget(self.btnAutoc)
self.lstLabl = Qt.QLabel("Buffer:",toolBar)
toolBar.addWidget(self.lstLabl)
self.lstChan = Qt.QComboBox(toolBar)
self.lstChan.insertItem(0,"8192")
self.lstChan.insertItem(1,"16k")
self.lstChan.insertItem(2,"32k")
toolBar.addWidget(self.lstChan)
self.lstLR = Qt.QLabel("Channels:",toolBar)
toolBar.addWidget(self.lstLR)
self.lstLRmode = Qt.QComboBox(toolBar)
self.lstLRmode.insertItem(0,"LR")
self.lstLRmode.insertItem(1,"L")
self.lstLRmode.insertItem(2,"R")
toolBar.addWidget(self.lstLRmode)
self.connect(self.btnPrint, Qt.SIGNAL('clicked()'), self.printPlot)
self.connect(self.btnFreeze, Qt.SIGNAL('toggled(bool)'), self.freeze)
self.connect(self.btnMode, Qt.SIGNAL('toggled(bool)'), self.mode)
self.connect(self.btnAvge, Qt.SIGNAL('toggled(bool)'), self.average)
self.connect(self.btnAutoc, Qt.SIGNAL('toggled(bool)'),
self.autocorrelation)
self.connect(self.lstChan, Qt.SIGNAL('activated(int)'), self.fftsize)
self.connect(self.lstLRmode, Qt.SIGNAL('activated(int)'), self.channel)
self.connect(self.scope.picker,
Qt.SIGNAL('moved(const QPoint&)'),
self.moved)
self.connect(self.scope.picker,
Qt.SIGNAL('appended(const QPoint&)'),
self.appended)
self.connect(self.pwspec.picker,
Qt.SIGNAL('moved(const QPoint&)'),
self.moved)
self.connect(self.pwspec.picker,
Qt.SIGNAL('appended(const QPoint&)'),
self.appended)
self.connect(self.stack,
Qt.SIGNAL('currentChanged(int)'),
self.mode)
self.showInfo(cursorInfo)
def showInfo(self, text):
self.statusBar().showMessage(text)
def printPlot(self):
p = QPrinter()
if p.setup():
self.current.plot.printPlot(p)#, Qwt.QwtFltrDim(200));
def fftsize(self, item):
pass
## global s, soundbuffersize
## s.stop()
## s.close()
## if item==2:
## soundbuffersize=8192*3
## elif item==1:
## soundbuffersize=8192*2
## else:
## soundbuffersize=8192
## s=f.stream(48000,2,'int16',soundbuffersize,1)
## s.open()
## s.start()
## self.pwspec.plot.resetBuffer()
## if self.current==self.pwspec:
## self.pwspec.plot.setDatastream(s)
## self.pwspec.plot.avcount=0
## else:
## self.scope.plot.setDatastream(s)
def channel(self, item):
global LRchannel
if item==2:
LRchannel=RIGHT
elif item==1:
LRchannel=LEFT
else:
LRchannel=BOTHLR
def freeze(self, on):
if on:
self.freezeState = 1
self.btnFreeze.setText("Run")
self.btnFreeze.setIcon(Qt.QIcon(Qt.QPixmap(icons.goicon)))
else:
self.freezeState = 0
self.btnFreeze.setText("Freeze")
self.btnFreeze.setIcon(Qt.QIcon(Qt.QPixmap(icons.stopicon)))
self.scope.plot.setFreeze(self.freezeState)
self.pwspec.plot.setFreeze(self.freezeState)
def average(self, on):
if on:
self.averageState = 1
self.btnAvge.setText("single")
self.btnAvge.setIcon(Qt.QIcon(Qt.QPixmap(icons.single)))
else:
self.averageState = 0
self.btnAvge.setText("average")
self.btnAvge.setIcon(Qt.QIcon(Qt.QPixmap(icons.avge)))
self.scope.plot.setAverage(self.averageState)
self.pwspec.plot.setAverage(self.averageState)
def autocorrelation(self, on):
if on:
self.autocState = 1
self.btnAutoc.setText("normal")
self.btnAutoc.setIcon(Qt.QIcon(Qt.QPixmap(icons.single)))
else:
self.autocState = 0
self.btnAutoc.setText("correlate")
self.btnAutoc.setIcon(Qt.QIcon(Qt.QPixmap(icons.avge)))
self.scope.plot.setAutoc(self.autocState)
def mode(self, on):
if on:
self.changeState=1
self.current=self.pwspec
self.btnMode.setText("scope")
self.btnMode.setIcon(Qt.QIcon(Qt.QPixmap(icons.scope)))
else:
self.changeState=0
self.current=self.scope
self.btnMode.setText("fft")
self.btnMode.setIcon(Qt.QIcon(Qt.QPixmap(icons.pwspec)))
if self.changeState==1:
self.stack.setCurrentIndex(self.changeState)
self.scope.plot.setDatastream(None)
self.pwspec.plot.setDatastream(stream)
else:
self.stack.setCurrentIndex(self.changeState)
self.pwspec.plot.setDatastream(None)
self.scope.plot.setDatastream(stream)
def moved(self, e):
if self.changeState==1:
name='Freq'
else:
name='Time'
frequency = self.current.plot.invTransform(Qwt.QwtPlot.xBottom, e.x())
amplitude = self.current.plot.invTransform(Qwt.QwtPlot.yLeft, e.y())
if name=='Time':
df=self.scope.plot.dt
i=int(frequency/df)
ampa=self.scope.plot.a1[i]
ampb=self.scope.plot.a2[i]
else:
df=self.pwspec.plot.df
i=int(frequency/df)
ampa=self.pwspec.plot.a[i]
ampb=self.pwspec.plot.a2[i]
self.showInfo('%s=%g, cursor=%g, A=%g, B=%g' %
(name,frequency, amplitude,ampa,ampb))
def appended(self, e):
print 's'
# Python semantics: self.pos = e.pos() does not work; force a copy
self.xpos = e.x()
self.ypos = e.y()
self.moved(e) # fake a mouse move to show the cursor position
# open sound card data stream
p = pyaudio.PyAudio()
stream = p.open(format = FORMAT,
channels = CHANNELS,
rate = RATE,
input = True,
frames_per_buffer = CHUNK)
# Admire!
app = Qt.QApplication(sys.argv)
demo=FScopeDemo()
demo.scope.plot.setDatastream(stream)
demo.show()
app.exec_()
stream.stop_stream()
stream.close()
p.terminate()
|
gpl-2.0
| 2,410,628,168,513,785,300
| 32.026651
| 83
| 0.586941
| false
| 3.21439
| false
| false
| false
|
alexgorban/models
|
official/modeling/tf_utils.py
|
1
|
5438
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common TF utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import tensorflow as tf
from tensorflow.python.util import deprecation
from official.modeling import activations
@deprecation.deprecated(
None,
"tf.keras.layers.Layer supports multiple positional args and kwargs as "
"input tensors. pack/unpack inputs to override __call__ is no longer "
"needed."
)
def pack_inputs(inputs):
"""Pack a list of `inputs` tensors to a tuple.
Args:
inputs: a list of tensors.
Returns:
    a tuple of tensors. If any input is None, replace it with a special constant
tensor.
"""
inputs = tf.nest.flatten(inputs)
outputs = []
for x in inputs:
if x is None:
outputs.append(tf.constant(0, shape=[], dtype=tf.int32))
else:
outputs.append(x)
return tuple(outputs)
@deprecation.deprecated(
None,
"tf.keras.layers.Layer supports multiple positional args and kwargs as "
"input tensors. pack/unpack inputs to override __call__ is no longer "
"needed."
)
def unpack_inputs(inputs):
"""unpack a tuple of `inputs` tensors to a tuple.
Args:
inputs: a list of tensors.
Returns:
    a tuple of tensors. If any input is a special constant tensor, replace it
with None.
"""
inputs = tf.nest.flatten(inputs)
outputs = []
for x in inputs:
if is_special_none_tensor(x):
outputs.append(None)
else:
outputs.append(x)
x = tuple(outputs)
# To trick the very pointless 'unbalanced-tuple-unpacking' pylint check
# from triggering.
if len(x) == 1:
return x[0]
return tuple(outputs)
def is_special_none_tensor(tensor):
"""Checks if a tensor is a special None Tensor."""
return tensor.shape.ndims == 0 and tensor.dtype == tf.int32
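# Illustrative sketch (not part of the original module): pack_inputs swaps
# each None for a scalar int32 zero so the tuple can flow through a layer's
# __call__, and unpack_inputs turns those sentinels back into None.
def _pack_unpack_sketch():
  packed = pack_inputs([tf.constant([1.0]), None])
  restored = unpack_inputs(packed)
  return restored[1] is None  # -> True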
# TODO(hongkuny): consider moving custom string-map lookup to keras api.
def get_activation(identifier):
"""Maps a identifier to a Python function, e.g., "relu" => `tf.nn.relu`.
It checks string first and if it is one of customized activation not in TF,
the corresponding activation will be returned. For non-customized activation
names and callable identifiers, always fallback to tf.keras.activations.get.
Args:
identifier: String name of the activation function or callable.
Returns:
A Python function corresponding to the activation function.
"""
if isinstance(identifier, six.string_types):
name_to_fn = {
"gelu": activations.gelu,
"simple_swish": activations.simple_swish,
"hard_swish": activations.hard_swish,
"identity": activations.identity,
}
identifier = str(identifier).lower()
if identifier in name_to_fn:
return tf.keras.activations.get(name_to_fn[identifier])
return tf.keras.activations.get(identifier)
def get_shape_list(tensor, expected_rank=None, name=None):
"""Returns a list of the shape of tensor, preferring static dimensions.
Args:
tensor: A tf.Tensor object to find the shape of.
expected_rank: (optional) int. The expected rank of `tensor`. If this is
      specified and the `tensor` has a different rank, an exception will be
thrown.
name: Optional name of the tensor for the error message.
Returns:
A list of dimensions of the shape of tensor. All static dimensions will
be returned as python integers, and dynamic dimensions will be returned
as tf.Tensor scalars.
"""
if expected_rank is not None:
assert_rank(tensor, expected_rank, name)
shape = tensor.shape.as_list()
non_static_indexes = []
for (index, dim) in enumerate(shape):
if dim is None:
non_static_indexes.append(index)
if not non_static_indexes:
return shape
dyn_shape = tf.shape(tensor)
for index in non_static_indexes:
shape[index] = dyn_shape[index]
return shape
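# Illustrative usage sketch (not part of the original module): with a fully
# static tensor every entry comes back as a plain Python int; a dynamic
# dimension (e.g. an unknown batch size) would instead come back as the
# corresponding scalar slice of tf.shape(). The function name is an
# assumption for the example only.
def _get_shape_list_sketch():
  x = tf.ones([2, 3, 128])
  return get_shape_list(x, expected_rank=3, name="x")  # -> [2, 3, 128]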
def assert_rank(tensor, expected_rank, name=None):
"""Raises an exception if the tensor rank is not of the expected rank.
Args:
tensor: A tf.Tensor to check the rank of.
expected_rank: Python integer or list of integers, expected rank.
name: Optional name of the tensor for the error message.
Raises:
ValueError: If the expected shape doesn't match the actual shape.
"""
expected_rank_dict = {}
if isinstance(expected_rank, six.integer_types):
expected_rank_dict[expected_rank] = True
else:
for x in expected_rank:
expected_rank_dict[x] = True
actual_rank = tensor.shape.ndims
if actual_rank not in expected_rank_dict:
raise ValueError(
"For the tensor `%s`, the actual tensor rank `%d` (shape = %s) is not "
"equal to the expected tensor rank `%s`" %
(name, actual_rank, str(tensor.shape), str(expected_rank)))
|
apache-2.0
| 7,756,442,032,010,609,000
| 30.074286
| 80
| 0.690143
| false
| 3.926354
| false
| false
| false
|
PyFilesystem/pyfilesystem
|
fs/rpcfs.py
|
1
|
11326
|
"""
fs.rpcfs
========
This module provides the class 'RPCFS' to access a remote FS object over
XML-RPC. You probably want to use this in conjunction with the 'RPCFSServer'
class from the :mod:`~fs.expose.xmlrpc` module.
"""
import xmlrpclib
import socket
import base64
from fs.base import *
from fs.errors import *
from fs.path import *
from fs import iotools
from fs.filelike import StringIO
import six
from six import PY3, b
def re_raise_faults(func):
"""Decorator to re-raise XML-RPC faults as proper exceptions."""
def wrapper(*args, **kwds):
try:
return func(*args, **kwds)
except (xmlrpclib.Fault), f:
#raise
# Make sure it's in a form we can handle
print f.faultString
bits = f.faultString.split(" ")
if bits[0] not in ["<type", "<class"]:
raise f
# Find the class/type object
bits = " ".join(bits[1:]).split(">:")
cls = bits[0]
msg = ">:".join(bits[1:])
cls = cls.strip('\'')
print "-" + cls
cls = _object_by_name(cls)
# Re-raise using the remainder of the fault code as message
if cls:
if issubclass(cls, FSError):
raise cls('', msg=msg)
else:
raise cls(msg)
raise f
except socket.error, e:
raise RemoteConnectionError(str(e), details=e)
return wrapper
def _object_by_name(name, root=None):
"""Look up an object by dotted-name notation."""
bits = name.split(".")
if root is None:
try:
obj = globals()[bits[0]]
except KeyError:
try:
obj = __builtins__[bits[0]]
except KeyError:
obj = __import__(bits[0], globals())
else:
obj = getattr(root, bits[0])
if len(bits) > 1:
return _object_by_name(".".join(bits[1:]), obj)
else:
return obj
class ReRaiseFaults:
"""XML-RPC proxy wrapper that re-raises Faults as proper Exceptions."""
def __init__(self, obj):
self._obj = obj
def __getattr__(self, attr):
val = getattr(self._obj, attr)
if callable(val):
val = re_raise_faults(val)
self.__dict__[attr] = val
return val
class RPCFS(FS):
"""Access a filesystem exposed via XML-RPC.
This class provides the client-side logic for accessing a remote FS
object, and is dual to the RPCFSServer class defined in fs.expose.xmlrpc.
Example::
fs = RPCFS("http://my.server.com/filesystem/location/")
"""
_meta = {'thread_safe' : True,
'virtual': False,
'network' : True,
}
def __init__(self, uri, transport=None):
"""Constructor for RPCFS objects.
The only required argument is the URI of the server to connect
to. This will be passed to the underlying XML-RPC server proxy
object, along with the 'transport' argument if it is provided.
:param uri: address of the server
"""
super(RPCFS, self).__init__(thread_synchronize=True)
self.uri = uri
self._transport = transport
self.proxy = self._make_proxy()
self.isdir('/')
@synchronize
def _make_proxy(self):
kwds = dict(allow_none=True, use_datetime=True)
if self._transport is not None:
proxy = xmlrpclib.ServerProxy(self.uri, self._transport, **kwds)
else:
proxy = xmlrpclib.ServerProxy(self.uri, **kwds)
return ReRaiseFaults(proxy)
def __str__(self):
return '<RPCFS: %s>' % (self.uri,)
def __repr__(self):
return '<RPCFS: %s>' % (self.uri,)
@synchronize
def __getstate__(self):
state = super(RPCFS, self).__getstate__()
try:
del state['proxy']
except KeyError:
pass
return state
def __setstate__(self, state):
super(RPCFS, self).__setstate__(state)
self.proxy = self._make_proxy()
def encode_path(self, path):
"""Encode a filesystem path for sending over the wire.
Unfortunately XMLRPC only supports ASCII strings, so this method
must return something that can be represented in ASCII. The default
is base64-encoded UTF8.
"""
return six.text_type(base64.b64encode(path.encode("utf8")), 'ascii')
def decode_path(self, path):
"""Decode paths arriving over the wire."""
return six.text_type(base64.b64decode(path.encode('ascii')), 'utf8')
@synchronize
def getmeta(self, meta_name, default=NoDefaultMeta):
if default is NoDefaultMeta:
meta = self.proxy.getmeta(meta_name)
else:
meta = self.proxy.getmeta_default(meta_name, default)
if isinstance(meta, basestring):
# To allow transport of meta with invalid xml chars (like null)
meta = self.encode_path(meta)
return meta
@synchronize
def hasmeta(self, meta_name):
return self.proxy.hasmeta(meta_name)
@synchronize
@iotools.filelike_to_stream
def open(self, path, mode='r', buffering=-1, encoding=None, errors=None, newline=None, line_buffering=False, **kwargs):
# TODO: chunked transport of large files
epath = self.encode_path(path)
if "w" in mode:
self.proxy.set_contents(epath, xmlrpclib.Binary(b("")))
if "r" in mode or "a" in mode or "+" in mode:
try:
data = self.proxy.get_contents(epath, "rb").data
except IOError:
if "w" not in mode and "a" not in mode:
raise ResourceNotFoundError(path)
if not self.isdir(dirname(path)):
raise ParentDirectoryMissingError(path)
                self.proxy.set_contents(epath, xmlrpclib.Binary(b("")))
else:
data = b("")
f = StringIO(data)
if "a" not in mode:
f.seek(0, 0)
else:
f.seek(0, 2)
oldflush = f.flush
oldclose = f.close
oldtruncate = f.truncate
def newflush():
self._lock.acquire()
try:
oldflush()
self.proxy.set_contents(epath, xmlrpclib.Binary(f.getvalue()))
finally:
self._lock.release()
def newclose():
self._lock.acquire()
try:
f.flush()
oldclose()
finally:
self._lock.release()
def newtruncate(size=None):
self._lock.acquire()
try:
oldtruncate(size)
f.flush()
finally:
self._lock.release()
f.flush = newflush
f.close = newclose
f.truncate = newtruncate
return f
@synchronize
def exists(self, path):
path = self.encode_path(path)
return self.proxy.exists(path)
@synchronize
def isdir(self, path):
path = self.encode_path(path)
return self.proxy.isdir(path)
@synchronize
def isfile(self, path):
path = self.encode_path(path)
return self.proxy.isfile(path)
@synchronize
def listdir(self, path="./", wildcard=None, full=False, absolute=False, dirs_only=False, files_only=False):
enc_path = self.encode_path(path)
if not callable(wildcard):
entries = self.proxy.listdir(enc_path,
wildcard,
full,
absolute,
dirs_only,
files_only)
entries = [self.decode_path(e) for e in entries]
else:
entries = self.proxy.listdir(enc_path,
None,
False,
False,
dirs_only,
files_only)
entries = [self.decode_path(e) for e in entries]
entries = [e for e in entries if wildcard(e)]
if full:
entries = [relpath(pathjoin(path, e)) for e in entries]
elif absolute:
entries = [abspath(pathjoin(path, e)) for e in entries]
return entries
@synchronize
def makedir(self, path, recursive=False, allow_recreate=False):
path = self.encode_path(path)
return self.proxy.makedir(path, recursive, allow_recreate)
@synchronize
def remove(self, path):
path = self.encode_path(path)
return self.proxy.remove(path)
@synchronize
def removedir(self, path, recursive=False, force=False):
path = self.encode_path(path)
return self.proxy.removedir(path, recursive, force)
@synchronize
def rename(self, src, dst):
src = self.encode_path(src)
dst = self.encode_path(dst)
return self.proxy.rename(src, dst)
@synchronize
def settimes(self, path, accessed_time, modified_time):
path = self.encode_path(path)
return self.proxy.settimes(path, accessed_time, modified_time)
@synchronize
def getinfo(self, path):
path = self.encode_path(path)
info = self.proxy.getinfo(path)
return info
@synchronize
def desc(self, path):
path = self.encode_path(path)
return self.proxy.desc(path)
@synchronize
def getxattr(self, path, attr, default=None):
path = self.encode_path(path)
attr = self.encode_path(attr)
return self.fs.getxattr(path, attr, default)
@synchronize
def setxattr(self, path, attr, value):
path = self.encode_path(path)
attr = self.encode_path(attr)
return self.fs.setxattr(path, attr, value)
@synchronize
def delxattr(self, path, attr):
path = self.encode_path(path)
attr = self.encode_path(attr)
return self.fs.delxattr(path, attr)
@synchronize
def listxattrs(self, path):
path = self.encode_path(path)
return [self.decode_path(a) for a in self.fs.listxattrs(path)]
@synchronize
def copy(self, src, dst, overwrite=False, chunk_size=16384):
src = self.encode_path(src)
dst = self.encode_path(dst)
return self.proxy.copy(src, dst, overwrite, chunk_size)
@synchronize
def move(self, src, dst, overwrite=False, chunk_size=16384):
src = self.encode_path(src)
dst = self.encode_path(dst)
return self.proxy.move(src, dst, overwrite, chunk_size)
@synchronize
def movedir(self, src, dst, overwrite=False, ignore_errors=False, chunk_size=16384):
src = self.encode_path(src)
dst = self.encode_path(dst)
return self.proxy.movedir(src, dst, overwrite, ignore_errors, chunk_size)
@synchronize
def copydir(self, src, dst, overwrite=False, ignore_errors=False, chunk_size=16384):
src = self.encode_path(src)
dst = self.encode_path(dst)
return self.proxy.copydir(src, dst, overwrite, ignore_errors, chunk_size)
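# Illustrative sketch (not part of the original module): the path codec used
# by RPCFS is a plain base64 round-trip, which keeps unicode paths intact on
# the ASCII-only XML-RPC transport. No server connection is needed to
# exercise it, so the instance below is created without running __init__;
# the function name is an assumption for the example only.
def _path_codec_sketch():
    fs = RPCFS.__new__(RPCFS)
    wire = fs.encode_path(u"caf\xe9/r\xe9sum\xe9.txt")
    assert fs.decode_path(wire) == u"caf\xe9/r\xe9sum\xe9.txt"
    return wire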
|
bsd-3-clause
| 6,380,686,112,742,397,000
| 30.373961
| 123
| 0.555183
| false
| 4.05659
| false
| false
| false
|
orlenko/bccf
|
src/pybb/permissions.py
|
1
|
6478
|
# -*- coding: utf-8 -*-
"""
Extensible permission system for pybbm
"""
from django.utils.importlib import import_module
from django.db.models import Q
from pybb import defaults
def _resolve_class(name):
""" resolves a class function given as string, returning the function """
if not name: return False
modname, funcname = name.rsplit('.', 1)
return getattr(import_module(modname), funcname)()
class DefaultPermissionHandler(object):
"""
Default Permission handler. If you want to implement custom permissions (for example,
private forums based on some application-specific settings), you can inherit from this
class and override any of the `filter_*` and `may_*` methods. Methods starting with
`may` are expected to return `True` or `False`, whereas methods starting with `filter_*`
should filter the queryset they receive, and return a new queryset containing only the
objects the user is allowed to see.
To activate your custom permission handler, set `settings.PYBB_PERMISSION_HANDLER` to
the full qualified name of your class, e.g. "`myapp.pybb_adapter.MyPermissionHandler`".
"""
#
# permission checks on categories
#
def filter_categories(self, user, qs):
""" return a queryset with categories `user` is allowed to see """
return qs.filter(hidden=False) if not user.is_staff else qs
def may_view_category(self, user, category):
""" return True if `user` may view this category, False if not """
return user.is_staff or not category.hidden
#
# permission checks on forums
#
def filter_forums(self, user, qs):
""" return a queryset with forums `user` is allowed to see """
return qs.filter(Q(hidden=False) & Q(category__hidden=False)) if not user.is_staff else qs
def may_view_forum(self, user, forum):
""" return True if user may view this forum, False if not """
return user.is_staff or ( forum.hidden == False and forum.category.hidden == False )
def may_create_topic(self, user, forum):
""" return True if `user` is allowed to create a new topic in `forum` """
return user.has_perm('pybb.add_post')
#
# permission checks on topics
#
def filter_topics(self, user, qs):
""" return a queryset with topics `user` is allowed to see """
if not user.is_staff:
qs = qs.filter(Q(forum__hidden=False) & Q(forum__category__hidden=False))
if not user.is_superuser:
if user.is_authenticated():
qs = qs.filter(Q(forum__moderators=user) | Q(user=user) | Q(on_moderation=False)).distinct()
else:
qs = qs.filter(on_moderation=False)
return qs
def may_view_topic(self, user, topic):
""" return True if user may view this topic, False otherwise """
if user.is_superuser:
return True
if not user.is_staff and (topic.forum.hidden or topic.forum.category.hidden):
return False # only staff may see hidden forum / category
if topic.on_moderation:
            return user.is_authenticated() and (user == topic.user or user in topic.forum.moderators.all())
return True
def may_moderate_topic(self, user, topic):
return user.is_superuser or user in topic.forum.moderators.all()
def may_close_topic(self, user, topic):
""" return True if `user` may close `topic` """
return self.may_moderate_topic(user, topic)
def may_open_topic(self, user, topic):
""" return True if `user` may open `topic` """
return self.may_moderate_topic(user, topic)
def may_stick_topic(self, user, topic):
""" return True if `user` may stick `topic` """
return self.may_moderate_topic(user, topic)
def may_unstick_topic(self, user, topic):
""" return True if `user` may unstick `topic` """
return self.may_moderate_topic(user, topic)
def may_create_post(self, user, topic):
""" return True if `user` is allowed to create a new post in `topic` """
if topic.forum.hidden and (not user.is_staff):
# if topic is hidden, only staff may post
return False
if topic.closed and (not user.is_staff):
# if topic is closed, only staff may post
return False
# only user which have 'pybb.add_post' permission may post
return user.has_perm('pybb.add_post')
def may_post_as_admin(self, user):
""" return True if `user` may post as admin """
return user.is_staff
#
# permission checks on posts
#
def filter_posts(self, user, qs):
""" return a queryset with posts `user` is allowed to see """
# first filter by topic availability
if not user.is_staff:
qs = qs.filter(Q(topic__forum__hidden=False) & Q(topic__forum__category__hidden=False))
if not defaults.PYBB_PREMODERATION or user.is_superuser:
# superuser may see all posts, also if premoderation is turned off moderation
# flag is ignored
return qs
elif user.is_authenticated():
# post is visible if user is author, post is not on moderation, or user is moderator
# for this forum
qs = qs.filter(Q(user=user) | Q(on_moderation=False) | Q(topic__forum__moderators=user))
else:
# anonymous user may not see posts which are on moderation
qs = qs.filter(on_moderation=False)
return qs
def may_view_post(self, user, post):
""" return True if `user` may view `post`, False otherwise """
if user.is_superuser:
return True
if post.on_moderation:
return post.user == user or user in post.topic.forum.moderators.all()
return True
def may_edit_post(self, user, post):
""" return True if `user` may edit `post` """
return user.is_superuser or post.user == user or self.may_moderate_topic(user, post.topic)
def may_delete_post(self, user, post):
""" return True if `user` may delete `post` """
return self.may_moderate_topic(user, post.topic)
#
# permission checks on users
#
def may_block_user(self, user, user_to_block):
""" return True if `user` may block `user_to_block` """
return user.has_perm('pybb.block_users')
perms = _resolve_class(defaults.PYBB_PERMISSION_HANDLER)
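# Illustrative sketch (not part of the original module): a custom handler of
# the kind described in the DefaultPermissionHandler docstring. The
# "members_only" forum flag is an assumption for the example; the class would
# be activated by pointing settings.PYBB_PERMISSION_HANDLER at its dotted path.
class MembersOnlyPermissionHandler(DefaultPermissionHandler):
    def filter_forums(self, user, qs):
        qs = super(MembersOnlyPermissionHandler, self).filter_forums(user, qs)
        if not user.is_authenticated():
            qs = qs.filter(members_only=False)  # hypothetical model field
        return qs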
|
unlicense
| -5,898,669,304,835,996,000
| 38.506098
| 108
| 0.629824
| false
| 3.926061
| false
| false
| false
|
ktok07b6/polyphony
|
polyphony/compiler/libs.py
|
1
|
3330
|
single_port_ram = """module SinglePortRam #
(
parameter DATA_WIDTH = 8,
parameter ADDR_WIDTH = 4,
parameter RAM_DEPTH = 1 << ADDR_WIDTH
)
(
input clk,
input rst,
input [ADDR_WIDTH-1:0] ram_addr,
input [DATA_WIDTH-1:0] ram_d,
input ram_we,
output [DATA_WIDTH-1:0] ram_q
);
reg [DATA_WIDTH-1:0] mem [0:RAM_DEPTH-1];
reg [ADDR_WIDTH-1:0] read_addr;
assign ram_q = mem[read_addr];
always @ (posedge clk) begin
if (ram_we)
mem[ram_addr] <= ram_d;
read_addr <= ram_addr;
end
endmodule
"""
bidirectional_single_port_ram = """module BidirectionalSinglePortRam #
(
parameter DATA_WIDTH = 8,
parameter ADDR_WIDTH = 4,
parameter RAM_LENGTH = 16,
parameter RAM_DEPTH = 1 << (ADDR_WIDTH-1)
)
(
input clk,
input rst,
input [ADDR_WIDTH-1:0] ram_addr,
input [DATA_WIDTH-1:0] ram_d,
input ram_we,
output [DATA_WIDTH-1:0] ram_q,
output [ADDR_WIDTH-1:0] ram_len
);
reg [DATA_WIDTH-1:0] mem [0:RAM_DEPTH-1];
reg [ADDR_WIDTH-1:0] read_addr;
/*
integer i;
initial begin
for (i = 0; i < RAM_DEPTH; i = i + 1)
mem[i] = 0;
end
*/
function [ADDR_WIDTH-1:0] address (
input [ADDR_WIDTH-1:0] in_addr
);
begin
if (in_addr[ADDR_WIDTH-1] == 1'b1) begin
address = RAM_LENGTH + in_addr;
end else begin
address = in_addr;
end
end
endfunction // address
wire [ADDR_WIDTH-1:0] a;
assign a = address(ram_addr);
assign ram_q = mem[read_addr];
assign ram_len = RAM_LENGTH;
always @ (posedge clk) begin
if (ram_we)
mem[a] <= ram_d;
read_addr <= a;
end
endmodule
"""
fifo = """module FIFO #
(
parameter integer DATA_WIDTH = 32,
parameter integer ADDR_WIDTH = 2,
parameter integer LENGTH = 4
)
(
input clk,
input rst,
input [DATA_WIDTH - 1 : 0] din,
input write,
output full,
output [DATA_WIDTH - 1 : 0] dout,
input read,
output empty,
output will_full,
output will_empty
);
reg [ADDR_WIDTH - 1 : 0] head;
reg [ADDR_WIDTH - 1 : 0] tail;
reg [ADDR_WIDTH : 0] count;
wire we;
assign we = write && !full;
reg [DATA_WIDTH - 1 : 0] mem [0 : LENGTH - 1];
initial begin : initialize_mem
integer i;
for (i = 0; i < LENGTH; i = i + 1) begin
mem[i] = 0;
end
end
always @(posedge clk) begin
if (we) mem[head] <= din;
end
assign dout = mem[tail];
assign full = count >= LENGTH;
assign empty = count == 0;
assign will_full = write && !read && count == LENGTH-1;
assign will_empty = read && !write && count == 1;
always @(posedge clk) begin
if (rst == 1) begin
head <= 0;
tail <= 0;
count <= 0;
end else begin
if (write && read) begin
if (count == LENGTH) begin
count <= count - 1;
tail <= (tail == (LENGTH - 1)) ? 0 : tail + 1;
end else if (count == 0) begin
count <= count + 1;
head <= (head == (LENGTH - 1)) ? 0 : head + 1;
end else begin
count <= count;
head <= (head == (LENGTH - 1)) ? 0 : head + 1;
tail <= (tail == (LENGTH - 1)) ? 0 : tail + 1;
end
end else if (write) begin
if (count < LENGTH) begin
count <= count + 1;
head <= (head == (LENGTH - 1)) ? 0 : head + 1;
end
end else if (read) begin
if (count > 0) begin
count <= count - 1;
tail <= (tail == (LENGTH - 1)) ? 0 : tail + 1;
end
end
end
end
endmodule
"""
|
mit
| 682,272,391,786,811,300
| 20.907895
| 70
| 0.571471
| false
| 2.878133
| false
| false
| false
|
tonioo/modoboa
|
modoboa/core/forms.py
|
1
|
4606
|
# -*- coding: utf-8 -*-
"""Core forms."""
from __future__ import unicode_literals
from django import forms
from django.contrib.auth import (
forms as auth_forms, get_user_model, password_validation
)
from django.db.models import Q
from django.utils.translation import ugettext as _, ugettext_lazy
from modoboa.core.models import User
from modoboa.parameters import tools as param_tools
class LoginForm(forms.Form):
"""User login form."""
username = forms.CharField(
label=ugettext_lazy("Username"),
widget=forms.TextInput(attrs={"class": "form-control"})
)
password = forms.CharField(
label=ugettext_lazy("Password"),
widget=forms.PasswordInput(attrs={"class": "form-control"})
)
rememberme = forms.BooleanField(
initial=False,
required=False
)
class ProfileForm(forms.ModelForm):
"""Form to update User profile."""
oldpassword = forms.CharField(
label=ugettext_lazy("Old password"), required=False,
widget=forms.PasswordInput(attrs={"class": "form-control"})
)
newpassword = forms.CharField(
label=ugettext_lazy("New password"), required=False,
widget=forms.PasswordInput(attrs={"class": "form-control"})
)
confirmation = forms.CharField(
label=ugettext_lazy("Confirmation"), required=False,
widget=forms.PasswordInput(attrs={"class": "form-control"})
)
class Meta(object):
model = User
fields = ("first_name", "last_name", "language",
"phone_number", "secondary_email")
widgets = {
"first_name": forms.TextInput(attrs={"class": "form-control"}),
"last_name": forms.TextInput(attrs={"class": "form-control"})
}
def __init__(self, update_password, *args, **kwargs):
super(ProfileForm, self).__init__(*args, **kwargs)
if not update_password:
del self.fields["oldpassword"]
del self.fields["newpassword"]
del self.fields["confirmation"]
def clean_oldpassword(self):
if self.cleaned_data["oldpassword"] == "":
return self.cleaned_data["oldpassword"]
if param_tools.get_global_parameter("authentication_type") != "local":
return self.cleaned_data["oldpassword"]
if not self.instance.check_password(self.cleaned_data["oldpassword"]):
            raise forms.ValidationError(_("Old password mismatches"))
return self.cleaned_data["oldpassword"]
def clean_confirmation(self):
newpassword = self.cleaned_data["newpassword"]
confirmation = self.cleaned_data["confirmation"]
if not newpassword and not confirmation:
return confirmation
if newpassword != confirmation:
raise forms.ValidationError(_("Passwords mismatch"))
password_validation.validate_password(confirmation, self.instance)
return confirmation
def save(self, commit=True):
user = super(ProfileForm, self).save(commit=False)
if commit:
if self.cleaned_data.get("confirmation", "") != "":
user.set_password(
self.cleaned_data["confirmation"],
self.cleaned_data["oldpassword"]
)
user.save()
return user
class APIAccessForm(forms.Form):
"""Form to control API access."""
enable_api_access = forms.BooleanField(
label=ugettext_lazy("Enable API access"), required=False)
def __init__(self, *args, **kwargs):
"""Initialize form."""
user = kwargs.pop("user")
super(APIAccessForm, self).__init__(*args, **kwargs)
self.fields["enable_api_access"].initial = hasattr(user, "auth_token")
class PasswordResetForm(auth_forms.PasswordResetForm):
"""Custom password reset form."""
def get_users(self, email):
"""Return matching user(s) who should receive a reset."""
return (
get_user_model()._default_manager.filter(
email__iexact=email, is_active=True)
.exclude(Q(secondary_email__isnull=True) | Q(secondary_email=""))
)
def send_mail(self, subject_template_name, email_template_name,
context, from_email, to_email,
html_email_template_name=None):
"""Send message to secondary email instead."""
to_email = context["user"].secondary_email
super(PasswordResetForm, self).send_mail(
subject_template_name, email_template_name,
context, from_email, to_email, html_email_template_name)
|
isc
| -1,737,334,883,102,439,700
| 34.160305
| 78
| 0.621146
| false
| 4.276695
| false
| false
| false
|
praekelt/txtalert
|
txtalert/apps/bookings/views.py
|
1
|
7598
|
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.template import RequestContext
from django.shortcuts import render_to_response, get_object_or_404
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.core.paginator import Paginator
import logging
from django.utils import timezone
from txtalert.core.models import Visit, PleaseCallMe, MSISDN, AuthProfile, Patient
from txtalert.core.forms import RequestCallForm
from txtalert.core.utils import normalize_msisdn
from datetime import date, datetime
from functools import wraps
def effective_page_range_for(page,paginator,delta=3):
return [p for p in range(page.number-delta,page.number+delta+1)
if (p > 0 and p <= paginator.num_pages)]
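# Illustrative note (not part of the original module): for page number 5 of a
# six-page paginator and the default delta of 3, effective_page_range_for
# yields [2, 3, 4, 5, 6] -- the window of page links the templates render.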
def auth_profile_required(func):
@wraps(func)
def wrapper(request, *args, **kwargs):
try:
return func(request, *args, **kwargs)
except AuthProfile.DoesNotExist:
return render_to_response('auth_profile_error.html', {
}, context_instance = RequestContext(request))
return wrapper
@login_required
@auth_profile_required
def index(request):
profile = request.user.get_profile()
return render_to_response("index.html", {
'profile': profile,
'patient': profile.patient,
}, context_instance = RequestContext(request))
@login_required
def appointment_change(request, visit_id):
profile = request.user.get_profile()
visit = get_object_or_404(Visit, pk=visit_id)
change_requested = request.POST.get('when')
if change_requested == 'later':
visit.reschedule_later()
messages.add_message(request, messages.INFO,
"Your request to change the appointment has been sent to " \
"the clinic. You will be notified as soon as possible.")
elif change_requested == 'earlier':
visit.reschedule_earlier()
messages.add_message(request, messages.INFO,
"Your request to change the appointment has been sent to " \
"the clinic. You will be notified as soon as possible.")
return render_to_response("appointment/change.html", {
'profile': profile,
'patient': profile.patient,
'visit': visit,
'change_requested': change_requested,
}, context_instance = RequestContext(request))
@login_required
def appointment_upcoming(request):
profile = request.user.get_profile()
patient = profile.patient
paginator = Paginator(patient.visit_set.upcoming(), 5)
page = paginator.page(request.GET.get('p', 1))
return render_to_response("appointment/upcoming.html", {
'profile': profile,
'patient': patient,
'paginator': paginator,
'page': page,
'effective_page_range': effective_page_range_for(page, paginator)
}, context_instance = RequestContext(request))
@login_required
def appointment_history(request):
profile = request.user.get_profile()
patient = profile.patient
paginator = Paginator(patient.visit_set.past().order_by('-date'), 5)
page = paginator.page(request.GET.get('p', 1))
return render_to_response("appointment/history.html", {
'profile': profile,
'patient': profile.patient,
'paginator': paginator,
'page': page,
'effective_page_range': effective_page_range_for(page, paginator)
}, context_instance=RequestContext(request))
@login_required
def attendance_barometer(request):
profile = request.user.get_profile()
patient = profile.patient
visits = patient.visit_set.all()
attended = visits.filter(status='a').count()
missed = visits.filter(status='m').count()
total = visits.filter(date__lt=date.today()).count()
if total:
attendance = int(float(attended) / float(total) * 100)
else:
attendance = 0.0
return render_to_response("attendance_barometer.html", {
'profile': profile,
'patient': patient,
'attendance': attendance,
'attended': attended,
'missed': missed,
'total': total
}, context_instance=RequestContext(request))
def request_call(request):
if request.POST:
form = RequestCallForm(request.POST)
if form.is_valid():
clinic = form.cleaned_data['clinic']
# normalize
msisdn = normalize_msisdn(form.cleaned_data['msisdn'])
# orm object
msisdn_record, _ = MSISDN.objects.get_or_create(msisdn=msisdn)
pcm = PleaseCallMe(user=clinic.user, clinic=clinic,
msisdn=msisdn_record, timestamp=timezone.now(),
message='Please call me!', notes='Call request issued via txtAlert Bookings')
pcm.save()
messages.add_message(request, messages.INFO,
'Your call request has been registered. '\
'The clinic will call you back as soon as possible.')
return HttpResponseRedirect(reverse('bookings:request_call'))
else:
form = RequestCallForm(initial={
'msisdn': '' if request.user.is_anonymous() else request.user.username
})
if request.user.is_anonymous():
profile = patient = None
else:
profile = request.user.get_profile()
patient = profile.patient
return render_to_response('request_call.html', {
'profile': profile,
'patient': patient,
'form': form,
}, context_instance=RequestContext(request))
def widget_landing(request):
if 'patient_id' in request.GET \
and 'msisdn' in request.GET:
try:
msisdn = normalize_msisdn(request.GET.get('msisdn'))
patient_id = request.GET.get('patient_id')
patient = Patient.objects.get(active_msisdn__msisdn=msisdn,
te_id=patient_id)
try:
visit = patient.next_visit()
except Visit.DoesNotExist:
visit = None
visits = patient.visit_set.all()
context = {
'msisdn': msisdn,
'patient_id': patient_id,
'patient': patient,
'name': patient.name,
'surname': patient.surname,
'next_appointment': visit.date if visit else '',
'visit_id': visit.pk if visit else '',
'clinic': visit.clinic.name if visit else '',
'attendance': int((1.0 - patient.risk_profile) * 100),
'total': visits.count(),
'attended': visits.filter(status='a').count(),
'rescheduled': visits.filter(status='r').count(),
'missed': visits.filter(status='m').count(),
}
except Patient.DoesNotExist:
context = {
'patient_id': patient_id,
'msisdn': msisdn,
}
else:
context = {
'patient_id': request.GET.get('patient_id', ''),
'msisdn': request.GET.get('msisdn', ''),
}
print context
return render_to_response('widget_landing.html', context,
context_instance=RequestContext(request))
def todo(request):
"""Anything that resolves to here still needs to be completed"""
return HttpResponse("This still needs to be implemented.")
def not_found(request):
"""test 404 template rendering"""
raise Http404
def server_error(request):
"""test 500 template rendering"""
raise Exception, '500 testing'
|
gpl-3.0
| -763,102,905,537,551,700
| 36.995
| 93
| 0.622664
| false
| 4.007384
| false
| false
| false
|
Flamacue/pretix
|
src/tests/plugins/test_ticketoutputpdf.py
|
2
|
1826
|
from datetime import timedelta
from decimal import Decimal
from io import BytesIO
import pytest
from django.utils.timezone import now
from PyPDF2 import PdfFileReader
from pretix.base.models import (
Event, Item, ItemVariation, Order, OrderPosition, Organizer,
)
from pretix.plugins.ticketoutputpdf.ticketoutput import PdfTicketOutput
@pytest.fixture
def env():
o = Organizer.objects.create(name='Dummy', slug='dummy')
event = Event.objects.create(
organizer=o, name='Dummy', slug='dummy',
date_from=now(), live=True
)
o1 = Order.objects.create(
code='FOOBAR', event=event, email='dummy@dummy.test',
status=Order.STATUS_PENDING,
datetime=now(), expires=now() + timedelta(days=10),
total=Decimal('13.37'), payment_provider='banktransfer'
)
shirt = Item.objects.create(event=event, name='T-Shirt', default_price=12)
shirt_red = ItemVariation.objects.create(item=shirt, default_price=14, value="Red")
OrderPosition.objects.create(
order=o1, item=shirt, variation=shirt_red,
price=12, attendee_name=None, secret='1234'
)
OrderPosition.objects.create(
order=o1, item=shirt, variation=shirt_red,
price=12, attendee_name=None, secret='5678'
)
return event, o1
@pytest.mark.django_db
def test_generate_pdf(env, mocker):
mocked = mocker.patch('reportlab.pdfgen.canvas.Canvas.drawString')
event, order = env
event.settings.set('ticketoutput_pdf_code_x', 30)
event.settings.set('ticketoutput_pdf_code_y', 50)
event.settings.set('ticketoutput_pdf_code_s', 2)
o = PdfTicketOutput(event)
fname, ftype, buf = o.generate(order.positions.first())
assert ftype == 'application/pdf'
pdf = PdfFileReader(BytesIO(buf))
assert pdf.numPages == 1
assert mocked.called
|
apache-2.0
| 6,230,654,048,277,624,000
| 33.45283
| 87
| 0.693866
| false
| 3.406716
| false
| false
| false
|
abusesa/abusehelper
|
abusehelper/core/rules/classifier.py
|
1
|
1033
|
class Classifier(object):
def __init__(self):
self._rules = dict()
def inc(self, rule, class_id):
classes = self._rules.get(rule, None)
if classes is None:
classes = dict()
self._rules[rule] = classes
classes[class_id] = classes.get(class_id, 0) + 1
def dec(self, rule, class_id):
classes = self._rules.get(rule, None)
if classes is None:
return
count = classes.get(class_id, 0) - 1
if count > 0:
classes[class_id] = count
else:
classes.pop(class_id, None)
if not classes:
self._rules.pop(rule, None)
def classify(self, obj):
result = set()
cache = dict()
for rule, classes in self._rules.iteritems():
if result.issuperset(classes):
continue
if rule.match(obj, cache):
result.update(classes)
return result
def is_empty(self):
return not self._rules
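# Illustrative usage sketch (not part of the original module). Any hashable
# object with a match(obj, cache) method can act as a rule; the stand-in rule
# and helper below are assumptions for the example only.
class _ContainsRule(object):
    def __init__(self, key):
        self.key = key
    def match(self, obj, cache):
        return self.key in obj
def _classifier_sketch():
    classifier = Classifier()
    rule = _ContainsRule("malware")
    classifier.inc(rule, "malicious")
    assert classifier.classify(["malware", "botnet"]) == set(["malicious"])
    classifier.dec(rule, "malicious")  # reference count drops back to zero
    assert classifier.is_empty()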
|
mit
| 496,745,086,639,479,040
| 25.487179
| 56
| 0.516941
| false
| 4.066929
| false
| false
| false
|
geobricks/pgeo
|
pgeo/metadata/metadata.py
|
1
|
2602
|
import os
import json
from pgeo.utils.json import dict_merge_and_convert_dates
from pgeo.metadata.db_metadata import DBMetadata
from pgeo.metadata.search import MongoSearch
from pgeo.utils import log
from pgeo.config.metadata.core import template as core_template
from pgeo.config.metadata.raster import template as raster_template
log = log.logger(__name__)
# REMOVE EXAMPLE
#db.layer.remove({'meContent.seCoverage.coverageSector.codes.code': {'$in': ['EARTHSTAT']}})
#db.layer.remove({'meContent.seCoverage.coverageSector.codes.code': {'$in': ['TRMM']}})
# db.layer.remove({'meContent.seCoverage.coverageSector.codes.code': {'$in': ['MODIS']}})
# db.layer.find({'meContent.seCoverage.coverageSector.codes.code': {'$in': ['MODIS']}})
#db.layer.remove({'meContent.seCoverage.coverageSector.codes.code': {'$in': ['MODIS-SADC']}})
#db.layer.remove({'meContent.seCoverage.coverageSector.codes.code': {'$in': ['MODIS_TEST']}})
#db.layer.remove({'meContent.seCoverage.coverageSector.codes.code': {'$in': ['Doukkala-Seasonal-wheat']}})
#db.layer.remove({'meContent.seCoverage.coverageSector.codes.code': {'$in': ['Doukkala - actual evapotransipiration']}})
# with Regular expression
#db.layer.remove({'meContent.seCoverage.coverageSector.codes.code': {$regex: /^MOROCCO/}})
#db.layer.remove({'meContent.seCoverage.coverageSector.codes.code': {$regex: /^JRC/}})
#db.layer.find({'meContent.seCoverage.coverageSector.codes.code': {$regex: /^UMD/}})
#db.layer.find({'uid': {$regex: /^UMD/}})
class Metadata:
db_metadata = None
search = None
def __init__(self, settings):
self.settings = settings
print settings
self.db_metadata = DBMetadata(settings["db"]["metadata"])
self.search = MongoSearch(settings["db"]["metadata"]['connection'], settings["db"]["metadata"]["database"], settings["db"]["metadata"]['document']['layer'])
log.info("---Metadata initialization---")
log.info(self.db_metadata)
log.info(self.search)
def merge_layer_metadata(self, template_name, data):
"""
Merge user's data with the core metadata and the selected template
@param template_name: Name of the template, e.g. 'modis'
@param data: User data, in JSON format
@return: Merged JSON
"""
if template_name == "raster":
out = dict_merge_and_convert_dates(core_template, raster_template)
elif template_name == "vector":
log.error("TODO: vector template")
out = dict_merge_and_convert_dates(out, data)
#log.info(out)
return out
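# Illustrative usage sketch (not part of the original module); the settings
# dict and the user payload below are assumptions for the example only:
#
#   md = Metadata(settings)
#   merged = md.merge_layer_metadata("raster", {"title": {"EN": "MODIS NDVI"}})
#   # core_template is overlaid with raster_template and then with the user
#   # data, converting date objects along the way (dict_merge_and_convert_dates).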
|
gpl-2.0
| -6,503,222,040,618,752,000
| 40.31746
| 164
| 0.673328
| false
| 3.423684
| false
| false
| false
|
habalux/pglog2grok
|
pglog2grok.py
|
1
|
4033
|
#!/usr/bin/env python
#
# Small script for generating a logstash grok filter and patterns for postgresql
# using a non-default log_line_prefix setting.
#
# Output of this script has NOT been tested in any production environment as of yet.
#
# Copyright (c) 2014, Teemu Haapoja <teemu.haapoja@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
# Custom patterns
# PGLOG_TZ is a modified TZ pattern (original didn't recognize "EET" as valid)
pg_patterns = """
PGLOG_TZ (?:[PMCE][SDE]T|UTC)
PGLOG_APPLICATION_NAME .*?
PGLOG_USER_NAME .*?
PGLOG_DATABASE_NAME .*?
PGLOG_REMOTE_HOST_PORT (\[local\]|%{IP:host}\(%{POSINT:port}\))
PGLOG_REMOTE_HOST (\[local\]|%{IP:host})
PGLOG_PROCESS_ID %{POSINT}
PGLOG_TIMESTAMP %{TIMESTAMP_ISO8601} %{PGLOG_TZ:TZ}
PGLOG_COMMAND_TAG .*?
PGLOG_SQL_STATE .*?
PGLOG_SESSION_ID [0-9\.A-Fa-f]+
PGLOG_SESSION_LINE_NUMBER %{POSINT}
PGLOG_SESSION_START_TIMESTAMP %{PGLOG_TIMESTAMP}
PGLOG_VIRTUAL_TRANSACTION_ID ([\/0-9A-Fa-f]+)
PGLOG_TRANSACTION_ID ([0-9A-Fa-f])+
PGLOG_LOGLEVEL (DEBUG[1-5]|INFO|NOTICE|WARNING|ERROR|LOG|FATAL|PANIC|DETAIL)
PGLOG_MESSAGE .*
"""
def prefix_to_grok(pr):
replace_map = {
r'%a' : "%{PGLOG_APPLICATION_NAME:application_name}",
r'%u' : "%{PGLOG_USER_NAME:user_name}",
r'%d' : "%{PGLOG_DATABASE_NAME:database_name}",
r'%r' : "%{PGLOG_REMOTE_HOST_PORT:remote_host_port}",
r'%h' : "%{PGLOG_REMOTE_HOST:remote_host}",
r'%p' : "%{PGLOG_PROCESS_ID:process_id}",
r'%t' : "%{PGLOG_TIMESTAMP}",
r'%m' : "%{PGLOG_TIMESTAMP}",
r'%i' : "%{PGLOG_COMMAND_TAG:command_tag}",
r'%e' : "%{PGLOG_SQL_STATE:sql_state}",
r'%c' : "%{PGLOG_SESSION_ID:session_id}",
r'%l' : "%{PGLOG_SESSION_LINE_NUMBER:session_line_number}",
r'%s' : "%{PGLOG_SESSION_START_TIMESTAMP:session_start_timestamp}",
r'%v' : "%{PGLOG_VIRTUAL_TRANSACTION_ID:virtual_transaction_id}",
r'%x' : "%{PGLOG_TRANSACTION_ID:transaction_id}",
r'%q' : "",
}
pr = pr.replace(r'%%',r'%')
for k,v in replace_map.items():
pr = pr.replace(k,v)
return "%s%%{PGLOG_LOGLEVEL:loglevel}: %%{PGLOG_MESSAGE:message}"%(pr)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Create a grok pattern for your postgresql configuration")
parser.add_argument('-q','--quiet', help="Be quiet, only output the grok pattern", action='store_const', const=True)
parser.add_argument('-p', '--prefix', help="log_line_prefix from YOUR postgresql.conf", required=True)
args = parser.parse_args()
if args.quiet:
print prefix_to_grok(args.prefix)
else:
print "You need to add these patterns to your logstash patterns_dir: "
print "> ==== snip === <"
print pg_patterns
print "> ==== snip === <"
print ""
print "This is the filter for your log_line_prefix:\n\n%s"%(prefix_to_grok(args.prefix))
|
bsd-2-clause
| -7,059,104,021,549,284,000
| 38.539216
| 117
| 0.706422
| false
| 3.046073
| false
| false
| false
|
TeMPO-Consulting/mediadrop
|
mediacore/controllers/login.py
|
1
|
4471
|
# This file is a part of MediaDrop (http://www.mediadrop.net),
# Copyright 2009-2013 MediaCore Inc., Felix Schwarz and other contributors.
# For the exact contribution history, see the git revision log.
# The source code contained in this file is licensed under the GPLv3 or
# (at your option) any later version.
# See LICENSE.txt in the main project directory, for more information.
from formencode import Invalid
from pylons import request, tmpl_context
from mediacore.forms.login import LoginForm
from mediacore.lib.base import BaseController
from mediacore.lib.helpers import redirect, url_for
from mediacore.lib.i18n import _
from mediacore.lib.decorators import expose, observable
from mediacore.plugin import events
import logging
log = logging.getLogger(__name__)
login_form = LoginForm()
class LoginController(BaseController):
@expose('login.html')
@observable(events.LoginController.login)
def login(self, came_from=None, **kwargs):
if request.environ.get('repoze.who.identity'):
redirect(came_from or '/')
# the friendlyform plugin requires that these values are set in the
# query string
form_url = url_for('/login/submit',
came_from=(came_from or '').encode('utf-8'),
__logins=str(self._is_failed_login()))
login_errors = None
if self._is_failed_login():
login_errors = Invalid('dummy', None, {}, error_dict={
'_form': Invalid(_('Invalid username or password.'), None, {}),
'login': Invalid('dummy', None, {}),
'password': Invalid('dummy', None, {}),
})
return dict(
login_form = login_form,
form_action = form_url,
form_values = kwargs,
login_errors = login_errors,
)
@expose()
def login_handler(self):
"""This is a dummy method.
Without a dummy method, Routes will throw a NotImplemented exception.
Calls that would route to this method are intercepted by
repoze.who, as defined in mediacore.lib.auth
"""
pass
@expose()
def logout_handler(self):
"""This is a dummy method.
Without a dummy method, Routes will throw a NotImplemented exception.
Calls that would route to this method are intercepted by
repoze.who, as defined in mediacore.lib.auth
"""
pass
@expose()
@observable(events.LoginController.post_login)
def post_login(self, came_from=None, **kwargs):
if not request.identity:
# The FriendlyForm plugin will always issue a redirect to
# /login/continue (post login url) even for failed logins.
# If 'came_from' is a protected page (i.e. /admin) we could just
# redirect there and the login form will be displayed again with
# our login error message.
# However if the user tried to login from the front page, this
# mechanism doesn't work so go to the login method directly here.
self._increase_number_of_failed_logins()
return self.login(came_from=came_from)
if came_from:
redirect(came_from)
# It is important to return absolute URLs (if app mounted in subdirectory)
if request.perm.contains_permission(u'edit') or request.perm.contains_permission(u'admin'):
redirect(url_for('/admin', qualified=True))
redirect(url_for('/', qualified=True))
@expose()
@observable(events.LoginController.post_logout)
def post_logout(self, came_from=None, **kwargs):
redirect('/')
def _is_failed_login(self):
# repoze.who.logins will always be an integer even if the HTTP login
# counter variable contained a non-digit string
return (request.environ.get('repoze.who.logins', 0) > 0)
def _increase_number_of_failed_logins(self):
request.environ['repoze.who.logins'] += 1
def __call__(self, environ, start_response):
"""Invoke the Controller"""
# BaseController.__call__ dispatches to the Controller method
# the request is routed to. This routing information is
# available in environ['pylons.routes_dict']
request.identity = request.environ.get('repoze.who.identity')
tmpl_context.identity = request.identity
return BaseController.__call__(self, environ, start_response)
|
gpl-3.0
| -8,258,448,679,975,395,000
| 39.645455
| 99
| 0.641244
| false
| 4.198122
| false
| false
| false
|
cryptica/slapnet
|
benchmarks/scalable/PhilosophersCM84/make_net.py
|
1
|
2045
|
#!/usr/bin/python3
import sys
print("""
petri net "The drinking philosophers for n=2" {
places {
p1h p1e
p2h p2e
req1p1 req1p2
req2p1 req2p2
fork1p1 fork1p2
fork2p1 fork2p2
fork1clean fork1dirty
fork2clean fork2dirty
}
transitions {
p1req1 p1req2 p1give1 p1give2 p1eat p1done
p2req1 p2req2 p2give1 p2give2 p2eat p2done
//p1done(2),p1eat(2),p1give1,p1give2,p1req1,p1req2,
//p2give1,p2give2,p2req1,p2req2
}
arcs {
{ p1h req1p1 fork1p2 } -> p1req1 -> { p1h req1p2 fork1p2 }
{ p1h req2p1 fork2p2 } -> p1req2 -> { p1h req2p2 fork2p2 }
{ p1h req1p1 fork1p1 fork1dirty } -> p1give1 -> { p1h req1p1 fork1p2 fork1clean }
{ p1h req2p1 fork2p1 fork2dirty } -> p1give2 -> { p1h req2p1 fork2p2 fork2clean }
{ p1h fork1p1 fork2p1 fork1clean fork2clean } -> p1eat -> { p1e fork1p1 fork2p1 fork1dirty fork2dirty }
{ p1e } -> p1done -> { p1h }
{ p2h req1p2 fork1p1 } -> p2req1 -> { p2h req1p1 fork1p1 }
{ p2h req2p2 fork2p1 } -> p2req2 -> { p2h req2p1 fork2p1 }
{ p2h req1p2 fork1p2 fork1dirty } -> p2give1 -> { p2h req1p2 fork1p1 fork1clean }
{ p2h req2p2 fork2p2 fork2dirty } -> p2give2 -> { p2h req2p2 fork2p1 fork2clean }
{ p2h fork1p2 fork2p2 fork1clean fork2clean } -> p2eat -> { p2e fork1p2 fork2p2 fork1dirty fork2dirty }
{ p2e } -> p2done -> { p2h }
}
initial {
p1h p2h
fork1dirty fork2dirty
fork1p1 fork2p1 req1p2 req2p2
}
}
liveness property "philosopher 1 does not starve" {
p1req1 + p1req2 + p1give1 + p1give2 + p1eat + p1done > 0 &&
p2req1 + p2req2 + p2give1 + p2give2 + p2eat + p2done > 0 &&
p1eat = 0
}
liveness property "philosopher 2 does not starve" {
p1req1 + p1req2 + p1give1 + p1give2 + p1eat + p1done > 0 &&
p2req1 + p2req2 + p2give1 + p2give2 + p2eat + p2done > 0 &&
p2eat = 0
}
safety property "mutual exclusion" {
p1e >= 1 && p2e >= 1
}
""")
|
gpl-3.0
| -4,088,092,992,242,717,000
| 33.083333
| 112
| 0.605868
| false
| 2.310734
| false
| false
| false
|
uwosh/uwosh.intranet.policy
|
uwosh/intranet/policy/importexport.py
|
1
|
5459
|
from Products.CMFCore.utils import getToolByName
from Products.LDAPMultiPlugins import manage_addLDAPMultiPlugin
def install(context):
if not context.readDataFile('uwosh.intranet.policy.txt'):
return
setupLDAPPlugin(context)
def setupLDAPPlugin(context):
ldap_plugin_id = 'ldap_authentication'
SUBTREE = 2 # this value comes from the zmi "Add LDAP Multi Plugin" html source
acl_users = context.getSite().acl_users
if hasattr(acl_users, ldap_plugin_id):
logger = context.getLogger('uwosh.intranet.policy')
logger.warning('Not configuring LDAP plugin, because "acl_users.%s" already exists.' % ldap_plugin_id)
return
manage_addLDAPMultiPlugin(
acl_users,
id=ldap_plugin_id,
title='LDAP Authentication',
LDAP_server='ldap.uwosh.edu:389',
login_attr='uid',
uid_attr='uid',
users_base='ou=people,o=uwosh.edu,dc=uwosh,dc=edu',
users_scope=SUBTREE,
roles='Anonymous',
groups_base='ou=groups,o=uwosh.edu,dc=uwosh,dc=edu',
groups_scope=SUBTREE,
binduid='',
bindpwd='',
binduid_usage=False,
rdn_attr='uid',
local_groups=False,
use_ssl=False,
encryption='SHA',
read_only=True
)
ldap_auth = getattr(acl_users, ldap_plugin_id)
ldap_schema = {
'cn': {
'public_name': 'fullname',
'binary': False,
'ldap_name': 'cn',
'friendly_name': 'Canonical Name',
'multivalued': False
},
'mail': {
'public_name': 'email',
'binary': False,
'ldap_name': 'mail',
'friendly_name': 'Email Address',
'multivalued': False
},
'sn': {
'public_name': 'lastname',
'binary': False,
'ldap_name': 'sn',
'friendly_name': 'Last Name',
'multivalued': False
},
'givenName': {
'public_name': 'firstname',
'binary': False,
'ldap_name': 'givenName',
'friendly_name': 'First Name',
'multivalued': False
},
'uid': {
'public_name': '',
'binary': False,
'ldap_name': 'uid',
'friendly_name': 'uid',
'multivalued': False
},
'eduPersonAffiliation': {
'public_name': 'eduPersonAffiliation',
'binary': False,
'ldap_name': 'eduPersonAffiliation',
'friendly_name': 'eduPersonAffiliation',
'multivalued': True
},
'eduPersonPrimaryAffiliation': {
'public_name': 'eduPersonPrimaryAffiliation',
'binary': False,
'ldap_name': 'eduPersonPrimaryAffiliation',
'friendly_name': 'eduPersonPrimaryAffiliation',
'multivalued': False
},
'ou': {
'public_name': 'ou',
'binary': False,
'ldap_name': 'ou',
'friendly_name': 'Organizational Unit',
'multivalued': False
},
'uwodepartmentassoc': {
'public_name': 'uwodepartmentassoc',
'binary': False,
'ldap_name': 'uwodepartmentassoc',
'friendly_name': 'UWO Department Association',
'multivalued': False
},
'l': {
'public_name': 'location',
'binary': False,
'ldap_name': 'l',
'friendly_name': 'Location',
'multivalued': False
},
'telephoneNumber': {
'public_name': 'phone',
'binary': False,
'ldap_name': 'telephoneNumber',
'friendly_name': 'Phone Number',
'multivalued': False
},
'mailUserStatus': {
'public_name': 'mailUserStatus',
'binary': False,
'ldap_name': 'mailUserStatus',
'friendly_name': 'Mail User Status',
'multivalued': False
},
'uwomailstop': {
'public_name': 'uwomailstop',
'binary': False,
'ldap_name': 'uwomailstop',
'friendly_name': 'UWO Mail Stop',
'multivalued': False
},
'displayName': {
'public_name': 'displayname',
'binary': False,
'ldap_name': 'displayName',
'friendly_name': 'Display Name',
'multivalued': False
},
}
ldap_auth.acl_users.setSchemaConfig(ldap_schema)
ldap_auth.acl_users._user_objclasses = ['inetOrgPerson']
ldap_auth.manage_activateInterfaces(['IUserEnumerationPlugin', 'IPropertiesPlugin', 'IAuthenticationPlugin'])
movePluginToHeadOfList(acl_users.plugins, 'IPropertiesPlugin', ldap_plugin_id)
def movePluginToHeadOfList(plugin_registry, plugin_type, plugin_id):
interface = plugin_registry._getInterfaceFromName(plugin_type)
index = plugin_registry._getPlugins(interface).index(plugin_id)
while index > 0:
plugin_registry.movePluginsUp(interface, [plugin_id])
new_index = plugin_registry._getPlugins(interface).index(plugin_id)
if new_index >= index:
# The plugin didn't move up. We calmly sidestep the infinite loop.
break
index = new_index
|
gpl-2.0
| -1,045,420,193,652,714,100
| 32.913043
| 113
| 0.52702
| false
| 4.067809
| false
| false
| false
|
realopenit/bubble
|
bubble/commands/cmd_examples.py
|
1
|
1437
|
# -*- coding: utf-8 -*-
# Part of bubble. See LICENSE file for full copyright and licensing details.
import click
from ..cli import pass_bubble
from ..util.examples import all_examples_functions
@click.command('examples',
short_help='Show example for doing some task in bubble(experimental)')
@click.option('--name',
'-n',
default=None,
help='show the example with the name')
@click.option('--all',
'-a',
is_flag=True,
default=False,
help='show all the examples')
@pass_bubble
def cli(ctx, name,all):
"""Show example for doing some task in bubble(experimental)"""
ctx.gbc.say('all_example_functions',stuff=all_examples_functions, verbosity=1000)
for example in all_examples_functions:
if all or (name and example['name'] == name):
if all:
ctx.gbc.say('example',stuff=example, verbosity=100)
name = example['name']
#click.echo_via_pager(example['fun']())
click.echo("#"*80)
click.echo("### start of bubble example: "+name)
click.echo("#"*80)
click.echo(example['fun']())
click.echo("#"*80)
click.echo("### end of bubble example: "+name)
click.echo("#"*80)
click.echo()
else:
click.echo("available example: " + example['name'])
|
gpl-3.0
| -2,532,512,840,913,987,000
| 33.214286
| 85
| 0.557411
| false
| 4.047887
| false
| false
| false
|
yw374cornell/e-mission-server
|
emission/core/wrapper/stop.py
|
1
|
1696
|
import logging
import emission.core.wrapper.wrapperbase as ecwb
class Stop(ecwb.WrapperBase):
props = {"trip_id": ecwb.WrapperBase.Access.WORM, # trip_id of the parent trip
"enter_ts": ecwb.WrapperBase.Access.WORM, # the timestamp of entry (in secs)
"enter_local_dt": ecwb.WrapperBase.Access.WORM, # searchable datetime in timezone of entry
"enter_fmt_time": ecwb.WrapperBase.Access.WORM, # formatted entry time in timezone of place
"exit_ts": ecwb.WrapperBase.Access.WORM, # the timestamp of exit (in secs)
"exit_local_dt": ecwb.WrapperBase.Access.WORM, # searchable datetime in timezone of exit
"exit_fmt_time": ecwb.WrapperBase.Access.WORM, # formatted time in timezone of place
"ending_section": ecwb.WrapperBase.Access.WORM, # the id of the trip just before this
"starting_section": ecwb.WrapperBase.Access.WORM, # the id of the trip just after this
"enter_loc": ecwb.WrapperBase.Access.WORM, # the location in geojson format
"exit_loc": ecwb.WrapperBase.Access.WORM, # the location in geojson format
"source": ecwb.WrapperBase.Access.WORM, # the method used to generate this place
"duration": ecwb.WrapperBase.Access.WORM} # the duration for which we were in this place
enums = {}
geojson = ["enter_loc", "exit_loc"]
nullable = ["enter_ts", "enter_fmt_time", "ending_section", # for the start of a chain
"exit_ts", "exit_fmt_time", "starting_section"] # for the end of a chain
local_dates = ['enter_local_dt', 'exit_local_dt']
def _populateDependencies(self):
pass
|
bsd-3-clause
| -4,098,555,770,510,610,400
| 64.230769
| 104
| 0.653892
| false
| 3.608511
| false
| false
| false
|
RNAcentral/rnacentral-import-pipeline
|
tests/cli/pdb_test.py
|
1
|
1261
|
# -*- coding: utf-8 -*-
"""
Copyright [2009-2018] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import pytest
from click.testing import CliRunner
from rnacentral_pipeline.cli import pdb
@pytest.mark.parametrize(
"command,output,pdbs",
[
("data", "pdb.json", ("1S72",)),
("extra", "pdb-extra.json", ("1S72",)),
],
)
def test_can_fetch_expected_data(command, output, pdbs):
runner = CliRunner()
with runner.isolated_filesystem():
args = [command, output]
args.extend(pdbs)
result = runner.invoke(pdb.cli, args)
assert result.exit_code == 0, result.output
assert not result.exception
with open(output, "rb") as raw:
assert raw.read()
|
apache-2.0
| 7,726,899,241,946,992,000
| 29.756098
| 72
| 0.694687
| false
| 3.730769
| false
| false
| false
|
ekapujiw2002/kweb
|
kwebhelper.py
|
1
|
34980
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# helper file for kweb Minimal Kiosk Browser
# Copyright 2013-2014 by Guenter Kreidl
# free software without any warranty
# you can do with it what you like
# version 1.4
import os,urllib,sys,subprocess,threading,time
import Tkinter as tk
# GLOBAL OPTIONS
# use external settings file, if not empty
#settings = ''
settings = '/usr/local/bin/kwebhelper_settings.py'
# where the downloads, PDF files etc. go, make sure a "Downloads" folder exists there
#homedir = '/media/volume'
homedir = ''
# if empty, the user's home dir will be taken
# OMXPLAYER AUDIO VIDEO OPTIONS
omxoptions = []
# for selecting the sound output, uncomment one of these:
#omxoptions = ['-o','hdmi']
#omxoptions = ['-o','local']
# more options are also possible of course
# special options for watching live tv streams (omxplayer > 0.32)
omx_livetv_options = ['--live']
# add the start of your live tv stream links to this list to enable live tv options
live_tv = []
# like this:
#live_tv = ['http://192.168.0.5:9082']
# set this to false, if you want to allow more than one omxplayer instance
kill_omxplayer = True
#kill_omxplayer = False
# mimetypes: if given, this will restrict what omxplayer will be given to play:
mimetypes = []
# normally omxplayer is started from a terminal (xterm), to clear the screen and get full keyboard control
# Set the following to "False" to use omxplayer without starting a terminal first
omxplayer_in_terminal_for_video = True
#omxplayer_in_terminal_for_video = False
omxplayer_in_terminal_for_audio = True
#omxplayer_in_terminal_for_audio = False
# options for m3u playlists, to check that they contain only audio files or streams
audioextensions = ['mp3','aac','flac','wav','wma','cda','ogg','ogm','ac3','ape']
try_stream_as_audio = False
# if set to "True", the following list will be used for checking for video files
videoextensions = ['asf','avi','mpg','mp4','mpeg','m2v','m1v','vob','divx','xvid','mov','m4v','m2p','mkv','m2ts','ts','mts','wmv','webm']
# Play audio files or playlists that contain only audio files in omxaudioplayer GUI:
useAudioplayer = True
# options for omxplayer to be used when playing audio
omxaudiooptions = []
# volume setting when starting omxaudioplayer ranging from -20 to 4 ( -60 to +12 db)
defaultaudiovolume = 0
# start playing and close after playing last song automatically (if "True", set to "False" to disable)
autoplay = True
autofinish = True
# Interface settings for omxaudioplayer:
# The font to be used for playlist and buttons
fontname = 'SansSerif'
# value between 10 and 22, will also determine the size of the GUI window:
fontheight = 14
# number of entries displayed in playlist window, between 5 and 25:
maxlines = 8
# width of the window, value between 40 and 80, defines the minimum number of characters of the song name
# displayed in the songlist (usually much more are shown!)
lwidth = 40
# if the following is set to "True", vlc will be used to play audio files and playlists (audio only)
useVLC = False
#useVLC = True
#COMMAND EXECUTION OPTIONS
# if this is set to "True", all Desktop (GUI) programs will be executed without starting a terminal first
check_desktop = True
#check_desktop = False
# direct commands will be executed without starting a terminal first
# use it for background commands or programs with a GUI that are not desktop programs or if check_desktop is set to "False"
direct_commands = ['kwebhelper.py','omxplayer']
# preferred terminal to run commands in, must be set
preferred_terminal = 'lxterminal'
#preferred_terminal = 'xterm'
formdata_in_terminal = False
#formdata_in_terminal = True
# set the following to "True" if you want to save memory overhead (but you'll get more disk write accesses)
run_as_script = False
#run_as_script = True
# PDF OPTIONS
# preferred pdf reader; both must be set or emtpy
pdfprogpath = ''
pdfprog = ''
#pdfprogpath = '/usr/bin/mupdf'
#pdfprog = 'mupdf'
# additional options for pdf program (must match the selected program!):
pdfoptions = []
#pdfoptions = ['-fullscreen']
# this allows pdf files on a local server to be opened as files instead of downloading them first;
# will only work with "http://localhost" links
pdfpathreplacements = {}
#pdfpathreplacements = {'http://localhost:8073/Ebooks1':'file:///var/www/Ebooks1'}
# DOWNLOAD OPTIONS
#download options for external download mode, enable one of these options:
show_download_in_terminal = True
#show_download_in_terminal = False
# ONLINE VIDEO OPTIONS
# options for pages containing video, either HTML5 video tags or all websites supported by youtube-dl
# if html5 video tags include more than one source format, select the preferred one here
preferred_html5_video_format = '.mp4'
# Choose, if HTML5 URL extraction is tried first and youtube-dl extraction afterwards or vice versa
html5_first = True
#html5_first = False
#additional youtube-dl options, e. g. selecting a resolution or file format
youtube_dl_options = []
#youtube_dl_options = ['-f','37/22/18']
# special omxplayer options for web video
youtube_omxoptions = []
# to use the same options as for other video, set
#youtube_omxoptions = omxoptions
### end of global settings
# take settings from separate file:
if settings and os.path.exists(settings):
try:
execfile(settings)
except:
pass
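# For illustration only: the settings override file is plain Python executed via
# execfile(), so it simply re-assigns any of the option variables defined above.
# A hypothetical /usr/local/bin/kwebhelper_settings.py could contain, e.g.:
#   omxoptions = ['-o', 'hdmi']
#   preferred_terminal = 'xterm'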
if not homedir:
homedir = os.path.expanduser('~')
dldir = homedir +'/Downloads'
if not os.path.exists(dldir):
os.mkdir(dldir)
# helper functions
def get_opt(options):
if '--win' in options:
pos = options.index('--win')
if pos < (len(options) -2):
options[pos+1] = '"' + options[pos+1] + '"'
return ' '.join(options)
def get_playlist(url, audio_as_stream):
playlist = []
fn = ''
audioonly = True
go = False
if url.startswith('http://'):
try:
fn,h = urllib.urlretrieve(url)
go = True
except:
pass
elif url.startswith('file://'):
fn = url.replace('file://','').replace('%20',' ')
fn = urllib.unquote(fn)
if os.path.exists(fn):
go = True
elif os.path.exists(url):
fn = url
go = True
if go:
f = file(fn,'rb')
pl = f.read()
f.close()
if url.startswith('http://'):
os.remove(fn)
pll = pl.split('\n')
if url.lower().endswith('.m3u') or url.lower().endswith('.m3u8'):
for s in pll:
if s != '' and not s.startswith('#'):
if s.split('.')[-1].lower() in audioextensions:
pass
elif audio_as_stream and s.split('.')[-1].lower() not in videoextensions:
pass
else:
audioonly = False
playlist.append(s)
elif url.lower().endswith('.pls'):
for s in pll:
if s.startswith('File'):
aurl = s.split('=')[1].strip()
playlist.append(aurl)
return (audioonly, playlist)
def video_tag_extractor(url):
result = []
if url.startswith('file://'):
fpath = url.replace('file://','').replace('%20',' ')
else:
try:
fpath,h = urllib.urlretrieve(url)
except:
return result
f = file(fpath,'rb')
html = f.read()
f.close()
if '<video ' in html:
htl = html.split('<video')
for ind in range(1,len(htl)):
if not 'src="' in htl[ind]:
continue
vtl = htl[ind].split('src="')
if len(vtl) > 2:
links = []
for l in vtl[1:]:
pos = l.find('"')
links.append(l[0:pos])
link = links[0]
for li in links:
if preferred_html5_video_format and li.lower().endswith(preferred_html5_video_format):
link = li
else:
vt = vtl[1]
pos = vt.find('"')
link = vt[0:pos]
if link.startswith('http://') or link.startswith('https://') or link.startswith('rtsp://') or link.startswith('rtmp://'):
result.append(link)
elif link.startswith('file://'):
newlink = '"'+link.replace('file://','').replace('%20',' ')+'"'
result.append(newlink)
else:
urll = url.split('/')
if link.startswith('/'):
newlink = '/'.join(urll[0:3]+[link[1:]])
else:
relcount = len(urll) - 1 - link.count('../')
newlink = '/'.join(urll[0:relcount]+[link.replace('../','')])
if newlink.startswith('file://'):
newlink = '"'+newlink.replace('file://','').replace('%20',' ')+'"'
result.append(newlink)
return result
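# Illustration only (hypothetical page): for a document fetched from
# http://example.org/page.html that contains <video src="clip.mp4"></video>,
# video_tag_extractor resolves the relative source and returns
# ['http://example.org/clip.mp4'].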
def play_ytdl(res):
vlist = res.split('\n')
if (len(vlist) == 1) or (len(vlist) == 2 and vlist[1] == ''):
vurl = vlist[0]
if kill_omxplayer:
dummy = os.system('killall omxplayer.bin > /dev/null 2>&1')
pargs = ["xterm","-fn","fixed","-fullscreen", "-maximized", "-bg", "black", "-fg", "black", "-e",'omxplayer']+youtube_omxoptions+[vurl]+['>', '/dev/null', '2>&1']
os.execv("/usr/bin/xterm",pargs)
else:
if kill_omxplayer:
script = '#!/bin/bash\nkillall omxplayer.bin > /dev/null 2>&1\n'
else:
script = '#!/bin/bash\n'
for vurl in vlist:
if vurl != '':
script += 'omxplayer ' + get_opt(youtube_omxoptions) + ' "' + vurl + '" > /dev/null 2>&1\n'
f = file(dldir+os.sep+'playall.sh','wb')
f.write(script)
f.close()
os.chmod(dldir+os.sep+'playall.sh',511)
os.execl("/usr/bin/xterm","xterm","-fn","fixed","-fullscreen", "-maximized", "-bg", "black", "-fg", "black", "-e",dldir+os.sep+'playall.sh')
def play_html5(tags):
if len(tags) == 1:
if kill_omxplayer:
dummy = os.system('killall omxplayer.bin > /dev/null 2>&1')
pargs = ["xterm","-fn","fixed","-fullscreen", "-maximized", "-bg", "black", "-fg", "black", "-e",'omxplayer']+youtube_omxoptions+[tags[0]]+['>', '/dev/null', '2>&1']
os.execv("/usr/bin/xterm",pargs)
else:
if kill_omxplayer:
script = '#!/bin/bash\nkillall omxplayer.bin > /dev/null 2>&1\n'
else:
script = '#!/bin/bash\n'
for t in tags:
script += 'omxplayer ' + get_opt(youtube_omxoptions) + ' ' + t + ' > /dev/null 2>&1\n'
f = file(dldir+os.sep+'playall.sh','wb')
f.write(script)
f.close()
os.chmod(dldir+os.sep+'playall.sh',511)
os.execl("/usr/bin/xterm","xterm","-fn","fixed","-fullscreen", "-maximized", "-bg", "black", "-fg", "black", "-e",dldir+os.sep+'playall.sh')
# omxaudioplayer GUI
class omxaudioplayer(tk.Frame):
def __init__(self, master=None, playlist=[],mode='simple',autofinish=True,volume=0,omxoptions=[],
fontheight=14,fontname='SansSerif',maxlines=8,width=40,autoplay=True):
tk.Frame.__init__(self, master)
self.set_defaults()
self.fontheight = min([max([fontheight,10]),22])
self.fontname = fontname
try:
self.font = (self.fontname,str(self.fontheight),'bold')
except:
self.font = ('SansSerif',str(self.fontheight),'bold')
self.maxlines = min([max([maxlines,5]),25])
self.defaultwidth = min([max([width,40]),80])
self.root = master
self.root.bind("<<finished>>",self.on_finished)
self.root.protocol('WM_DELETE_WINDOW', self.on_close)
self.root.title("omxaudioplayer")
self.root.resizable(False,False)
for keysym in self.keybindings:
self.root.bind(keysym,self.keyp_handler)
self.grid()
self.omxoptions = omxoptions
self.autofinish = autofinish
self.playlist = playlist
self.autoplay = autoplay
self.mode = mode
self.status = 'stopped'
self.omxprocess = None
self.omxwatcher = None
self.songpointer = 0
self.listpointer = 0
self.currentvolume = min([max([volume,-20]),4])
self.changedvolume = tk.IntVar()
self.changedvolume.set(volume)
self.playcontent = tk.StringVar()
self.playcontent.set(self.playstring)
self.createwidgets()
if self.playlist and self.autoplay:
self.playsong(0)
def set_defaults(self):
self.playstring = '>'
self.pausestring = '||'
self.stopstring = '[]'
self.rewstring = '←'
self.fwdstring = '→'
self.prevstring = '↑'
self.nextstring = '↓'
self.vchdelay = 0.05
self.keybindings = ['<KeyPress-Down>','<KeyPress-Up>','<KeyPress-space>','<KeyPress-q>','<KeyPress-Escape>',
'<KeyPress-plus>','<KeyPress-minus>','<KeyPress-Left>','<KeyPress-Right>','<KeyPress-Return>',
'<KeyPress-KP_Enter>','<KeyPress-KP_Add>','<KeyPress-KP_Subtract>']
def keyp_handler(self, event):
if event.keysym in ['space','Return','KP_Enter']:
self.playpause()
elif event.keysym in ['q','Escape']:
self.stop()
elif event.keysym == 'Down':
while self.nextbutton['state'] == tk.DISABLED:
time.sleep(0.1)
self.nextsong()
elif event.keysym == 'Up':
while self.prevbutton['state'] == tk.DISABLED:
time.sleep(0.1)
self.prevsong()
elif event.keysym == 'Left':
self.sendcommand('\x1b\x5b\x44')
elif event.keysym == 'Right':
self.sendcommand('\x1b\x5b\x43')
else:
av = 0
if event.keysym in ['plus','KP_Add']:
av = 1
elif event.keysym in ['minus','KP_Subtract']:
av = -1
if av != 0:
nv = self.changedvolume.get() + av
if nv in range(-20,5):
self.changedvolume.set(nv)
self.vol_changed(nv)
def playsong(self, index):
if not self.omxprocess:
self.prevbutton['state'] = tk.DISABLED
self.nextbutton['state'] = tk.DISABLED
self.songpointer = index
pargs = ['omxplayer', '--vol', str(self.currentvolume*300)] + self.omxoptions + [self.playlist[index]]
self.omxprocess = subprocess.Popen(pargs,stdin=subprocess.PIPE,stdout=file('/dev/null','wa'))
self.omxwatcher = threading.Timer(0,self.watch)
self.omxwatcher.start()
self.status = 'playing'
self.playcontent.set(self.pausestring)
selection = self.playlistwindow.curselection()
if not selection or index != int(selection[0]):
self.listpointer = index
self.playlistwindow.selection_clear(0, len(self.playlist)-1)
self.playlistwindow.selection_set(index)
self.playlistwindow.see(index)
time.sleep(0.3)
self.prevbutton['state'] = tk.NORMAL
self.nextbutton['state'] = tk.NORMAL
def on_close(self):
if self.omxprocess:
self.status='closing'
self.sendcommand('q')
time.sleep(0.1)
if self.omxprocess:
try:
self.omxprocess.terminate()
time.sleep(0.1)
except:
pass
if self.omxprocess:
try:
self.omxprocess.kill()
time.sleep(0.1)
except:
pass
self.root.destroy()
def on_finished(self, *args):
stat = self.status
self.status = 'stopped'
self.playcontent.set(self.playstring)
if stat != 'finished':
if self.songpointer == self.listpointer:
self.nextsong()
else:
self.songpointer = self.listpointer
self.playsong(self.songpointer)
def watch(self):
if self.omxprocess:
try:
dummy = self.omxprocess.wait()
except:
pass
self.omxprocess = None
if self.status != 'closing':
self.root.event_generate("<<finished>>")
def sendcommand(self, cmd):
if self.omxprocess:
try:
self.omxprocess.stdin.write(cmd)
except:
pass
def playpause(self):
if self.status in ['stopped','finished']:
self.songpointer = self.listpointer
self.playsong(self.songpointer)
elif self.status == 'paused':
self.sendcommand('p')
self.status = 'playing'
self.playcontent.set(self.pausestring)
elif self.status == 'playing':
self.sendcommand('p')
self.status = 'paused'
self.playcontent.set(self.playstring)
def stop(self,stat='finished'):
if self.omxprocess:
self.status = stat
self.sendcommand('q')
else:
self.playcontent.set(self.playstring)
self.status = 'stopped'
def rewind(self):
self.sendcommand('\x1b\x5b\x44')
def forward(self):
self.sendcommand('\x1b\x5b\x43')
def prevsong(self):
if self.listpointer != self.songpointer and self.status != 'stopped':
self.stop('stopped')
elif self.listpointer > 0:
self.listpointer = self.listpointer - 1
self.playlistwindow.selection_clear(0, len(self.playlist)-1)
self.playlistwindow.selection_set(self.listpointer)
if self.status == 'stopped':
self.playsong(self.listpointer)
else:
self.stop('stopped')
def nextsong(self):
if self.listpointer != self.songpointer and self.status != 'stopped':
self.stop('stopped')
elif self.listpointer < len(self.playlist)-1:
self.listpointer = self.listpointer + 1
self.playlistwindow.selection_clear(0, len(self.playlist)-1)
self.playlistwindow.selection_set(self.listpointer)
if self.status == 'stopped':
self.playsong(self.listpointer)
else:
self.stop('stopped')
elif self.autofinish:
self.on_close()
def vol_changed(self, volume):
vol = int(volume)
if self.status != 'stopped':
if vol > self.currentvolume:
diff = vol - self.currentvolume
self.currentvolume = vol
for k in range(0,diff):
self.sendcommand('+')
time.sleep(self.vchdelay)
elif vol < self.currentvolume:
diff = self.currentvolume - vol
self.currentvolume = vol
for k in range(0,diff):
self.sendcommand('-')
time.sleep(self.vchdelay)
else:
self.currentvolume = vol
def on_listbox_select(self,event):
sel = self.playlistwindow.curselection()
if sel:
self.listpointer = int(sel[0])
def on_listbox_double(self,event):
self.on_listbox_select(event)
if self.status != 'stopped':
if self.songpointer == self.listpointer:
self.stop()
self.playsong(self.listpointer)
else:
self.stop('stopped')
else:
self.playsong(self.listpointer)
def focus_out(self, event):
self.root.focus_set()
def createwidgets(self):
if len(self.playlist) > self.maxlines:
self.yScroll = tk.Scrollbar(self, orient=tk.VERTICAL)
self.yScroll['width'] = int(self.yScroll['width']) + (self.fontheight-10)
hg = self.maxlines
else:
hg = len(self.playlist)
self.playlistwindow = tk.Listbox(self, takefocus=0, selectmode = 'single', width = self.defaultwidth, height = hg, font=self.font,activestyle='none',bg='#000', fg = '#ddd', selectbackground='#60c', selectforeground='#ffffd0')
for url in self.playlist:
song = url.split('/')[-1]
self.playlistwindow.insert(tk.END, urllib.unquote(song).replace('%20',' '))
self.playlistwindow.selection_set(self.songpointer)
self.playlistwindow.bind("<<ListboxSelect>>", self.on_listbox_select)
self.playlistwindow.bind("<Double-Button-1>",self.on_listbox_double)
self.playlistwindow.bind("<FocusIn>",self.focus_out)
self.playlistwindow.grid(row=0,column=0,columnspan=7, sticky=tk.N+tk.S+tk.E+tk.W)
if len(self.playlist) > self.maxlines:
self.playlistwindow.configure(yscrollcommand=self.yScroll.set)
self.yScroll['command'] = self.playlistwindow.yview
self.yScroll.grid(row=0,column=7, sticky=tk.N+tk.S)
self.playbutton = tk.Button(self, command=self.playpause, font=self.font, textvariable = self.playcontent, width = 3, justify = tk.CENTER)
self.playbutton.grid(row=1,column=0)
self.stopbutton = tk.Button(self, command=self.stop, font=self.font, text = self.stopstring, width = 3, justify = tk.CENTER)
self.stopbutton.grid(row=1,column=1)
self.prevbutton = tk.Button(self, command=self.rewind, font=self.font, text = self.rewstring, width = 3, justify = tk.CENTER)
self.prevbutton.grid(row=1,column=2)
self.nextbutton = tk.Button(self, command=self.forward, font=self.font, text = self.fwdstring, width = 3, justify = tk.CENTER)
self.nextbutton.grid(row=1,column=3)
self.prevbutton = tk.Button(self, command=self.prevsong, font=self.font, text = self.prevstring, width = 3, justify = tk.CENTER)
self.prevbutton.grid(row=1,column=4)
self.nextbutton = tk.Button(self, command=self.nextsong, font=self.font, text = self.nextstring, width = 3, justify = tk.CENTER)
self.nextbutton.grid(row=1,column=5)
self.volume = tk.Scale(self, command=self.vol_changed, font=self.font, length=str((self.fontheight-2)*(self.defaultwidth-30))+'p', from_ = -20, to=4, variable=self.changedvolume ,orient=tk.HORIZONTAL, resolution=1, showvalue=0)
self.volume.grid(row=1,column=6)
# main script function
args = sys.argv
if len(args) > 2:
mode = args[1]
url = args[2]
mimetype = ''
# media section: play audio, video, m3u playlists and streams
if mode == 'av':
mtflag = True
if len(args) > 3:
mimetype = args[3]
if mimetypes and mimetype not in mimetypes:
mtflag = False
url_extension = url.lower().split('.')[-1]
if url_extension in ['m3u','m3u8','pls'] and mtflag:
audioonly, playlist = get_playlist(url,try_stream_as_audio)
if playlist:
if audioonly and useVLC:
os.execl("/usr/bin/vlc","vlc",url)
elif audioonly and useAudioplayer:
if kill_omxplayer:
dummy = os.system('killall omxplayer.bin > /dev/null 2>&1')
root = tk.Tk()
player = omxaudioplayer(master=root, playlist=playlist,volume=defaultaudiovolume,omxoptions=omxaudiooptions,
autofinish=autofinish,fontheight=fontheight,fontname=fontname,maxlines=maxlines,
autoplay=autoplay,width=lwidth)
player.mainloop()
else:
if audioonly:
options = omxaudiooptions
else:
options = omxoptions
if kill_omxplayer:
script = '#!/bin/bash\nkillall omxplayer.bin > /dev/null 2>&1\n'
else:
script = '#!/bin/bash\n'
for s in playlist:
if audioonly and omxplayer_in_terminal_for_audio:
script += 'echo "now playing: '+ urllib.unquote(s.split('/')[-1]) +'"\n'
script += 'omxplayer ' + get_opt(options) + ' "' + s + '" > /dev/null 2>&1\n'
f = file(dldir+os.sep+'playall.sh','wb')
f.write(script)
f.close()
os.chmod(dldir+os.sep+'playall.sh',511)
if omxplayer_in_terminal_for_audio and audioonly:
os.execlp(preferred_terminal,preferred_terminal,"-e",dldir+os.sep+'playall.sh')
elif omxplayer_in_terminal_for_video and not audioonly:
os.execl("/usr/bin/xterm","xterm","-fn","fixed","-fullscreen", "-maximized", "-bg", "black", "-fg", "black", "-e",dldir+os.sep+'playall.sh')
else:
os.execl(dldir+os.sep+'playall.sh','playall.sh')
elif mtflag:
url_valid = True
if url.startswith('file://'):
url = url.replace('file://','').replace('%20',' ')
url = urllib.unquote(url)
if not os.path.exists(url):
url_valid = False
elif not url.startswith('http'):
if not os.path.exists(url):
url_valid = False
if url_valid:
if url_extension in audioextensions or (try_stream_as_audio and not url_extension in videoextensions):
if useVLC:
os.execl("/usr/bin/vlc","vlc",url)
elif useAudioplayer:
if kill_omxplayer:
dummy = os.system('killall omxplayer.bin > /dev/null 2>&1')
root = tk.Tk()
player = omxaudioplayer(master=root, playlist=[url],volume=defaultaudiovolume,omxoptions=omxaudiooptions,
autofinish=autofinish,fontheight=fontheight,fontname=fontname,maxlines=maxlines,
autoplay=autoplay,width=lwidth)
player.mainloop()
else:
if kill_omxplayer:
dummy = os.system('killall omxplayer.bin > /dev/null 2>&1')
if omxplayer_in_terminal_for_audio:
pargs = [preferred_terminal,'-e','omxplayer'] + omxaudiooptions + [url]
os.execvp(preferred_terminal,pargs)
else:
pargs = ['omxplayer'] + omxaudiooptions + [url]
os.execvp('omxplayer',pargs)
else:
if kill_omxplayer:
dummy = os.system('killall omxplayer.bin > /dev/null 2>&1')
options = omxoptions
if live_tv:
for lt in live_tv:
if url.startswith(lt):
options = omx_livetv_options
break
if omxplayer_in_terminal_for_video:
pargs = ["xterm","-fn","fixed","-fullscreen", "-maximized", "-bg", "black", "-fg", "black", "-e",'omxplayer']+options+[url]+['>', '/dev/null', '2>&1']
os.execv("/usr/bin/xterm",pargs)
else:
pargs = ['omxplayer'] + omxoptions + [url]
os.execvp('omxplayer',pargs)
# end of media section
# pdf section (download - if needed - and open pdf file)
elif mode == 'pdf':
if not (pdfprogpath and pdfprog):
if os.path.exists('/usr/bin/xpdf'):
pdfprogpath = '/usr/bin/xpdf'
pdfprog = 'xpdf'
else:
pdfprogpath = '/usr/bin/mupdf'
pdfprog = 'mupdf'
go = False
# option to open pdf as files from http://localhost instead of downloading them first
if pdfpathreplacements and url.startswith('http://localhost'):
for k,v in pdfpathreplacements.iteritems():
if url.startswith(k):
nurl = url.replace(k,v)
if os.path.exists(urllib.unquote(nurl.replace('file://','').replace('%20',' ').split('#')[0])):
url = nurl
break
if url.startswith('file://'):
url = url.replace('file://','').replace('%20',' ')
url = urllib.unquote(url)
urll = url.split('#page=')
f = urll[0]
if os.path.exists(f):
if len(urll) > 1:
page = urll[1].split('&')[0]
os.execv(pdfprogpath,[pdfprog]+pdfoptions+[f,page])
else:
os.execv(pdfprogpath,[pdfprog]+pdfoptions+[f])
else:
if url.endswith('.pdf') or url.endswith('.PDF') or '.pdf#page' in url.lower():
urll = url.split('#page=')
fname = urllib.unquote(urll[0].split('/')[-1].replace('%20',' '))
f = dldir+os.sep+urllib.unquote(urll[0].split('/')[-1].replace('%20',' '))
if os.path.exists(f):
go = True
else:
try:
fn,h = urllib.urlretrieve(urll[0],f)
go = True
except:
pass
if go:
if len(urll) > 1:
page = urll[1].split('&')[0]
os.execv(pdfprogpath,[pdfprog]+pdfoptions+[f,page])
else:
os.execv(pdfprogpath,[pdfprog]+pdfoptions+[f])
# end of pdf section
# download section
elif mode == 'dl':
# download file using wget
if show_download_in_terminal:
os.execlp(preferred_terminal,preferred_terminal,'-e', "wget", "-P", dldir,"--no-clobber","--adjust-extension","--content-disposition",url,"--load-cookies",homedir + "/.web_cookie_jar","--no-check-certificate")
else:
os.execl("/usr/bin/wget", "wget", "-P", dldir,"--no-clobber","--adjust-extension","--content-disposition",url,"--load-cookies",homedir + "/.web_cookie_jar","--no-check-certificate")
#end of download section
# command execution section
elif mode == 'cmd':
cmd = ''
formdata = False
cpage = 'file:///homepage.html?cmd='
url = url.decode('utf-8')
if url.startswith('#'):
cmd = url[1:]
elif url.startswith(cpage):
cmd = url.replace(cpage,'')
if not cmd.startswith('formdata'):
cmd = urllib.unquote_plus(cmd).replace('%20',' ')
elif url.startswith('http://localhost') and ('/homepage.html?cmd=' in url):
cmd = url.split('/homepage.html?cmd=')[1]
if not cmd.startswith('formdata'):
cmd = urllib.unquote_plus(cmd).replace('%20',' ')
if cmd:
if cmd.startswith('formdata'):
formdata = True
cmd = cmd.split('formdata')[1].strip()
if '&' in cmd:
cmdargs = cmd.split('&')
for ind in range(0,len(cmdargs)):
if '=' in cmdargs[ind]:
cargl = cmdargs[ind].split('=')
if cargl[0].startswith('quoted') and cargl[1] != '':
cmdargs[ind] = " '" + urllib.unquote_plus(cargl[1]) + "'"
elif cargl[0].startswith('dquoted') and cargl[1] != '':
cmdargs[ind] = ' "' + urllib.unquote_plus(cargl[1]) + '"'
elif cargl[1] != '':
cmdargs[ind] = ' ' + urllib.unquote_plus(cargl[1])
else:
cmdargs[ind] = ''
else:
cmdargs[ind] = ' ' + urllib.unquote_plus(cmdargs[ind]).strip()
cmd = ''.join(cmdargs).strip()
else:
cmd = urllib.unquote_plus(cmd).strip()
cmdl = cmd.split(' ')
if len(cmdl)>1 and cmdl[0] == 'sudo':
realname = cmdl[1]
else:
realname = cmdl[0]
desktop_app = False
if check_desktop and '/' not in realname:
if os.path.exists('/usr/share/applications/'+realname+'.desktop'):
desktop_app = True
if desktop_app or (realname in direct_commands) or (formdata and not formdata_in_terminal):
cmdline = cmd.encode('utf-8')
else:
cmdline = preferred_terminal + ' -e '+cmd.encode('utf-8')
if run_as_script:
dmcount = 0
scpath = dldir+os.sep+'temp'+str(dmcount)+'.sh'
while os.path.exists(scpath):
dmcount += 1
scpath = dldir+os.sep+'temp'+str(dmcount)+'.sh'
f = file(scpath,'wb')
f.write('#!/bin/bash\n'+cmdline+'\nrm '+scpath+'\n')
f.close()
os.chmod(scpath,511)
os.execl(scpath,scpath)
else:
try:
dummy = os.system(cmdline)
except:
pass
# end of command execution section
# web video section (HTML5 and all websites supported by youtube-dl)
elif mode == 'ytdl' and os.path.exists('/usr/bin/youtube-dl'): #youtube and HTML5 videos
if html5_first:
tags = video_tag_extractor(url)
if tags: #extract embedded html5 video
play_html5(tags)
else:
yta = ['youtube-dl', '-g']+youtube_dl_options+[url]
yt = subprocess.Popen(yta,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
(res,err) = yt.communicate()
if res and not err:
play_ytdl(res)
else:
yta = ['youtube-dl', '-g']+youtube_dl_options+[url]
yt = subprocess.Popen(yta,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
(res,err) = yt.communicate()
if res and not err:
play_ytdl(res)
else:
tags = video_tag_extractor(url)
if tags: #extract embedded html5 video
play_html5(tags)
# end of web video section
|
gpl-3.0
| 2,244,612,217,892,959,500
| 41.185766
| 235
| 0.53966
| false
| 3.798414
| false
| false
| false
|
bwohlberg/sporco
|
sporco/dictlrn/cbpdndl.py
|
1
|
18601
|
# -*- coding: utf-8 -*-
# Copyright (C) 2015-2020 by Brendt Wohlberg <brendt@ieee.org>
# All rights reserved. BSD 3-clause License.
# This file is part of the SPORCO package. Details of the copyright
# and user license can be found in the 'LICENSE.txt' file distributed
# with the package.
"""Dictionary learning based on CBPDN sparse coding"""
from __future__ import print_function, absolute_import
import copy
import numpy as np
import sporco.cnvrep as cr
import sporco.admm.cbpdn as admm_cbpdn
import sporco.admm.ccmod as admm_ccmod
import sporco.pgm.cbpdn as pgm_cbpdn
import sporco.pgm.ccmod as pgm_ccmod
from sporco.dictlrn import dictlrn
import sporco.dictlrn.common as dc
from sporco.common import _fix_dynamic_class_lookup
from sporco.linalg import inner
from sporco.fft import (rfftn, irfftn, rfl2norm2)
__author__ = """Brendt Wohlberg <brendt@ieee.org>"""
def cbpdn_class_label_lookup(label):
"""Get a CBPDN class from a label string."""
clsmod = {'admm': admm_cbpdn.ConvBPDN,
'pgm': pgm_cbpdn.ConvBPDN}
if label in clsmod:
return clsmod[label]
else:
raise ValueError('Unknown ConvBPDN solver method %s' % label)
def ConvBPDNOptionsDefaults(method='admm'):
"""Get defaults dict for the ConvBPDN class specified by the ``method``
parameter.
"""
dflt = copy.deepcopy(cbpdn_class_label_lookup(method).Options.defaults)
if method == 'admm':
dflt.update({'MaxMainIter': 1, 'AutoRho':
{'Period': 10, 'AutoScaling': False,
'RsdlRatio': 10.0, 'Scaling': 2.0,
'RsdlTarget': 1.0}})
else:
dflt.update({'MaxMainIter': 1})
return dflt
def ConvBPDNOptions(opt=None, method='admm'):
"""A wrapper function that dynamically defines a class derived from
the Options class associated with one of the implementations of
the Convolutional BPDN problem, and returns an object
instantiated with the provided parameters. The wrapper is designed
to allow the appropriate object to be created by calling this
function using the same syntax as would be used if it were a
class. The specific implementation is selected by use of an
additional keyword argument 'method'. Valid values are as
specified in the documentation for :func:`ConvBPDN`.
"""
# Assign base class depending on method selection argument
base = cbpdn_class_label_lookup(method).Options
# Nested class with dynamically determined inheritance
class ConvBPDNOptions(base):
def __init__(self, opt):
super(ConvBPDNOptions, self).__init__(opt)
# Allow pickling of objects of type ConvBPDNOptions
_fix_dynamic_class_lookup(ConvBPDNOptions, method)
# Return object of the nested class type
return ConvBPDNOptions(opt)
def ConvBPDN(*args, **kwargs):
"""A wrapper function that dynamically defines a class derived from
    one of the implementations of the Convolutional BPDN problem,
    and returns an object instantiated with the provided
parameters. The wrapper is designed to allow the appropriate
object to be created by calling this function using the same
syntax as would be used if it were a class. The specific
implementation is selected by use of an additional keyword
argument 'method'. Valid values are:
- ``'admm'`` :
Use the implementation defined in :class:`.admm.cbpdn.ConvBPDN`.
- ``'pgm'`` :
Use the implementation defined in :class:`.pgm.cbpdn.ConvBPDN`.
The default value is ``'admm'``.
"""
# Extract method selection argument or set default
method = kwargs.pop('method', 'admm')
# Assign base class depending on method selection argument
base = cbpdn_class_label_lookup(method)
# Nested class with dynamically determined inheritance
class ConvBPDN(base):
def __init__(self, *args, **kwargs):
super(ConvBPDN, self).__init__(*args, **kwargs)
# Allow pickling of objects of type ConvBPDN
_fix_dynamic_class_lookup(ConvBPDN, method)
# Return object of the nested class type
return ConvBPDN(*args, **kwargs)
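# Illustrative usage sketch (not part of the module; D0, S and lmbda are
# hypothetical dictionary, signal and regularisation parameter variables):
# the wrapper is called with the same syntax as the underlying solver class,
# plus the 'method' keyword, e.g.
#
#     b = ConvBPDN(D0, S, lmbda, method='admm')
#     X = b.solve()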
def ccmod_class_label_lookup(label):
"""Get a CCMOD class from a label string."""
clsmod = {'ism': admm_ccmod.ConvCnstrMOD_IterSM,
'cg': admm_ccmod.ConvCnstrMOD_CG,
'cns': admm_ccmod.ConvCnstrMOD_Consensus,
'pgm': pgm_ccmod.ConvCnstrMOD}
if label in clsmod:
return clsmod[label]
else:
raise ValueError('Unknown ConvCnstrMOD solver method %s' % label)
def ConvCnstrMODOptionsDefaults(method='pgm'):
"""Get defaults dict for the ConvCnstrMOD class specified by the
``method`` parameter.
"""
dflt = copy.deepcopy(ccmod_class_label_lookup(method).Options.defaults)
if method == 'pgm':
dflt.update({'MaxMainIter': 1})
else:
dflt.update({'MaxMainIter': 1, 'AutoRho':
{'Period': 10, 'AutoScaling': False,
'RsdlRatio': 10.0, 'Scaling': 2.0,
'RsdlTarget': 1.0}})
return dflt
def ConvCnstrMODOptions(opt=None, method='pgm'):
"""A wrapper function that dynamically defines a class derived from
the Options class associated with one of the implementations of
the Convolutional Constrained MOD problem, and returns an object
instantiated with the provided parameters. The wrapper is designed
to allow the appropriate object to be created by calling this
function using the same syntax as would be used if it were a
class. The specific implementation is selected by use of an
additional keyword argument 'method'. Valid values are as
specified in the documentation for :func:`ConvCnstrMOD`.
"""
# Assign base class depending on method selection argument
base = ccmod_class_label_lookup(method).Options
# Nested class with dynamically determined inheritance
class ConvCnstrMODOptions(base):
def __init__(self, opt):
super(ConvCnstrMODOptions, self).__init__(opt)
# Allow pickling of objects of type ConvCnstrMODOptions
_fix_dynamic_class_lookup(ConvCnstrMODOptions, method)
# Return object of the nested class type
return ConvCnstrMODOptions(opt)
def ConvCnstrMOD(*args, **kwargs):
"""A wrapper function that dynamically defines a class derived from
one of the implementations of the Convolutional Constrained MOD
problems, and returns an object instantiated with the provided
parameters. The wrapper is designed to allow the appropriate
object to be created by calling this function using the same
syntax as would be used if it were a class. The specific
implementation is selected by use of an additional keyword
argument 'method'. Valid values are:
- ``'ism'`` :
Use the implementation defined in :class:`.ConvCnstrMOD_IterSM`. This
method works well for a small number of training images, but is very
slow for larger training sets.
- ``'cg'`` :
Use the implementation defined in :class:`.ConvCnstrMOD_CG`. This
method is slower than ``'ism'`` for small training sets, but has better
run time scaling as the training set grows.
- ``'cns'`` :
Use the implementation defined in :class:`.ConvCnstrMOD_Consensus`.
This method is a good choice for large training sets.
- ``'pgm'`` :
Use the implementation defined in :class:`.pgm.ccmod.ConvCnstrMOD`.
This method is the best choice for large training sets.
The default value is ``'pgm'``.
"""
# Extract method selection argument or set default
method = kwargs.pop('method', 'pgm')
# Assign base class depending on method selection argument
base = ccmod_class_label_lookup(method)
# Nested class with dynamically determined inheritance
class ConvCnstrMOD(base):
def __init__(self, *args, **kwargs):
super(ConvCnstrMOD, self).__init__(*args, **kwargs)
# Allow pickling of objects of type ConvCnstrMOD
_fix_dynamic_class_lookup(ConvCnstrMOD, method)
# Return object of the nested class type
return ConvCnstrMOD(*args, **kwargs)
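# Illustrative usage sketch (X, S and dsz are hypothetical coefficient array,
# signal array and dictionary size; this mirrors the call made from
# ConvBPDNDictLearn below):
#
#     c = ConvCnstrMOD(X, S, dsz, method='cns')
#     c.solve()
#     D = c.getdict()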
class ConvBPDNDictLearn(dictlrn.DictLearn):
r"""
Dictionary learning by alternating between sparse coding and dictionary
update stages.
|
.. inheritance-diagram:: ConvBPDNDictLearn
:parts: 2
|
The sparse coding is performed using
:class:`.admm.cbpdn.ConvBPDN` (see :cite:`wohlberg-2014-efficient`) or
:class:`.pgm.cbpdn.ConvBPDN` (see :cite:`chalasani-2013-fast` and
:cite:`wohlberg-2016-efficient`), and the dictionary update is computed
using :class:`.pgm.ccmod.ConvCnstrMOD` (see
:cite:`garcia-2018-convolutional1`) or one of the solver classes in
:mod:`.admm.ccmod` (see :cite:`wohlberg-2016-efficient` and
:cite:`sorel-2016-fast`). The coupling between sparse coding and
dictionary update stages is as in :cite:`garcia-2017-subproblem`.
Solve the optimisation problem
.. math::
\mathrm{argmin}_{\mathbf{d}, \mathbf{x}} \;
(1/2) \sum_k \left \| \sum_m \mathbf{d}_m * \mathbf{x}_{k,m} -
\mathbf{s}_k \right \|_2^2 + \lambda \sum_k \sum_m
\| \mathbf{x}_{k,m} \|_1 \quad \text{such that}
\quad \mathbf{d}_m \in C \;\; \forall m \;,
where :math:`C` is the feasible set consisting of filters with
unit norm and constrained support, via interleaved alternation
between the ADMM steps of the :class:`.admm.cbpdn.ConvBPDN` and
:func:`.ConvCnstrMOD` problems. Multi-channel variants
:cite:`wohlberg-2016-convolutional` are also supported.
After termination of the :meth:`solve` method, attribute :attr:`itstat`
is a list of tuples representing statistics of each iteration. The
fields of the named tuple ``IterationStats`` are:
``Iter`` : Iteration number
``ObjFun`` : Objective function value
``DFid`` : Value of data fidelity term :math:`(1/2) \sum_k \|
\sum_m \mathbf{d}_m * \mathbf{x}_{k,m} - \mathbf{s}_k \|_2^2`
``RegL1`` : Value of regularisation term :math:`\sum_k \sum_m
\| \mathbf{x}_{k,m} \|_1`
``Cnstr`` : Constraint violation measure
*If the ADMM solver is selected for sparse coding:*
``XPrRsdl`` : Norm of X primal residual
``XDlRsdl`` : Norm of X dual residual
``XRho`` : X penalty parameter
*If the PGM solver is selected for sparse coding:*
``X_F_Btrack`` : Value of objective function for CSC problem
``X_Q_Btrack`` : Value of quadratic approximation for CSC problem
``X_ItBt`` : Number of iterations in backtracking for CSC problem
``X_L`` : Inverse of gradient step parameter for CSC problem
*If an ADMM solver is selected for the dictionary update:*
``DPrRsdl`` : Norm of D primal residual
``DDlRsdl`` : Norm of D dual residual
``DRho`` : D penalty parameter
*If the PGM solver is selected for the dictionary update:*
``D_F_Btrack`` : Value of objective function for CDU problem
    ``D_Q_Btrack`` : Value of quadratic approximation for CDU problem
``D_ItBt`` : Number of iterations in backtracking for CDU problem
``D_L`` : Inverse of gradient step parameter for CDU problem
``Time`` : Cumulative run time
"""
class Options(dictlrn.DictLearn.Options):
"""CBPDN dictionary learning algorithm options.
Options include all of those defined in
:class:`.dictlrn.DictLearn.Options`, together with additional
options:
``AccurateDFid`` : Flag determining whether data fidelity term is
estimated from the value computed in the X update (``False``) or
is computed after every outer iteration over an X update and a D
update (``True``), which is slower but more accurate.
``DictSize`` : Dictionary size vector.
``CBPDN`` : An options class appropriate for the selected
sparse coding solver class
``CCMOD`` : An options class appropriate for the selected
dictionary update solver class
"""
defaults = copy.deepcopy(dictlrn.DictLearn.Options.defaults)
defaults.update({'DictSize': None, 'AccurateDFid': False})
def __init__(self, opt=None, xmethod=None, dmethod=None):
"""
Valid values for parameters ``xmethod`` and ``dmethod`` are
documented in functions :func:`.ConvBPDN` and
:func:`.ConvCnstrMOD` respectively.
"""
if xmethod is None:
xmethod = 'admm'
if dmethod is None:
dmethod = 'pgm'
self.xmethod = xmethod
self.dmethod = dmethod
self.defaults.update(
{'CBPDN': ConvBPDNOptionsDefaults(xmethod),
'CCMOD': ConvCnstrMODOptionsDefaults(dmethod)})
# Initialisation of CBPDN and CCMOD keys here is required to
# ensure that the corresponding options have types appropriate
# for classes in the cbpdn and ccmod modules, and are not just
# standard entries in the parent option tree
dictlrn.DictLearn.Options.__init__(self, {
'CBPDN': ConvBPDNOptions(self.defaults['CBPDN'],
method=xmethod),
'CCMOD': ConvCnstrMODOptions(self.defaults['CCMOD'],
method=dmethod)})
if opt is None:
opt = {}
self.update(opt)
def __init__(self, D0, S, lmbda=None, opt=None, xmethod=None,
dmethod=None, dimK=1, dimN=2):
"""
|
**Call graph**
.. image:: ../_static/jonga/cbpdndl_init.svg
:width: 20%
:target: ../_static/jonga/cbpdndl_init.svg
|
Parameters
----------
D0 : array_like
Initial dictionary array
S : array_like
Signal array
lmbda : float
Regularisation parameter
opt : :class:`ConvBPDNDictLearn.Options` object
Algorithm options
xmethod : string, optional (default 'admm')
String selecting sparse coding solver. Valid values are
documented in function :func:`.ConvBPDN`.
dmethod : string, optional (default 'pgm')
String selecting dictionary update solver. Valid values are
documented in function :func:`.ConvCnstrMOD`.
dimK : int, optional (default 1)
Number of signal dimensions. If there is only a single input
signal (e.g. if `S` is a 2D array representing a single image)
`dimK` must be set to 0.
dimN : int, optional (default 2)
Number of spatial/temporal dimensions
"""
if opt is None:
opt = ConvBPDNDictLearn.Options(xmethod=xmethod, dmethod=dmethod)
if xmethod is None:
xmethod = opt.xmethod
if dmethod is None:
dmethod = opt.dmethod
if opt.xmethod != xmethod or opt.dmethod != dmethod:
raise ValueError('Parameters xmethod and dmethod must have the '
'same values used to initialise the Options '
'object')
self.opt = opt
self.xmethod = xmethod
self.dmethod = dmethod
# Get dictionary size
if self.opt['DictSize'] is None:
dsz = D0.shape
else:
dsz = self.opt['DictSize']
# Construct object representing problem dimensions
cri = cr.CDU_ConvRepIndexing(dsz, S, dimK, dimN)
# Normalise dictionary
D0 = cr.Pcn(D0, dsz, cri.Nv, dimN, cri.dimCd, crp=True,
zm=opt['CCMOD', 'ZeroMean'])
# Modify D update options to include initial value for Y
optname = 'X0' if dmethod == 'pgm' else 'Y0'
opt['CCMOD'].update({optname: cr.zpad(
cr.stdformD(D0, cri.Cd, cri.M, dimN), cri.Nv)})
# Create X update object
xstep = ConvBPDN(D0, S, lmbda, opt['CBPDN'], method=xmethod,
dimK=dimK, dimN=dimN)
# Create D update object
dstep = ConvCnstrMOD(None, S, dsz, opt['CCMOD'], method=dmethod,
dimK=dimK, dimN=dimN)
# Configure iteration statistics reporting
isc = dictlrn.IterStatsConfig(
isfld=dc.isfld(xmethod, dmethod, opt),
isxmap=dc.isxmap(xmethod, opt), isdmap=dc.isdmap(dmethod),
evlmap=dc.evlmap(opt['AccurateDFid']),
hdrtxt=dc.hdrtxt(xmethod, dmethod, opt),
hdrmap=dc.hdrmap(xmethod, dmethod, opt),
fmtmap={'It_X': '%4d', 'It_D': '%4d'})
# Call parent constructor
super(ConvBPDNDictLearn, self).__init__(xstep, dstep, opt, isc)
def getdict(self, crop=True):
"""Get final dictionary. If ``crop`` is ``True``, apply
:func:`.cnvrep.bcrop` to returned array.
"""
return self.dstep.getdict(crop=crop)
def reconstruct(self, D=None, X=None):
"""Reconstruct representation."""
if D is None:
D = self.getdict(crop=False)
if X is None:
X = self.getcoef()
Df = rfftn(D, self.xstep.cri.Nv, self.xstep.cri.axisN)
Xf = rfftn(X, self.xstep.cri.Nv, self.xstep.cri.axisN)
DXf = inner(Df, Xf, axis=self.xstep.cri.axisM)
return irfftn(DXf, self.xstep.cri.Nv, self.xstep.cri.axisN)
def evaluate(self):
"""Evaluate functional value of previous iteration."""
if self.opt['AccurateDFid']:
if self.dmethod == 'pgm':
D = self.dstep.getdict(crop=False)
else:
D = self.dstep.var_y()
if self.xmethod == 'pgm':
X = self.xstep.getcoef()
else:
X = self.xstep.var_y()
Df = rfftn(D, self.xstep.cri.Nv, self.xstep.cri.axisN)
Xf = rfftn(X, self.xstep.cri.Nv, self.xstep.cri.axisN)
Sf = self.xstep.Sf
Ef = inner(Df, Xf, axis=self.xstep.cri.axisM) - Sf
dfd = rfl2norm2(Ef, self.xstep.S.shape,
axis=self.xstep.cri.axisN) / 2.0
rl1 = np.sum(np.abs(X))
return dict(DFid=dfd, RegL1=rl1,
ObjFun=dfd + self.xstep.lmbda * rl1)
else:
return None
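# Illustrative usage sketch (D0 and S are hypothetical initial dictionary and
# training signal arrays; parameter values are placeholders, not
# recommendations):
#
#     opt = ConvBPDNDictLearn.Options({'MaxMainIter': 100})
#     d = ConvBPDNDictLearn(D0, S, lmbda=0.1, opt=opt)
#     d.solve()
#     D = d.getdict()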
|
bsd-3-clause
| 8,305,897,426,569,706,000
| 34.498092
| 77
| 0.626257
| false
| 3.833677
| false
| false
| false
|
kism/RViProgramLauncher
|
viinputdaemon.py
|
1
|
18123
|
# Input Daemon for the Visually Impaired
# For use with a device that outputs serial
import uinput #interface between python and the uinput kernel module
import time #for time.sleep()
import serial #the keyboard this program interfaces with uses serial
import os
import sys
# Easier debugging :^)
class termcolour:
PINK = '\033[95m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
WHITE = '\033[0m'
# Figure out what to do on the keypresses
def sendLetter(letter):
global caps
global numb
print termcolour.GREEN + 'Sent ASCII Char:' + termcolour.WHITE
if numb == True:
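        # In number mode the letter keys A-J are mapped to the digits 1-0 below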
if letter == 'KEY_A':
device.emit_click(uinput.KEY_1)
if letter == 'KEY_B':
device.emit_click(uinput.KEY_2)
if letter == 'KEY_C':
device.emit_click(uinput.KEY_3)
if letter == 'KEY_D':
device.emit_click(uinput.KEY_4)
if letter == 'KEY_E':
device.emit_click(uinput.KEY_5)
if letter == 'KEY_F':
device.emit_click(uinput.KEY_6)
if letter == 'KEY_G':
device.emit_click(uinput.KEY_7)
if letter == 'KEY_H':
device.emit_click(uinput.KEY_8)
if letter == 'KEY_I':
device.emit_click(uinput.KEY_9)
if letter == 'KEY_J':
device.emit_click(uinput.KEY_0)
else:
if caps == 0:
device.emit_click(getattr(uinput,letter))
if caps == 1:
caps = 0
device.emit_combo([
uinput.KEY_LEFTSHIFT,
getattr(uinput,letter),
])
if caps == 2:
device.emit_combo([
uinput.KEY_LEFTSHIFT,
getattr(uinput,letter),
])
def f1(inProgram):
print termcolour.PINK + 'F1 Pressed' + termcolour.WHITE
print termcolour.GREEN + 'Program:' + termcolour.WHITE, inProgram
if inProgram == 'viui': # Open menu item 1
device.emit_click(uinput.KEY_1)
time.sleep(0.01)
device.emit_click(uinput.KEY_ENTER)
time.sleep(0.01)
print termcolour.GREEN + 'Command:' + termcolour.WHITE, 'F1'
if inProgram == 'nano': # Open Help
device.emit_combo([
uinput.KEY_LEFTCTRL,
uinput.KEY_G,
])
print termcolour.GREEN + 'Command:' + termcolour.WHITE, 'Help'
if inProgram == 'newsbeuter': # Open Help
device.emit_combo([
uinput.KEY_LEFTSHIFT,
uinput.KEY_SLASH,
])
print termcolour.GREEN + 'Command:' + termcolour.WHITE, 'Help'
if inProgram == 'alpine': # Open Help
device.emit_combo([
uinput.KEY_LEFTSHIFT,
uinput.KEY_SLASH,
])
print termcolour.GREEN + 'Command:' + termcolour.WHITE, 'Help'
if inProgram == 'links': # Open Help
device.emit_click(uinput.KEY_F9)
time.sleep(0.1)
device.emit_click(uinput.KEY_H)
time.sleep(0.1)
device.emit_click(uinput.KEY_M)
print termcolour.GREEN + 'Command:' + termcolour.WHITE, 'Help'
if inProgram == 'irssi': # Open Help
device.emit_click(uinput.KEY_SLASH)
time.sleep(0.01)
device.emit_click(uinput.KEY_H)
time.sleep(0.01)
device.emit_click(uinput.KEY_E)
time.sleep(0.01)
device.emit_click(uinput.KEY_L)
time.sleep(0.01)
device.emit_click(uinput.KEY_P)
time.sleep(0.01)
device.emit_click(uinput.KEY_ENTER)
print termcolour.GREEN + 'Command:' + termcolour.WHITE, 'Help'
if inProgram == 'zsh': # Go to home directory
device.emit_combo([
uinput.KEY_LEFTCTRL,
uinput.KEY_C,
])
device.emit_click(uinput.KEY_C)
time.sleep(0.01)
device.emit_click(uinput.KEY_D)
time.sleep(0.01)
device.emit_click(uinput.KEY_ENTER)
print termcolour.GREEN + 'Command:' + termcolour.WHITE, 'Home'
if inProgram == 'man': # Help
device.emit_click(uinput.KEY_H)
print termcolour.GREEN + 'Command:' + termcolour.WHITE, 'Help'
def f2(inProgram):
print termcolour.PINK + 'F2 Pressed' + termcolour.WHITE
print termcolour.GREEN + 'Program:' + termcolour.WHITE, inProgram
if inProgram == 'viui': # Open menu item 2
device.emit_click(uinput.KEY_2)
time.sleep(0.01)
device.emit_click(uinput.KEY_ENTER)
time.sleep(0.01)
print termcolour.GREEN + 'Command:' + termcolour.WHITE, 'F2'
if inProgram == 'nano': # Open File
device.emit_combo([
uinput.KEY_LEFTCTRL,
uinput.KEY_R,
])
time.sleep(0.1)
device.emit_combo([
uinput.KEY_LEFTCTRL,
uinput.KEY_T,
])
print termcolour.GREEN + 'Command:' + termcolour.WHITE, 'Open File'
if inProgram == 'newsbeuter': # Open Entry
device.emit_click(uinput.KEY_ENTER)
print termcolour.GREEN + 'Command:' + termcolour.WHITE, 'Open Entry'
if inProgram == 'alpine': # Open Index
device.emit_click(uinput.KEY_I)
print termcolour.GREEN + 'Command:' + termcolour.WHITE, 'Index'
if inProgram == 'links': # Go to
device.emit_click(uinput.KEY_G)
print termcolour.GREEN + 'Command:' + termcolour.WHITE, 'Go to'
if inProgram == 'irssi': # Connect
device.emit_click(uinput.KEY_SLASH)
time.sleep(0.01)
device.emit_click(uinput.KEY_C)
time.sleep(0.01)
device.emit_click(uinput.KEY_O)
time.sleep(0.01)
device.emit_click(uinput.KEY_N)
time.sleep(0.01)
device.emit_click(uinput.KEY_N)
time.sleep(0.01)
device.emit_click(uinput.KEY_E)
time.sleep(0.01)
device.emit_click(uinput.KEY_C)
time.sleep(0.01)
device.emit_click(uinput.KEY_T)
time.sleep(0.01)
device.emit_click(uinput.KEY_SPACE)
print termcolour.GREEN + 'Command:' + termcolour.WHITE, 'Connect'
if inProgram == 'zsh': # Use the mplayer alias
device.emit_click(uinput.KEY_P)
time.sleep(0.01)
device.emit_click(uinput.KEY_L)
time.sleep(0.01)
device.emit_click(uinput.KEY_A)
time.sleep(0.01)
device.emit_click(uinput.KEY_Y)
time.sleep(0.01)
device.emit_click(uinput.KEY_SPACE)
print termcolour.GREEN + 'Command:' + termcolour.WHITE, 'Play'
def f3(inProgram):
print termcolour.PINK + 'F3 Pressed' + termcolour.WHITE
print termcolour.GREEN + 'Program:' + termcolour.WHITE, inProgram
if inProgram == 'viui': # Open menu item 3
device.emit_click(uinput.KEY_3)
time.sleep(0.01)
device.emit_click(uinput.KEY_ENTER)
time.sleep(0.01)
print termcolour.GREEN + 'Command:' + termcolour.WHITE, 'F3'
if inProgram == 'nano': # Save file
device.emit_combo([
uinput.KEY_LEFTCTRL,
uinput.KEY_O,
])
print termcolour.GREEN + 'Command:' + termcolour.WHITE, 'Save File'
if inProgram == 'newsbeuter': # Save entry to file
device.emit_click(uinput.KEY_S)
print termcolour.GREEN + 'Command:' + termcolour.WHITE, 'Save Story'
if inProgram == 'alpine': # Compose
device.emit_click(uinput.KEY_C)
print termcolour.GREEN + 'Command:' + termcolour.WHITE, 'Compose'
if inProgram == 'links': # Open menu
device.emit_click(uinput.KEY_F9)
print termcolour.GREEN + 'Command:' + termcolour.WHITE, 'Menu'
if inProgram == 'irssi': # Join
device.emit_click(uinput.KEY_SLASH)
time.sleep(0.01)
device.emit_click(uinput.KEY_J)
time.sleep(0.01)
device.emit_click(uinput.KEY_O)
time.sleep(0.01)
device.emit_click(uinput.KEY_I)
time.sleep(0.01)
device.emit_click(uinput.KEY_N)
time.sleep(0.01)
device.emit_click(uinput.KEY_SPACE)
print termcolour.GREEN + 'Command:' + termcolour.WHITE, 'Join'
def f4(inProgram):
print termcolour.PINK + 'F4 Pressed' + termcolour.WHITE
print termcolour.GREEN + 'Program:' + termcolour.WHITE, inProgram
if inProgram == 'viui': # Open menu item 4
device.emit_click(uinput.KEY_4)
time.sleep(0.01)
device.emit_click(uinput.KEY_ENTER)
time.sleep(0.01)
print termcolour.GREEN + 'Command:' + termcolour.WHITE, 'F4'
if inProgram == 'nano': # Cancel
device.emit_combo([
uinput.KEY_LEFTCTRL,
uinput.KEY_C,
])
print termcolour.GREEN + 'Command:' + termcolour.WHITE, 'Cancel'
if inProgram == 'alpine': # Back
device.emit_click(uinput.KEY_M)
time.sleep(0.1)
device.emit_click(uinput.KEY_COMMA)
print termcolour.GREEN + 'Command:' + termcolour.WHITE, 'Back'
if inProgram == 'links': # Cancel
device.emit_click(uinput.KEY_ESC)
print termcolour.GREEN + 'Command:' + termcolour.WHITE, 'Cancel'
if inProgram == 'irssi': # Part
device.emit_click(uinput.KEY_SLASH)
time.sleep(0.01)
device.emit_click(uinput.KEY_P)
time.sleep(0.01)
device.emit_click(uinput.KEY_A)
time.sleep(0.01)
device.emit_click(uinput.KEY_R)
time.sleep(0.01)
device.emit_click(uinput.KEY_T)
time.sleep(0.01)
device.emit_click(uinput.KEY_ENTER)
print termcolour.GREEN + 'Command:' + termcolour.WHITE, 'Part'
if inProgram == 'zsh': # Cancel
device.emit_combo([
uinput.KEY_LEFTCTRL,
uinput.KEY_C,
])
print termcolour.GREEN + 'Command:' + termcolour.WHITE, 'Cancel'
def f5(inProgram):
print termcolour.PINK + 'F5 Pressed' + termcolour.WHITE
print termcolour.GREEN + 'Program:' + termcolour.WHITE, inProgram
if inProgram == 'viui': # Open menu item 5
device.emit_click(uinput.KEY_5)
time.sleep(0.01)
device.emit_click(uinput.KEY_ENTER)
time.sleep(0.01)
print termcolour.GREEN + 'Command:' + termcolour.WHITE, 'F5'
if inProgram == 'nano': # Cut
device.emit_combo([
uinput.KEY_LEFTCTRL,
uinput.KEY_K,
])
print termcolour.GREEN + 'Command:' + termcolour.WHITE, 'Cut'
if inProgram == 'newsbeuter': # Reload
device.emit_click(uinput.KEY_R)
print termcolour.GREEN + 'Command:' + termcolour.WHITE, 'Reload'
if inProgram == 'alpine': # Journal
device.emit_click(uinput.KEY_J)
print termcolour.GREEN + 'Command:' + termcolour.WHITE, 'Journal'
if inProgram == 'irssi': # Query
device.emit_click(uinput.KEY_SLASH)
time.sleep(0.01)
device.emit_click(uinput.KEY_Q)
time.sleep(0.01)
device.emit_click(uinput.KEY_U)
time.sleep(0.01)
device.emit_click(uinput.KEY_E)
time.sleep(0.01)
device.emit_click(uinput.KEY_R)
time.sleep(0.01)
device.emit_click(uinput.KEY_Y)
time.sleep(0.01)
        device.emit_click(uinput.KEY_SPACE)
print termcolour.GREEN + 'Command:' + termcolour.WHITE, 'Query'
def f6(inProgram):
print termcolour.PINK + 'F6 Pressed' + termcolour.WHITE
print termcolour.GREEN + 'Program:' + termcolour.WHITE, inProgram
if inProgram == 'viui': # Open menu item 6
device.emit_click(uinput.KEY_6)
time.sleep(0.01)
device.emit_click(uinput.KEY_ENTER)
time.sleep(0.01)
print termcolour.GREEN + 'Command:' + termcolour.WHITE, 'F6'
if inProgram == 'nano': # Uncut
device.emit_combo([
uinput.KEY_LEFTCTRL,
uinput.KEY_U,
])
print termcolour.GREEN + 'Command:' + termcolour.WHITE, 'Uncut'
if inProgram == 'newsbeuter': # Open next unread
        device.emit_click(uinput.KEY_N)
print termcolour.GREEN + 'Command:' + termcolour.WHITE, 'Next Unread'
if inProgram == 'alpine': # Address
device.emit_click(uinput.KEY_A)
print termcolour.GREEN + 'Command:' + termcolour.WHITE, 'Address'
if inProgram == 'irssi': # Previous window
device.emit_combo([
            uinput.KEY_LEFTCTRL,
uinput.KEY_P,
])
print termcolour.GREEN + 'Command:' + termcolour.WHITE, 'Previous window'
def f7(inProgram):
print termcolour.PINK + 'F7 Pressed' + termcolour.WHITE
print termcolour.GREEN + 'Program:' + termcolour.WHITE, inProgram
if inProgram == 'viui': # Open menu item 7
device.emit_click(uinput.KEY_7)
time.sleep(0.01)
device.emit_click(uinput.KEY_ENTER)
time.sleep(0.01)
print termcolour.GREEN + 'Command:' + termcolour.WHITE, 'F7'
if inProgram == 'nano': # Find
device.emit_combo([
uinput.KEY_LEFTCTRL,
uinput.KEY_W,
])
print termcolour.GREEN + 'Command:' + termcolour.WHITE, 'Find'
if inProgram == 'newsbeuter': # Open in browser
device.emit_click(uinput.KEY_O)
print termcolour.GREEN + 'Command:' + termcolour.WHITE, 'Open in Browser'
if inProgram == 'alpine': # Setup email
device.emit_click(uinput.KEY_S)
print termcolour.GREEN + 'Command:' + termcolour.WHITE, 'Setup'
if inProgram == 'links': # Find on page
device.emit_click(uinput.KEY_SLASH)
print termcolour.GREEN + 'Command:' + termcolour.WHITE, 'Find'
if inProgram == 'irssi': # Next window
device.emit_combo([
            uinput.KEY_LEFTCTRL,
uinput.KEY_N,
])
print termcolour.GREEN + 'Command:' + termcolour.WHITE, 'Next Window'
def f8(inProgram):
print termcolour.PINK + 'F8 Pressed' + termcolour.WHITE
print termcolour.GREEN + 'Program:' + termcolour.WHITE, inProgram
if inProgram == 'viui': # Open menu item 8
device.emit_click(uinput.KEY_8)
time.sleep(0.01)
device.emit_click(uinput.KEY_ENTER)
time.sleep(0.01)
print termcolour.GREEN + 'Command:' + termcolour.WHITE, 'F8'
if inProgram == 'nano': # Exit menu or program
device.emit_combo([
uinput.KEY_LEFTCTRL,
uinput.KEY_X,
])
print termcolour.GREEN + 'Command:' + termcolour.WHITE, 'Quit'
if inProgram == 'newsbeuter': # Quit
device.emit_click(uinput.KEY_Q)
print termcolour.GREEN + 'Command:' + termcolour.WHITE, 'Quit'
if inProgram == 'alpine': # Quit
device.emit_click(uinput.KEY_Q)
print termcolour.GREEN + 'Command:' + termcolour.WHITE, 'Quit'
if inProgram == 'links': # Quit
device.emit_click(uinput.KEY_Q)
print termcolour.GREEN + 'Command:' + termcolour.WHITE, 'Quit'
if inProgram == 'irssi': # Quit
device.emit_click(uinput.KEY_SLASH)
time.sleep(0.01)
device.emit_click(uinput.KEY_Q)
time.sleep(0.01)
device.emit_click(uinput.KEY_U)
time.sleep(0.01)
device.emit_click(uinput.KEY_I)
time.sleep(0.01)
device.emit_click(uinput.KEY_T)
time.sleep(0.01)
device.emit_click(uinput.KEY_ENTER)
print termcolour.GREEN + 'Command:' + termcolour.WHITE, 'Quit'
if inProgram == 'zsh': # Quit
device.emit_combo([
uinput.KEY_LEFTCTRL,
uinput.KEY_D,
])
print termcolour.GREEN + 'Command:' + termcolour.WHITE, 'Quit'
if inProgram == 'man': # Quit
device.emit_click(uinput.KEY_Q)
print termcolour.GREEN + 'Command:' + termcolour.WHITE, 'Quit'
# Main Program
print termcolour.PINK + '~ Daemon initialising ~' + termcolour.WHITE
# Check if the program was run with an argument
if len(sys.argv) > 1:
print termcolour.GREEN + 'Argument:' + termcolour.WHITE, str(sys.argv[1])
program = str(sys.argv[1])
else:
print termcolour.YELLOW + 'No args, what are you even doing?' + termcolour.WHITE
program = ''
# Python-uinput is a quality interface. To find key codes, check /usr/include/linux/input.h
device = uinput.Device([
uinput.KEY_A,
uinput.KEY_B,
uinput.KEY_C,
uinput.KEY_D,
uinput.KEY_E,
uinput.KEY_F,
uinput.KEY_G,
uinput.KEY_H,
uinput.KEY_I,
uinput.KEY_J,
uinput.KEY_K,
uinput.KEY_L,
uinput.KEY_M,
uinput.KEY_N,
uinput.KEY_O,
uinput.KEY_P,
uinput.KEY_Q,
uinput.KEY_R,
uinput.KEY_S,
uinput.KEY_T,
uinput.KEY_U,
uinput.KEY_V,
uinput.KEY_W,
uinput.KEY_X,
uinput.KEY_Y,
uinput.KEY_Z,
uinput.KEY_1,
uinput.KEY_2,
uinput.KEY_3,
uinput.KEY_4,
uinput.KEY_5,
uinput.KEY_6,
uinput.KEY_7,
uinput.KEY_8,
uinput.KEY_9,
uinput.KEY_0,
uinput.KEY_TAB,
uinput.KEY_ENTER,
uinput.KEY_SPACE,
uinput.KEY_DOT,
uinput.KEY_COMMA,
uinput.KEY_SLASH,
uinput.KEY_BACKSLASH,
uinput.KEY_LEFTCTRL,
uinput.KEY_LEFTALT,
uinput.KEY_LEFTSHIFT,
uinput.KEY_BACKSPACE,
uinput.KEY_PAGEDOWN,
uinput.KEY_PAGEUP,
uinput.KEY_UP,
uinput.KEY_LEFT,
uinput.KEY_RIGHT,
uinput.KEY_DOWN,
uinput.KEY_ESC,
uinput.KEY_F1,
uinput.KEY_F2,
uinput.KEY_F3,
uinput.KEY_F4,
uinput.KEY_F5,
uinput.KEY_F6,
uinput.KEY_F7,
uinput.KEY_F8,
uinput.KEY_F9,
uinput.KEY_F10,
uinput.KEY_F11,
uinput.KEY_F12,
uinput.KEY_1,
uinput.KEY_2,
uinput.KEY_3,
uinput.KEY_4,
uinput.KEY_5,
uinput.KEY_6,
uinput.KEY_7,
uinput.KEY_8,
uinput.KEY_9,
uinput.KEY_0,
])
# Open the serial device
ser = serial.Serial('/dev/ttyUSB0', 115200, timeout = 1)
print termcolour.GREEN + 'Serial device opened:' + termcolour.WHITE, ser.name
# Mad Hacks go here
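# Modifier state: caps counts consecutive capital-sign cells (capped at 2,
# presumably acting as caps lock); numb toggles number mode.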
caps = 0
numb = False
if program == 'newsbeuter':
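    # Startup hack: wait for newsbeuter to come up, press 'r' to reload the
    # feeds, then Enter to open the current selection.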
time.sleep(2.0)
device.emit_click(uinput.KEY_R)
time.sleep(3.0)
device.emit_click(uinput.KEY_ENTER)
# Polling for input
while 1:
sbuf = ser.read()
print 'Buffer Queue =', ser.inWaiting()
print 'Read =', sbuf
    # All values are in hex, not actual ASCII
# Braille Modifier Characters
if sbuf == '\x01': # Caps
if caps > 1:
caps = 2
else:
caps = caps + 1
print termcolour.GREEN + 'Caps:' + termcolour.WHITE, caps
if sbuf == '\x0F': # Number
if numb == True:
numb = False
else:
numb = True
print termcolour.GREEN + 'Numb:' + termcolour.WHITE, numb
# Regular Keys
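    # Each byte below selects a letter; sendLetter (defined earlier in this
    # file) is expected to apply the caps/numb state before emitting the key.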
if sbuf == '\x20':
sendLetter('KEY_A')
if sbuf == '\x30':
sendLetter('KEY_B')
if sbuf == '\x24':
sendLetter('KEY_C')
if sbuf == '\x26':
sendLetter('KEY_D')
if sbuf == '\x22':
sendLetter('KEY_E')
if sbuf == '\x34':
sendLetter('KEY_F')
if sbuf == '\x36':
sendLetter('KEY_G')
if sbuf == '\x32':
sendLetter('KEY_H')
if sbuf == '\x14':
sendLetter('KEY_I')
if sbuf == '\x16':
sendLetter('KEY_J')
if sbuf == '\x28':
sendLetter('KEY_K')
if sbuf == '\x38':
sendLetter('KEY_L')
if sbuf == '\x2C':
sendLetter('KEY_M')
if sbuf == '\x2E':
sendLetter('KEY_N')
if sbuf == '\x2A':
sendLetter('KEY_O')
if sbuf == '\x3C':
sendLetter('KEY_P')
if sbuf == '\x3E':
sendLetter('KEY_Q')
if sbuf == '\x3A':
sendLetter('KEY_R')
if sbuf == '\x1C':
sendLetter('KEY_S')
if sbuf == '\x1E':
sendLetter('KEY_T')
if sbuf == '\x29':
sendLetter('KEY_U')
if sbuf == '\x39':
sendLetter('KEY_V')
if sbuf == '\x17':
sendLetter('KEY_W')
if sbuf == '\x2D':
sendLetter('KEY_X')
if sbuf == '\x2F':
sendLetter('KEY_Y')
if sbuf == '\x2B':
sendLetter('KEY_Z')
if sbuf == '\x10':
device.emit_click(uinput.KEY_COMMA)
if sbuf == '\x13':
device.emit_click(uinput.KEY_DOT)
if sbuf == '\x0C':
device.emit_click(uinput.KEY_SLASH)
if sbuf == '\x60':
device.emit_click(uinput.KEY_SPACE)
caps = 0
        numb = False
    # IBM Compatible PC Keys
if sbuf == '\x40':
device.emit_click(uinput.KEY_ESC)
if sbuf == '\x41':
device.emit_click(uinput.KEY_UP)
if sbuf == '\x42':
device.emit_click(uinput.KEY_LEFT)
if sbuf == '\x43':
device.emit_click(uinput.KEY_RIGHT)
if sbuf == '\x44':
device.emit_click(uinput.KEY_DOWN)
if sbuf == '\x45':
device.emit_click(uinput.KEY_ENTER)
if sbuf == '\x46':
device.emit_click(uinput.KEY_BACKSPACE)
if sbuf == '\x47':
device.emit_click(uinput.KEY_PAGEUP)
if sbuf == '\x48':
device.emit_click(uinput.KEY_PAGEDOWN)
# Macro Keys
if sbuf == '\x81': #129
f1(program)
if sbuf == '\x82': #130
f2(program)
if sbuf == '\x83': #131
f3(program)
if sbuf == '\x84': #132
f4(program)
if sbuf == '\x85': #133
f5(program)
if sbuf == '\x86': #134
f6(program)
if sbuf == '\x87': #135
f7(program)
if sbuf == '\x88': #136
f8(program)
|
mit
| -6,499,945,008,790,168,000
| 27.81399
| 90
| 0.674668
| false
| 2.501795
| false
| false
| false
|
JNeiger/robocup-software
|
soccer/gameplay/plays/no_opponents/offensive_pivot_kick.py
|
1
|
1448
|
import play
import behavior
import robocup
import skills.line_kick
import skills.pivot_kick
import tactics.defense
import main
import constants
import enum
import role_assignment
class OffensivePivotKick(play.Play):
def __init__(self):
super().__init__(continuous=False)
self.add_transition(behavior.Behavior.State.start,
behavior.Behavior.State.running, lambda: True,
'immediately')
self.add_transition(
behavior.Behavior.State.running, behavior.Behavior.State.completed,
lambda: self.has_subbehavior_with_name('kicker') and self.subbehavior_with_name('kicker').is_done_running(),
"kicker finished")
def on_enter_running(self):
kicker = skills.pivot_kick.PivotKick()
kicker.target = constants.Field.TheirGoalSegment
kicker.aim_params = {'error_threshold': .01,
'desperate_timeout': 10,
'max_steady_ang_vel': 4}
self.add_subbehavior(kicker, 'kicker', required=True, priority=100)
def on_exit_running(self):
self.remove_subbehavior('kicker')
@classmethod
def score(cls):
gs = main.game_state()
#Currently has lower priority than basic_122. Maybe add a check to see if we have all our robots?
return 15 if gs.is_playing() else float("inf")
@classmethod
def handles_goalie(self):
return False
|
apache-2.0
| -2,246,814,608,390,388,200
| 31.177778
| 120
| 0.627072
| false
| 3.934783
| false
| false
| false
|
FederatedAI/FATE
|
examples/benchmark_quality/hetero_nn/fate-hetero_nn.py
|
1
|
6666
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from tensorflow.keras import initializers
from tensorflow.keras import optimizers
from tensorflow.keras.layers import Dense
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataIO
from pipeline.component import Evaluation
from pipeline.component import HeteroNN
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data, Model
from pipeline.utils.tools import load_job_config, JobConfig
from pipeline.runtime.entity import JobParameters
from federatedml.evaluation.metrics import classification_metric
from fate_test.utils import extract_data, parse_summary_result
def main(config="../../config.yaml", param="./hetero_nn_breast_config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
if isinstance(param, str):
param = JobConfig.load_from_file(param)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
backend = config.backend
work_mode = config.work_mode
guest_train_data = {"name": param["guest_table_name"], "namespace": f"experiment{namespace}"}
host_train_data = {"name": param["host_table_name"], "namespace": f"experiment{namespace}"}
pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host)
reader_0 = Reader(name="reader_0")
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
dataio_0 = DataIO(name="dataio_0")
dataio_0.get_party_instance(role='guest', party_id=guest).component_param(with_label=True)
dataio_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)
intersection_0 = Intersection(name="intersection_0")
hetero_nn_0 = HeteroNN(name="hetero_nn_0", epochs=param["epochs"],
interactive_layer_lr=param["learning_rate"], batch_size=param["batch_size"],
early_stop="diff")
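    # HeteroNN splits the network across parties: each party trains a local
    # bottom model, the interactive layer joins their outputs, and the top
    # model on the guest side produces the final prediction.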
hetero_nn_0.add_bottom_model(Dense(units=param["bottom_layer_units"], input_shape=(10,), activation="tanh",
kernel_initializer=initializers.RandomUniform(minval=-1, maxval=1, seed=123)))
hetero_nn_0.set_interactve_layer(
Dense(units=param["interactive_layer_units"], input_shape=(param["bottom_layer_units"],), activation="relu",
kernel_initializer=initializers.RandomUniform(minval=-1, maxval=1, seed=123)))
hetero_nn_0.add_top_model(
Dense(units=param["top_layer_units"], input_shape=(param["interactive_layer_units"],),
activation=param["top_act"],
kernel_initializer=initializers.RandomUniform(minval=-1, maxval=1, seed=123)))
opt = getattr(optimizers, param["opt"])(lr=param["learning_rate"])
hetero_nn_0.compile(optimizer=opt, metrics=param["metrics"],
loss=param["loss"])
hetero_nn_1 = HeteroNN(name="hetero_nn_1")
if param["loss"] == "categorical_crossentropy":
eval_type = "multi"
else:
eval_type = "binary"
evaluation_0 = Evaluation(name="evaluation_0", eval_type=eval_type)
pipeline.add_component(reader_0)
pipeline.add_component(dataio_0, data=Data(data=reader_0.output.data))
pipeline.add_component(intersection_0, data=Data(data=dataio_0.output.data))
pipeline.add_component(hetero_nn_0, data=Data(train_data=intersection_0.output.data))
pipeline.add_component(hetero_nn_1, data=Data(test_data=intersection_0.output.data),
model=Model(hetero_nn_0.output.model))
pipeline.add_component(evaluation_0, data=Data(data=hetero_nn_0.output.data))
pipeline.compile()
job_parameters = JobParameters(backend=backend, work_mode=work_mode)
pipeline.fit(job_parameters)
nn_0_data = pipeline.get_component("hetero_nn_0").get_output_data().get("data")
nn_1_data = pipeline.get_component("hetero_nn_1").get_output_data().get("data")
nn_0_score = extract_data(nn_0_data, "predict_result")
nn_0_label = extract_data(nn_0_data, "label")
nn_1_score = extract_data(nn_1_data, "predict_result")
nn_1_label = extract_data(nn_1_data, "label")
nn_0_score_label = extract_data(nn_0_data, "predict_result", keep_id=True)
nn_1_score_label = extract_data(nn_1_data, "predict_result", keep_id=True)
metric_summary = parse_summary_result(pipeline.get_component("evaluation_0").get_summary())
if eval_type == "binary":
metric_nn = {
"score_diversity_ratio": classification_metric.Distribution.compute(nn_0_score_label, nn_1_score_label),
"ks_2samp": classification_metric.KSTest.compute(nn_0_score, nn_1_score),
"mAP_D_value": classification_metric.AveragePrecisionScore().compute(nn_0_score, nn_1_score, nn_0_label,
nn_1_label)}
metric_summary["distribution_metrics"] = {"hetero_nn": metric_nn}
elif eval_type == "multi":
metric_nn = {
"score_diversity_ratio": classification_metric.Distribution.compute(nn_0_score_label, nn_1_score_label)}
metric_summary["distribution_metrics"] = {"hetero_nn": metric_nn}
data_summary = {"train": {"guest": guest_train_data["name"], "host": host_train_data["name"]},
"test": {"guest": guest_train_data["name"], "host": host_train_data["name"]}
}
return data_summary, metric_summary
if __name__ == "__main__":
parser = argparse.ArgumentParser("BENCHMARK-QUALITY PIPELINE JOB")
parser.add_argument("-config", type=str,
help="config file")
parser.add_argument("-param", type=str,
help="config file for params")
args = parser.parse_args()
if args.config is not None:
main(args.config, args.param)
else:
main()
|
apache-2.0
| 4,090,263,162,999,835,600
| 46.276596
| 117
| 0.671467
| false
| 3.547632
| true
| false
| false
|
wadobo/GECO
|
src/gecod/gecod/xmlrpc_frontend.py
|
1
|
3242
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Provides an XML-RPC frontend to the gecod backend
'''
import backend
import secure_xmlrpc as sxmlrpc
HOST = 'localhost'
PORT = 4343
DATABASE = 'sqlite:///database.sqlite'
KEYFILE='certs/key.pem'
CERTFILE='certs/cert.pem'
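# The config file is parsed as plain "key = value" lines; recognised keys are
# host, port, database, keyfile and certfile (case-insensitive).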
def parseconfig(configfile):
global HOST, PORT, DATABASE, KEYFILE, CERTFILE
options = open(configfile).readlines()
for opt in options:
k, v = opt.split('=')
k = k.strip().lower()
v = v.strip()
if k == 'host':
HOST = v
elif k == 'port':
PORT = int(v)
elif k == 'database':
DATABASE = v
elif k == 'keyfile':
KEYFILE = v
elif k == 'certfile':
CERTFILE = v
class frontend:
def __init__(self):
pass
def auth(self, user, password):
'''
Return the cookie.
'''
return backend.auth(user, 'password',
password=password)
def logout(self, cookie):
backend.logout(cookie)
def register(self, user, password):
backend.register(user, password)
def unregister(self, cookie):
backend.unregister(cookie)
def change_password(self, cookie, new_password):
backend.change_password(cookie, new_password)
def change_attr(self, cookie, name, args):
'''
args is a dict with possible keys:
type, description, account, expiration, password
expiration must be a datetime
'''
backend.change_attr(cookie, name, **args)
def check_user_name(self, name):
return backend.check_user_name(name)
def set_password(self, cookie, name, password, args):
'''
args is a dict with possible keys:
type, description, account, expiration
expiration must be an integer (days)
'''
backend.set_password(cookie, name, password, **args)
def del_password(self, cookie, name):
backend.del_password(cookie, name)
def get_password(self, cookie, name):
p = backend.get_password(cookie, name)
return p
def get_passwords(self, cookie, args):
'''
args is a dict with possible keys:
name, type, updated, expiration, account
'''
p = backend.get_passwords_by(cookie, **args)
return [i for i in p]
def get_all_passwords(self, cookie):
'''
Return all passwords of user
'''
p = backend.get_all_passwords(cookie)
return [i for i in p]
def export(self, cookie):
'''
Returns a string with all passwords
ready to import
'''
return backend.export(cookie)
def restore(self, cookie, data):
'''
        Restore data from a backup made with export
'''
backend.restore(cookie, data)
def start_server():
sxmlrpc.EasyServer(HOST, PORT, frontend())
def main(config='gecod-xmlrpc.conf'):
parseconfig(config)
backend.DATABASE = DATABASE
sxmlrpc.KEYFILE = KEYFILE
sxmlrpc.CERTFILE = CERTFILE
try:
start_server()
except KeyboardInterrupt:
print "Closing"
if __name__ == '__main__':
main()
|
gpl-3.0
| -6,722,768,706,466,853,000
| 22.664234
| 60
| 0.572795
| false
| 4.124682
| true
| false
| false
|
prior/webinars
|
webinars_web/webinars/views/events.py
|
1
|
10543
|
from operator import attrgetter
from django.http import HttpResponse,HttpResponseRedirect,HttpResponseNotFound,HttpResponseForbidden
from django.shortcuts import render_to_response
from django.views.decorators.http import require_GET, require_POST
from webinars_web.webinars.forms.event import EventForm
from marketplace.decorators import marketplace
from sanetime import time
from django.conf import settings
import hapi.leads
#from django.core import management
from django.template import RequestContext
from webinars_web.webinars import utils
import csv
import logging
def bucket_events(hub):
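    # Gather every non-deleted event for this hub (annotated with registrant
    # and attendee counts), attach the landing pages reachable through its
    # forms, and split the events into past and future buckets.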
from webinars_web.webinars import models as wm
events = wm.Event.objects.filter(
account__hub=hub, deleted_at__isnull=True).select_related('current_sync','account').extra(
select={'registrant_count': 'SELECT COUNT(*) FROM webinars_registrant WHERE webinars_registrant.event_id=webinars_event.id'}).extra(
select={'attendant_count': 'SELECT COUNT(*) FROM webinars_registrant WHERE webinars_registrant.event_id=webinars_event.id AND started_at IS NOT NULL'})
events = sorted(events, key=attrgetter('starts_at'), reverse=True)
event_ids_form_ids = [(ef.event_id, ef.cms_form_id) for ef in wm.EventForm.objects.filter(event__in=wm.Event.objects.filter(account__hub=hub, deleted_at__isnull=True), cms_form__is_sync_target=False)]
event_id_to_form_ids_map = {}
for event_id, form_id in event_ids_form_ids:
event_id_to_form_ids_map.setdefault(event_id,[]).append(form_id)
form_ids_lps = [(lp.cms_form_id, lp) for lp in wm.LandingPage.objects.filter(cms_form__in=set(ef[1] for ef in event_ids_form_ids))]
form_id_to_lp_map = {}
for form_id, lp in form_ids_lps:
form_id_to_lp_map.setdefault(form_id,[]).append(lp)
    for event in events: #TODO: this is creating a 2N+1 situation -- need to refactor!
event.landing_pages = []
for form_id in event_id_to_form_ids_map.get(event.id,[]):
event.landing_pages.extend(form_id_to_lp_map[form_id])
now = time()
return utils.partition(events, lambda e: (e.ended_at < now), [True,False])
@marketplace
@require_GET
def _list(request, which): # returns the partial list requested (future or past)-- used by ajax table replace
from webinars_web.webinars import models as wm
hub = wm.Hub.ensure(request.marketplace.hub_id)
buckets = bucket_events(hub)
is_future = which.lower()=='future'
is_past = not is_future
return render_to_response('events/_list.djml', {
'events': buckets[is_past],
'past': is_past,
'empty_callout': is_future
}, context_instance=RequestContext(request))
@marketplace
@require_GET
def list(request):
from webinars_web.webinars import models as wm
hub = wm.Hub.ensure(request.marketplace.hub_id, select_related=['current_sync','last_sync'])
buckets = bucket_events(hub)
return render_to_response('events/list.djml', {
'future_events': buckets[False],
'past_events': buckets[True],
'hub': hub,
}, context_instance=RequestContext(request))
def filter_registrants(registrants, segment):
if segment == 'noshows': return [r for r in registrants if not r.get('started_at')]
elif segment == 'attendees': return [r for r in registrants if r.get('started_at')]
else: return registrants
@marketplace
@require_GET
def export(request, event_id, segment):
if segment not in ['noshows', 'attendees', 'registrants']: return HttpResponseForbidden()
attrs = ['first_name', 'last_name', 'email']
from webinars_web.webinars import models as wm
registrant_set = wm.Event.objects.filter(pk=event_id)[0].registrant_set.values()
logging.debug('CSVDEBUG: event=%s, segment=%s' % (event_id, segment))
name = '%s%s' % (segment, event_id)
logging.debug('CSVDEBUG: filename=%s' % name)
people = filter_registrants(registrant_set, segment)
response = HttpResponse(mimetype='text/csv')
response['Content-Disposition'] = 'attachment; filename=%s.csv' % name
writer = csv.writer(response)
writer.writerow(['FirstName', 'LastName', 'Email'])
for p in people:
writer.writerow([p.get(attr).encode('utf-8') for attr in attrs])
return response
def get_fresh_last_modified_at(hub, guid):
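    # Fetch the single most recently modified HubSpot lead for this form and
    # return its timestamp plus one millisecond (or epoch zero if there are
    # none), presumably so later syncs only pick up newer modifications.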
leads_client = hapi.leads.LeadsClient(settings.HUBSPOT_API_KEY, hub_id=hub.id, env=settings.API_ENV)
leads = leads_client.get_leads(
time_pivot = 'lastModifiedAt',
sort = 'lastModifiedAt',
dir = 'desc',
max = 1,
form_guid = guid)
if leads:
return time(us=leads[0]['lastModifiedAt']*1000 + 1000)
else:
return time(0)
def new_or_edit(request, event_id=None):
from webinars_web.webinars import models as wm
hub = wm.Hub.ensure(request.marketplace.hub_id)
kwargs = {'hub':hub}
old_sync_leads_for_all_time = None
if event_id:
kwargs['instance']=wm.Event.objects.select_related('account').get(pk=event_id)
old_sync_leads_for_all_time = kwargs['instance'].sync_leads_for_all_time
if request.method == 'POST': # If the form has been submitted...
form = EventForm(request.POST, **kwargs) # A form bound to the POST data
if form.is_valid(): # All validation rules pass
# Process the data in form.cleaned_data
# ...
event = form.save(commit=False)
tz = kwargs.get('instance') and kwargs['instance'].starts_at.tz or hub.timezone
event.starts_at = time(form.cleaned_data['starts_at_ndt'], tz)
event.duration = int(form.cleaned_data['duration'])
event.ensure_hashcode()
event.save()
old_cms_forms = dict((cf.guid, cf) for cf in event.cms_forms.all())
new_cms_forms = dict((cf.guid, cf) for cf in form.cleaned_data['cms_forms'])
for guid in (set(new_cms_forms) - set(old_cms_forms)):
wm.EventForm.objects.create(cms_form=new_cms_forms[guid], event=event, last_last_modified_at = not event.sync_leads_for_all_time and get_fresh_last_modified_at(hub, guid) or 0, converted_at_cutoff = not event.sync_leads_for_all_time and time() or 0)
for guid in (set(old_cms_forms) - set(new_cms_forms)):
wm.EventForm.objects.filter(cms_form=old_cms_forms[guid], event=event).delete()
if old_sync_leads_for_all_time is not None and old_sync_leads_for_all_time != event.sync_leads_for_all_time:
for event_form in event.eventform_set.all():
if event.sync_leads_for_all_time:
event_form.last_last_modified_at = 0
event_form.converted_at_cutoff = 0
                    # doing the else doesn't really make sense because we could've already been syncing before
event_form.save()
return HttpResponseRedirect('%sevents'%request.marketplace.base_url) # Redirect after POST
else:
wm.CmsForm.sync(hub)
form = EventForm(**kwargs) # An unbound form
return render_to_response('events/%s.djml'%(event_id and 'edit' or 'new'), {
'form': form,
}, context_instance=RequestContext(request))
@marketplace
def new(request):
return new_or_edit(request)
@marketplace
def edit(request, event_id):
return new_or_edit(request, event_id)
@marketplace
@require_POST
def destroy(request, event_id):
from webinars_web.webinars import models as wm
try:
event = wm.Event.objects.get(pk=event_id)
except Exception:
return HttpResponseNotFound()
if event.account.hub_id != request.marketplace.hub_id:
return HttpResponseForbidden()
event.deleted_at = time()
event.save()
return HttpResponse()
@marketplace
def show(request, event_id):
from webinars_web.webinars import models as wm
hub = wm.Hub.ensure(request.marketplace.hub_id)
try:
event = wm.Event.objects.select_related('account','account__hub').get(pk=event_id)
except:
return HttpResponseNotFound()
if event.account.hub_id != hub.id:
return HttpResponseForbidden()
registrants = event.registrant_set.select_related('cms_form').extra(
select = { 'durationx': 'IF(ISNULL(stopped_at) OR ISNULL(started_at), NULL, stopped_at-started_at)' },
order_by = ['-durationx']
)
for r in registrants:
r.event = event
lps = [lp for lp in wm.LandingPage.objects.filter(cms_form__event=event)]
forms_to_lps = {}
for lp in lps:
forms_to_lps.setdefault(lp.cms_form.guid,[]).append(lp)
for r in registrants:
if r.effective_duration:
if not r.cms_form or r.cms_form.is_sync_target:
r.landing_pages = []
else:
r.landing_pages = forms_to_lps[r.cms_form.guid]
now = time()
if event._time_ended_at or event.ends_at < now:
partitioned_registrants = utils.partition(registrants, lambda r: bool(r.started_at and r.stopped_at), [True, False])
return render_to_response('events/show.djml', {
'event': event,
'future': False,
'registrants': registrants,
'registrants_count': len(registrants),
'attendees': partitioned_registrants[True],
'attendees_count': len(partitioned_registrants[True]),
'noshows': partitioned_registrants[False],
'noshows_count': len(partitioned_registrants[False]),
'MARKETPLACE_SLUG': settings.MARKETPLACE_SLUG,
}, context_instance=RequestContext(request))
else:
return render_to_response('events/show.djml', {
'event': event,
'future': True,
'registrants': registrants,
'registrants_count': len(registrants),
'MARKETPLACE_SLUG': settings.MARKETPLACE_SLUG,
}, context_instance=RequestContext(request))
def sync(request, event_id):
from webinars_web.webinars import models as wm
force = request.REQUEST.get('force') and True or False
postbin = request.REQUEST.get('postbin') or None
auto = (request.REQUEST.get('auto') is None or request.REQUEST.get('auto').lower()!='false') and True or False
event = wm.Event.objects.get(pk=event_id)
sync_stages = event.trigger_sync(force=force, auto=auto)
return render_to_response('events/trigger_sync.djml', {'event':event, 'sync_stages':sync_stages, 'postbin':postbin}, context_instance=RequestContext(request))
|
apache-2.0
| -1,903,053,518,049,108,200
| 44.640693
| 265
| 0.656455
| false
| 3.433084
| false
| false
| false
|
edmundgentle/schoolscript
|
SchoolScript/bin/Debug/pythonlib/Lib/distutils/command/bdist_dumb.py
|
1
|
4801
|
"""distutils.command.bdist_dumb
Implements the Distutils 'bdist_dumb' command (create a "dumb" built
distribution -- i.e., just an archive to be unpacked under $prefix or
$exec_prefix)."""
__revision__ = "$Id$"
import os
from distutils.core import Command
from distutils.util import get_platform
from distutils.dir_util import remove_tree, ensure_relative
from distutils.errors import *
from distutils.sysconfig import get_python_version
from distutils import log
class bdist_dumb(Command):
description = "create a \"dumb\" built distribution"
user_options = [('bdist-dir=', 'd',
"temporary directory for creating the distribution"),
('plat-name=', 'p',
"platform name to embed in generated filenames "
"(default: %s)" % get_platform()),
('format=', 'f',
"archive format to create (tar, ztar, gztar, zip)"),
('keep-temp', 'k',
"keep the pseudo-installation tree around after " +
"creating the distribution archive"),
('dist-dir=', 'd',
"directory to put final built distributions in"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
('relative', None,
"build the archive using relative paths"
"(default: false)"),
]
boolean_options = ['keep-temp', 'skip-build', 'relative']
default_format = { 'posix': 'gztar',
'nt': 'zip',
'os2': 'zip' }
def initialize_options(self):
self.bdist_dir = None
self.plat_name = None
self.format = None
self.keep_temp = 0
self.dist_dir = None
self.skip_build = 0
self.relative = 0
def finalize_options(self):
if self.bdist_dir is None:
bdist_base = self.get_finalized_command('bdist').bdist_base
self.bdist_dir = os.path.join(bdist_base, 'dumb')
if self.format is None:
try:
self.format = self.default_format[os.name]
except KeyError:
raise DistutilsPlatformError(
"don't know how to create dumb built distributions "
"on platform %s" % os.name)
self.set_undefined_options('bdist',
('dist_dir', 'dist_dir'),
('plat_name', 'plat_name'))
def run(self):
if not self.skip_build:
self.run_command('build')
install = self.reinitialize_command('install', reinit_subcommands=1)
install.root = self.bdist_dir
install.skip_build = self.skip_build
install.warn_dir = 0
log.info("installing to %s" % self.bdist_dir)
self.run_command('install')
# And make an archive relative to the root of the
# pseudo-installation tree.
archive_basename = "%s.%s" % (self.distribution.get_fullname(),
self.plat_name)
# OS/2 objects to any ":" characters in a filename (such as when
# a timestamp is used in a version) so change them to hyphens.
if os.name == "os2":
archive_basename = archive_basename.replace(":", "-")
pseudoinstall_root = os.path.join(self.dist_dir, archive_basename)
if not self.relative:
archive_root = self.bdist_dir
else:
if (self.distribution.has_ext_modules() and
(install.install_base != install.install_platbase)):
raise DistutilsPlatformError(
"can't make a dumb built distribution where "
"base and platbase are different (%s, %s)"
% (repr(install.install_base),
repr(install.install_platbase)))
else:
archive_root = os.path.join(self.bdist_dir,
ensure_relative(install.install_base))
# Make the archive
filename = self.make_archive(pseudoinstall_root,
self.format, root_dir=archive_root)
if self.distribution.has_ext_modules():
pyversion = get_python_version()
else:
pyversion = 'any'
self.distribution.dist_files.append(('bdist_dumb', pyversion,
filename))
if not self.keep_temp:
remove_tree(self.bdist_dir, dry_run=self.dry_run)
|
gpl-2.0
| -1,751,620,946,305,370,000
| 38.008333
| 76
| 0.519267
| false
| 4.533522
| false
| false
| false
|
Fabien-B/Web_ASA_Sourdoire
|
www/parcelle.py
|
1
|
4236
|
import mysql.connector
import datetime
class Parcelle(object):
database = 'IENAC14_asa'
user = 'root'
password = 'root'
host = '127.0.0.1'
def __init__(self,id_parc,compteur=None,nom=None,lat=None,lon=None,altitude=None):
if id_parc>0:
self.load(id_parc)
else:
connection = mysql.connector.connect(user=Parcelle.user, password=Parcelle.password,host=Parcelle.host,database=Parcelle.database)
curseur = connection.cursor()
requete = 'select max(Id_parcelle) from Parcelle;'
curseur.execute(requete)
(maxId,)=curseur.fetchall()[0]
self.id = maxId + 1
self.compteur = compteur
self.nom = nom
self.lat = lat
self.lon = lon
self.altitude = altitude
curseur.close()
connection.close()
def save(self):
        if self.compteur is None:
            raise ParcelleError("compteur is required to create a parcelle")
connection = mysql.connector.connect(user=Parcelle.user, password=Parcelle.password,host=Parcelle.host,database=Parcelle.database)
curseur = connection.cursor()
requete = "INSERT INTO Parcelle VALUES ({0},{1},{2},{3},{4},{5});".format(self.id, self.compteur, self.nom, self.lat, self.lon, self.altitude)
curseur.execute(requete)
connection.commit()
curseur.close()
connection.close()
def load(self,id_parc):
connection = mysql.connector.connect(user=Parcelle.user, password=Parcelle.password,host=Parcelle.host,database=Parcelle.database)
curseur = connection.cursor()
requete = 'select * from Parcelle where Id_parcelle={};'.format(id_parc)
curseur.execute(requete)
try:
(_,compteur,nom,lat,lon,altitude)=curseur.fetchall()[0]
except IndexError:
raise ParcelleError("Parcelle with id {} doesn't exist".format(id_parc))
curseur.close()
connection.close()
self.id = id_parc
self.compteur = compteur
self.nom = nom
self.lat = lat
self.lon = lon
self.altitude = altitude
def release_my_ornot(self, exploitant=0):
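        # Close the current Propriete row for this parcel (set date_fin) and
        # insert a new one: exploitant 0 marks the parcel as free, otherwise
        # ownership passes to the given exploitant.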
connection = mysql.connector.connect(user=Parcelle.user, password=Parcelle.password,host=Parcelle.host,database=Parcelle.database)
curseur = connection.cursor()
actualtime = str(datetime.datetime.now())
requete = 'UPDATE Propriete SET date_fin="{1}" WHERE Id_parcelle={0} AND date_fin IS NULL;'.format(self.id, actualtime)
curseur.execute(requete)
requete = 'select max(Id_propriete) from Propriete;'
curseur.execute(requete)
(maxId,)=curseur.fetchall()[0]
if exploitant==0:
requete = 'INSERT INTO Propriete VALUES({2}, {0}, 0, "{1}", NULL);'.format(self.id, actualtime, maxId+1)
else:
requete = 'INSERT INTO Propriete VALUES({2}, {0}, {3}, "{1}", NULL);'.format(self.id, actualtime, maxId+1, exploitant.id)
curseur.execute(requete)
connection.commit()
curseur.close()
connection.close()
@staticmethod
def get_exploitant_parcelle_id(id_ex):
connection = mysql.connector.connect(user=Parcelle.user, password=Parcelle.password,host=Parcelle.host,database=Parcelle.database)
curseur = connection.cursor()
if id_ex == 0:
requete = 'select Id_parcelle FROM Parcelle;'
elif id_ex == -1: #parcelles libres
requete = 'select Parcelle.Id_parcelle FROM Parcelle,Propriete WHERE Propriete.Id_parcelle = Parcelle.Id_parcelle AND Id_exploitant = 0 AND date_fin IS NULL ORDER BY Parcelle.Id_parcelle;'
else:
requete = 'select Parcelle.Id_parcelle FROM Parcelle,Propriete WHERE Propriete.Id_parcelle = Parcelle.Id_parcelle AND Id_exploitant = {0} AND date_fin IS NULL ORDER BY Parcelle.Id_parcelle;'.format(id_ex)
curseur.execute(requete)
id_parc = curseur.fetchall()
curseur.close()
connection.close()
id_parc_list = []
for (id,) in id_parc:
id_parc_list.append(id)
return id_parc_list
class ParcelleError(Exception):
pass
|
lgpl-3.0
| -4,204,626,003,705,896,400
| 41.37
| 216
| 0.628895
| false
| 3.27357
| false
| false
| false
|
saknis/upelis
|
logs4.py
|
1
|
5178
|
#!/usr/bin/env python
import base64
import cgi
import datetime
import logging
import os
import time
#from datetime import datetime, date, time
import urllib
import wsgiref.handlers
import string
from google.appengine.api import users
from google.appengine.api.logservice import logservice
from google.appengine.ext import db
#from google.appengine.ext import webapp
import webapp2 as webapp
# This sample gets the app request logs up to the current time, displays a batch of logs
# at a time, including all AppLogs, with a Next link to let the user "page"
# through the results, using the RequestLog offset property.
class MainPage(webapp.RequestHandler):
def get(self):
logging.info('Starting Main handler')
# Get the incoming offset param from the Next link to advance through
# the logs. (The first time the page is loaded, there won't be any offset.)
start_time_set=False
try:
offset = self.request.get('offset') or None
if offset:
offset = base64.urlsafe_b64decode(str(offset))
except TypeError:
offset = None
try:
start_time = self.request.get('start_time') or None
if start_time:
start_time = float(base64.urlsafe_b64decode(str(start_time)))
start_time_set=True
except TypeError:
start_time = None
start_time_set=False
try:
filter = str(self.request.get('filter')) or None
except TypeError:
filter = None
# Set up end time for our query.
# Count specifies the max number of RequestLogs shown at one time.
    # Use a boolean to initially turn off visibility of the "Next" link.
count = 1000
show_next = True
last_offset = 5000
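    # Default window: the first day of the current month up to now. When
    # paging, start_time comes from the previous request and the window
    # shifts back one calendar month per page.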
dt=datetime.datetime.now()
tt=dt.timetuple()
year=tt[0]
month=tt[1]
ttt=time.strptime((("01 %s %s") % (month,year)), "%d %m %Y")
if not start_time_set:
end_time = time.time()
start_time = time.mktime(ttt)
else:
dt2=datetime.datetime.utcfromtimestamp(float(start_time))
tt2=dt2.timetuple()
year2=tt2[0]
month2=tt2[1]
month2=month2+1
if month2==13:
month2=1
year2=year2+1
ttt2=time.strptime((("01 %s %s") % (month2,year2)), "%d %m %Y")
end_time=time.mktime(ttt2)
dt3=datetime.datetime.utcfromtimestamp(float(start_time))
tt3=dt3.timetuple()
year3=tt3[0]
month3=tt3[1]
month3=month3-1
if month3==0:
month3=12
year3=year3-1
ttt3=time.strptime((("01 %s %s") % (month3,year3)), "%d %m %Y")
start_time_next=time.mktime(ttt3)
# Iterate through all the RequestLog objects, displaying some fields and
    # iterate through all AppLogs belonging to each RequestLog count times.
# In each iteration, save the offset to last_offset; the last one when
# count is reached will be used for the link.
i = 0
for req_log in logservice.fetch(start_time=start_time,end_time=end_time, offset=offset,
minimum_log_level=logservice.LOG_LEVEL_INFO,
include_app_logs=False):
ip=req_log.ip
status=str(req_log.status)
if filter and status and not string.find(status, filter) == -1:
# self.response.out.write("<br /> REQUEST LOG <br />")
# self.respons
self.response.out.write("""%s <br />""" % (req_log.combined))
i += 1
else:
if not filter:
self.response.out.write("""%s <br />""" % (req_log.combined))
i += 1
# self.response.out.write("""IP: %s <br /> Method: %s <br />
# Resource: %s <br />""" % (req_log.ip,
# req_log.method, req_log.resource))
# self.response.out.write("Date: "+datetime.datetime.fromtimestamp(req_log.end_time).strftime('%D %T UTC') +"<br />")
last_offset= req_log.offset
for app_log in req_log.app_logs:
self.response.out.write("<br />APP LOG<br />")
statslink = ("<a href=\"http://%s/stats/details?time=%s\">%s</a>" % (os.environ['HTTP_HOST'], app_log.time,app_log.time))
self.response.out.write("<br />STATS DETAILS: %s<br />" % (statslink))
self.response.out.write("Date: "+datetime.datetime.fromtimestamp(app_log.time).strftime('%Y-%m-%d %H:%M:%S UTC') +"<br />")
self.response.out.write("<br />Message: "+app_log.message+"<br />")
if i >= count:
show_next = True
break
# Prepare the offset URL parameters, if any.
if show_next:
query = self.request.GET
query['offset'] = base64.urlsafe_b64encode(last_offset)
query['start_time'] = base64.urlsafe_b64encode(("%s")%(start_time_next))
next_link = urllib.urlencode(query)
self.response.out.write("<a href=\"/logs4?"+next_link+"\">Next</a>")
self.response.out.write("<br />")
#def main():
logging.getLogger().setLevel(logging.DEBUG)
app = webapp.WSGIApplication([
('/logs4', MainPage),
], debug=True)
# wsgiref.handlers.CGIHandler().run(application)
#if __name__ == '__main__':
# main()
|
lgpl-2.1
| 5,720,461,625,587,779,000
| 33
| 131
| 0.60506
| false
| 3.415567
| false
| false
| false
|
clearpathrobotics/axis_camera
|
nodes/axis.py
|
1
|
8123
|
#!/usr/bin/env python
#
# Axis camera image driver. Based on:
# https://code.ros.org/svn/wg-ros-pkg/branches/trunk_cturtle/sandbox/axis_camera
# /axis.py
#
import threading
import urllib2
import rospy
from sensor_msgs.msg import CompressedImage, CameraInfo
import camera_info_manager
class StreamThread(threading.Thread):
def __init__(self, axis):
threading.Thread.__init__(self)
self.axis = axis
self.daemon = True
self.timeoutSeconds = 2.5
def run(self):
while(True):
self.stream()
def stream(self):
while(True):
self.formURL()
self.authenticate()
if self.openURL():
self.publishFramesContinuously()
rospy.sleep(2) # if stream stays intact we shouldn't get to this
def formURL(self):
self.url = 'http://%s/mjpg/video.mjpg' % self.axis.hostname
self.url += "?fps=0&resolution=%dx%d" % (self.axis.width,
self.axis.height)
# support for Axis F34 multicamera switch
if (self.axis.camera != 0):
self.url += "&camera=%d" % self.axis.camera
rospy.logdebug('opening ' + str(self.axis))
def authenticate(self):
'''only try to authenticate if user/pass configured. I have not
used this method (yet).'''
if self.axis.password != '' and self.axis.username != '':
# create a password manager
password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
# Add the username and password, use default realm.
top_level_url = "http://" + self.axis.hostname
password_mgr.add_password(None, top_level_url, self.axis.username,
self.axis.password)
if self.axis.use_encrypted_password :
handler = urllib2.HTTPDigestAuthHandler(password_mgr)
else:
handler = urllib2.HTTPBasicAuthHandler(password_mgr)
# create "opener" (OpenerDirector instance)
opener = urllib2.build_opener(handler)
# ...and install it globally so it can be used with urlopen.
urllib2.install_opener(opener)
def openURL(self):
'''Open connection to Axis camera using http'''
try:
self.fp = urllib2.urlopen(self.url, timeout=self.timeoutSeconds)
return(True)
except urllib2.URLError, e:
            rospy.logwarn('Error opening URL %s. ' % self.url +
                          'Possible timeout. Looping until camera appears.')
return(False)
def publishFramesContinuously(self):
'''Continuous loop to publish images'''
while(True):
try:
self.findBoundary()
self.getImage()
self.publishMsg()
self.publishCameraInfoMsg()
except:
rospy.loginfo('Timed out while trying to get message.')
break
def findBoundary(self):
'''The string "--myboundary" is used to denote the start of an image in
Axis cameras'''
while(True):
boundary = self.fp.readline()
if boundary=='--myboundary\r\n':
break
def getImage(self):
'''Get the image header and image itself'''
self.getHeader()
self.getImageData()
def getHeader(self):
self.header = {}
while(True):
line = self.fp.readline()
if line == "\r\n":
break
line = line.strip()
parts = line.split(": ", 1)
try:
self.header[parts[0]] = parts[1]
except:
rospy.logwarn('Problem encountered with image header. Setting '
'content_length to zero')
self.header['Content-Length'] = 0 # set content_length to zero if
# there is a problem reading header
self.content_length = int(self.header['Content-Length'])
def getImageData(self):
'''Get the binary image data itself (ie. without header)'''
if self.content_length>0:
self.img = self.fp.read(self.content_length)
self.fp.readline() # Read terminating \r\n and do nothing with it
def publishMsg(self):
'''Publish jpeg image as a ROS message'''
self.msg = CompressedImage()
self.msg.header.stamp = rospy.Time.now()
self.msg.header.frame_id = self.axis.frame_id
self.msg.format = "jpeg"
self.msg.data = self.img
self.axis.pub.publish(self.msg)
def publishCameraInfoMsg(self):
'''Publish camera info manager message'''
cimsg = self.axis.cinfo.getCameraInfo()
cimsg.header.stamp = self.msg.header.stamp
cimsg.header.frame_id = self.axis.frame_id
cimsg.width = self.axis.width
cimsg.height = self.axis.height
self.axis.caminfo_pub.publish(cimsg)
class Axis:
def __init__(self, hostname, username, password, width, height, frame_id,
camera_info_url, use_encrypted_password, camera):
self.hostname = hostname
self.username = username
self.password = password
self.width = width
self.height = height
self.frame_id = frame_id
self.camera_info_url = camera_info_url
self.use_encrypted_password = use_encrypted_password
self.camera = camera
# generate a valid camera name based on the hostname
self.cname = camera_info_manager.genCameraName(self.hostname)
self.cinfo = camera_info_manager.CameraInfoManager(cname = self.cname,
url = self.camera_info_url)
self.cinfo.loadCameraInfo() # required before getCameraInfo()
self.st = None
self.pub = rospy.Publisher("image_raw/compressed", CompressedImage, self, queue_size=1)
self.caminfo_pub = rospy.Publisher("camera_info", CameraInfo, self, queue_size=1)
def __str__(self):
"""Return string representation."""
return(self.hostname + ',' + self.username + ',' + self.password +
'(' + str(self.width) + 'x' + str(self.height) + ')')
def peer_subscribe(self, topic_name, topic_publish, peer_publish):
'''Lazy-start the image-publisher.'''
if self.st is None:
self.st = StreamThread(self)
self.st.start()
def main():
rospy.init_node("axis_driver")
arg_defaults = {
'hostname': '192.168.0.90', # default IP address
'username': 'root', # default login name
'password': '',
'width': 640,
'height': 480,
'frame_id': 'axis_camera',
'camera_info_url': '',
'use_encrypted_password' : False,
'camera' : 0}
args = updateArgs(arg_defaults)
Axis(**args)
rospy.spin()
def updateArgs(arg_defaults):
'''Look up parameters starting in the driver's private parameter space, but
also searching outer namespaces. Defining them in a higher namespace allows
the axis_ptz.py script to share parameters with the driver.'''
args = {}
for name, val in arg_defaults.iteritems():
full_name = rospy.search_param(name)
if full_name is None:
args[name] = val
else:
args[name] = rospy.get_param(full_name, val)
# resolve frame_id with tf_prefix (unless already absolute)
if args['frame_id'][0] != '/': # not absolute?
tf_prefix = rospy.search_param('tf_prefix')
prefix_val = ''
if tf_prefix is not None: # prefix defined?
prefix_val = rospy.get_param(tf_prefix)
if prefix_val[0] != '/': # prefix not absolute?
prefix_val = '/' + prefix_val
args['frame_id'] = prefix_val + '/' + args['frame_id']
return(args)
if __name__ == "__main__":
main()
|
bsd-3-clause
| 9,019,188,777,415,011,000
| 36.43318
| 95
| 0.562723
| false
| 4.117081
| false
| false
| false
|
jacebrowning/gridcommand
|
setup.py
|
1
|
1810
|
#!/usr/bin/env python
"""Setup script for the package."""
import os
import sys
import setuptools
PACKAGE_NAME = 'gridcommand'
MINIMUM_PYTHON_VERSION = 3, 5
def check_python_version():
"""Exit when the Python version is too low."""
if sys.version_info < MINIMUM_PYTHON_VERSION:
sys.exit("Python {}.{}+ is required.".format(*MINIMUM_PYTHON_VERSION))
def read_package_variable(key):
"""Read the value of a variable from the package without importing."""
module_path = os.path.join(PACKAGE_NAME, '__init__.py')
with open(module_path) as module:
for line in module:
parts = line.strip().split(' ')
if parts and parts[0] == key:
return parts[-1].strip("'")
assert 0, "'{0}' not found in '{1}'".format(key, module_path)
def read_descriptions():
"""Build a description for the project from documentation files."""
try:
readme = open("README.rst").read()
changelog = open("CHANGELOG.rst").read()
except IOError:
return "<placeholder>"
else:
return readme + '\n' + changelog
check_python_version()
setuptools.setup(
name=read_package_variable('__project__'),
version=read_package_variable('__version__'),
description="TBD",
url='https://github.com/jacebrowning/gridcommand',
author='Jace Browning',
author_email='jacebrowning@gmail.com',
packages=setuptools.find_packages(),
entry_points={'console_scripts': []},
long_description=read_descriptions(),
license='LGPL',
classifiers=[
'Development Status :: 1 - Planning',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.5',
],
install_requires=open("requirements.txt").readlines(),
)
|
lgpl-3.0
| -8,046,625,268,878,089,000
| 25.617647
| 78
| 0.628729
| false
| 3.892473
| false
| false
| false
|
ledbutter/ProjectEulerPython
|
Problem39.py
|
1
|
1375
|
# If p is the perimeter of a right angle triangle with integral length sides, {a,b,c}, there are exactly three solutions for p = 120.
# {20,48,52}, {24,45,51}, {30,40,50}
# For which value of p <= 1000 is the number of solutions maximised?
# p = 120
# sols = 0
# for a in range(1, p//2):
# for b in range(a+1, p-a):
# for c in range(b+1, p-a-b+1):
# if a**2 + b**2 == c**2 and a+b+c==p:
# print(a,b,c)
# sols += 1
# print(sols)
#def possible_perimters(p):
#http://blog.dreamshire.com/2009/04/22/project-euler-problem-39-solution/
# t_max = 0
# p_limit = 1000
# for p in range(p_limit//2, p_limit+1, 2):
# t = 0;
# for a in range(2, p//4+1):
# if p*(p - 2*a) % (2*(p-a)) == 0: t += 1
# if t > t_max: (t_max, p_max) = (t, p)
# print(p_max)
#840
#my original code would have worked but it was incredibly slow,
#this is an optimized version of that code based on the message board
from math import sqrt
max_p = max_solutions = current_solutions = 0
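# Only even perimeters in the upper half of the range need checking: triple
# perimeters are always even, and doubling a triple doubles its perimeter, so
# any p <= 500 with t solutions yields 2p <= 1000 with at least t solutions.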
for p in range(500, 1001, 2):
#print(p)
current_solutions = 0
for a in range(1, p//4):
for b in range(a+1, (p-a)//2):
c = sqrt(a**2+b**2)
if a+b+c==p:
#print(a,b,c)
current_solutions += 1
if current_solutions > max_solutions:
max_p = p
max_solutions = current_solutions
print(max_p, max_solutions)
|
mit
| 9,017,787,300,578,095,000
| 22.157895
| 133
| 0.576727
| false
| 2.468582
| false
| false
| false
|
google/personfinder
|
app/indexing.py
|
1
|
10056
|
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Support for approximate string prefix queries.
A hit is defined when the words entered in the query are all prefixes of one
of the words in the given and family names on the record. For example, a
record with the fields:
given_name: ABC 123
family_name: DEF 456
will be retrieved by:
"ABC 456"
"45 ED"
"123 ABC"
"ABC 123 DEF"
but will not be retrieved by:
"ABC 1234"
"ABC 123 DEF 456 789"
"""
from text_query import TextQuery
from google.appengine.ext import db
import unicodedata
import logging
import model
import re
import jautils
def update_index_properties(entity):
"""Finds and updates all prefix-related properties on the given entity."""
# Using set to make sure I'm not adding the same string more than once.
names_prefixes = set()
for property in entity._fields_to_index_properties:
for value in TextQuery(getattr(entity, property)).query_words:
if property in entity._fields_to_index_by_prefix_properties:
for n in xrange(1,len(value)+1):
pref = value[:n]
if pref not in names_prefixes:
names_prefixes.add(pref)
else:
if value not in names_prefixes:
names_prefixes.add(value)
# Add alternate names to the index tokens. We choose not to index prefixes
# of alternate names so that we can keep the index size small.
    # TODO(ryok): This strategy works well for Japanese, but how about other
# languages?
names_prefixes |= get_alternate_name_tokens(entity)
# Put a cap on the number of tokens, just as a precaution.
MAX_TOKENS = 100
entity.names_prefixes = list(names_prefixes)[:MAX_TOKENS]
if len(names_prefixes) > MAX_TOKENS:
logging.debug('MAX_TOKENS exceeded for %s' %
' '.join(list(names_prefixes)))
def get_alternate_name_tokens(person):
"""Returns alternate name tokens and their variations."""
tokens = set(TextQuery(person.alternate_names).query_words)
# This is no-op for non-Japanese.
tokens |= set(jautils.get_additional_tokens(tokens))
return tokens
class CmpResults():
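    """Comparator for ordering search results: people whose names match the
    query more closely (exact Latin or CJK name matches first) rank higher."""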
def __init__(self, query):
self.query = query
self.query_words_set = set(query.words)
def __call__(self, p1, p2):
if ((p1.primary_full_name and
p1.primary_full_name == p2.primary_full_name) or
((p1.given_name or p1.family_name) and
p1.given_name == p2.given_name and
p1.family_name == p2.family_name)):
return 0
self.set_ranking_attr(p1)
self.set_ranking_attr(p2)
r1 = self.rank(p1)
r2 = self.rank(p2)
if r1 == r2:
# if rank is the same sort by name so same names will be together
return cmp(p1._normalized_full_name.normalized,
p2._normalized_full_name.normalized)
else:
return cmp(r2, r1)
def set_ranking_attr(self, person):
"""Consider save these into to db"""
if not hasattr(person, '_normalized_given_name'):
person._normalized_given_name = TextQuery(person.given_name)
person._normalized_family_name = TextQuery(person.family_name)
person._normalized_full_name = TextQuery(person.full_name)
person._name_words = set(person._normalized_full_name.words)
person._alt_name_words = set(
TextQuery(person.alternate_names).words)
# TODO(ryok): re-consider the ranking putting more weight on full_name (a
# required field) instead of given name and family name pair (optional).
def rank(self, person):
# The normalized query words, in the order as entered.
ordered_words = self.query.normalized.split()
if (ordered_words ==
person._normalized_given_name.words +
person._normalized_family_name.words):
# Matches a Latin name exactly (given name followed by surname).
return 10
if (re.match(ur'^[\u3400-\u9fff]$', person.family_name) and
ordered_words in [
[person.family_name + person.given_name],
[person.family_name, person.given_name]
]):
# Matches a CJK name exactly (surname followed by given name).
return 10
if (re.match(ur'^[\u3400-\u9fff]+$', person.family_name) and
ordered_words in [
[person.family_name + person.given_name],
[person.family_name, person.given_name]
]):
# Matches a CJK name exactly (surname followed by given name).
# A multi-character surname is uncommon, so it is ranked a bit lower.
return 9.5
if (ordered_words ==
person._normalized_family_name.words +
person._normalized_given_name.words):
# Matches a Latin name with given and family name switched.
return 9
if (re.match(ur'^[\u3400-\u9fff]$', person.given_name) and
ordered_words in [
[person.given_name + person.family_name],
[person.given_name, person.family_name]
]):
# Matches a CJK name with surname and given name switched.
return 9
if (re.match(ur'^[\u3400-\u9fff]+$', person.given_name) and
ordered_words in [
[person.given_name + person.family_name],
[person.given_name, person.family_name]
]):
# Matches a CJK name with surname and given name switched.
# A multi-character surname is uncommon, so it's ranked a bit lower.
return 8.5
if person._name_words == self.query_words_set:
# Matches all the words in the given and family name, out of order.
return 8
if self.query.normalized in [
person._normalized_given_name.normalized,
person._normalized_family_name.normalized,
]:
# Matches the given name exactly or the family name exactly.
return 7
if person._name_words.issuperset(self.query_words_set):
# All words in the query appear somewhere in the name.
return 6
# Count the number of words in the query that appear in the name and
# also in the alternate names.
matched_words = person._name_words.union(
person._alt_name_words).intersection(self.query_words_set)
return min(5, 1 + len(matched_words))
def rank_and_order(results, query, max_results):
results.sort(CmpResults(query))
return results[:max_results]
def sort_query_words(query_words):
"""Sort query_words so that the query filters created from query_words are
more effective and consistent when truncated due to NeedIndexError, and
return the sorted list."""
# (1) Sort them lexicographically so that we return consistent search
# results for query 'AA BB CC DD' and 'DD AA BB CC' even when filters
# are truncated.
sorted_query_words = sorted(query_words)
# (2) Sort them according to popularity so that less popular query words,
# which are usually more effective filters, come first.
sorted_query_words = jautils.sorted_by_popularity(sorted_query_words)
# (3) Sort them according to the lengths so that longer query words,
# which are usually more effective filters, come first.
return sorted(sorted_query_words, key=len, reverse=True)
def search(repo, query_obj, max_results):
# As there are limits on the number of filters that we can apply and the
# number of entries we can fetch at once, the order of query words could
# potentially matter. In particular, this is the case for most Japanese
# names, many of which consist of 4 to 6 Chinese characters, each
# corresponding to an additional filter.
query_words = sort_query_words(query_obj.query_words)
logging.debug('query_words: %r' % query_words)
# First try the query with all the filters, and then keep backing off
# if we get NeedIndexError.
fetch_limit = 400
fetched = []
filters_to_try = len(query_words)
while filters_to_try:
query = model.Person.all_in_repo(repo)
for word in query_words[:filters_to_try]:
query.filter('names_prefixes =', word)
try:
fetched = query.fetch(fetch_limit)
logging.debug('query succeeded with %d filters' % filters_to_try)
break
except db.NeedIndexError:
filters_to_try -= 1
continue
logging.debug('indexing.search fetched: %d' % len(fetched))
# Now perform any filtering that App Engine was unable to do for us.
matched = []
for result in fetched:
for word in query_words:
if word not in result.names_prefixes:
break
else:
matched.append(result)
logging.debug('indexing.search matched: %d' % len(matched))
if len(fetched) == fetch_limit and len(matched) < max_results:
logging.debug('Warning: Fetch reached a limit of %d, but only %d '
'exact-matched the query (max_results = %d).' %
(fetch_limit, len(matched), max_results))
# Now rank and order the results.
return rank_and_order(matched, query_obj, max_results)
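# Illustrative usage sketch (not part of the original module). Assuming the same
# TextQuery class used above and a hypothetical repository id 'haiti', a caller
# would combine the pieces roughly like this:
#
#     query = TextQuery('John Smith')
#     people = search('haiti', query, max_results=100)
#     for person in people:
#         print person.primary_full_name
#
# sort_query_words() orders the filters, the datastore query narrows candidates by
# names_prefixes, and rank_and_order() applies CmpResults to sort the final list.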
|
apache-2.0
| 7,982,706,846,564,120,000
| 38.590551
| 81
| 0.625597
| false
| 4.020792
| false
| false
| false
|
miguelalba89/hfdp-python
|
combining/observer.py
|
1
|
7761
|
"""
Ducks problem with quackologists (observers)
Author: m1ge7
Date: 2014/03/24
"""
from abc import ABCMeta, abstractmethod
###############################################################################
#
###############################################################################
class QuackObservable:
__metaclass__ = ABCMeta
@abstractmethod
def register_observer(self, observer):
pass
@abstractmethod
def notify_observers(self):
pass
class Quackable(QuackObservable):
__metaclass__ = ABCMeta
@abstractmethod
def quack(self):
pass
class Observable(QuackObservable):
def __init__(self, duck):
self.__observers = []
self.__duck = duck
def register_observer(self, observer):
self.__observers.append(observer)
def notify_observers(self):
for obs in self.__observers:
obs.update(self.__duck)
def get_observers(self):
return self.__observers
class Observer:
__metaclass__ = ABCMeta
@abstractmethod
def update(self, duck):
pass
class Quackologist(Observer):
def update(self, duck):
print "Quackologist: " + str(duck) + " just quacked."
def __str__(self):
return "Quackologist"
###############################################################################
# Duck concrete classes
###############################################################################
class DecoyDuck(Quackable):
def __init__(self):
self.__observable = Observable(self)
def quack(self):
print "<< Silence >>"
self.notify_observers()
def register_observer(self, observer):
self.__observable.register_observer(observer)
def notify_observers(self):
self.__observable.notify_observers()
def __str__(self):
return "Decoy Duck"
class DuckCall(Quackable):
def __init__(self):
self.__observable = Observable(self)
def quack(self):
print "Kwak"
self.notify_observers()
def register_observer(self, observer):
self.__observable.register_observer(observer)
def notify_observers(self):
self.__observable.notify_observers()
def __str__(self):
return "Duck Call"
class MallardDuck(Quackable):
def __init__(self):
self.__observable = Observable(self)
def quack(self):
print "Quack"
self.notify_observers()
def register_observer(self, observer):
self.__observable.register_observer(observer)
def notify_observers(self):
self.__observable.notify_observers()
def __str__(self):
return "Mallard Duck"
class RedheadDuck(Quackable):
def __init__(self):
self.__observable = Observable(self)
def quack(self):
print "Quack"
self.notify_observers()
def register_observer(self, observer):
self.__observable.register_observer(observer)
def notify_observers(self):
self.__observable.notify_observers()
def __str__(self):
return "Redhead Duck"
class RubberDuck(Quackable):
def __init__(self):
self.__observable = Observable(self)
def quack(self):
print "Squeak"
self.notify_observers()
def register_observer(self, observer):
self.__observable.register_observer(observer)
def notify_observers(self):
self.__observable.notify_observers()
def __str__(self):
return "Rubber Duck"
###############################################################################
# Goose classes
###############################################################################
class Goose:
def honk(self):
print "Honk"
def __str__(self):
return "Goose"
class GooseAdapter(Quackable):
def __init__(self, goose):
self.__goose = goose
self.__observable = Observable(self)
def quack(self):
self.__goose.honk()
self.notify_observers()
def register_observer(self, observer):
self.__observable.register_observer(observer)
def notify_observers(self):
self.__observable.notify_observers()
def __str__(self):
return "Goose pretending to be a Duck"
###############################################################################
# QuackCounter
###############################################################################
class QuackCounter(Quackable):
number_of_quacks = 0
def __init__(self, duck):
self.__duck = duck
def quack(self):
self.__duck.quack()
QuackCounter.number_of_quacks += 1
@staticmethod
def get_quacks():
return QuackCounter.number_of_quacks
def register_observer(self, observer):
self.__duck.register_observer(observer)
def notify_observers(self):
self.__duck.notify_observers()
def __str__(self):
return str(self.__duck)
###############################################################################
# Factories
###############################################################################
class AbstractDuckFactory:
__metaclass__ = ABCMeta
@abstractmethod
def create_mallard_duck(self):
pass
@abstractmethod
def create_redhead_duck(self):
pass
@abstractmethod
def create_duck_call(self):
pass
@abstractmethod
def create_rubber_duck(self):
pass
class DuckFactory(AbstractDuckFactory):
def create_mallard_duck(self):
return MallardDuck()
def create_redhead_duck(self):
return RedheadDuck()
def create_duck_call(self):
return DuckCall()
def create_rubber_duck(self):
return RubberDuck()
class CountingDuckFactory(AbstractDuckFactory):
def create_mallard_duck(self):
return QuackCounter(MallardDuck())
def create_redhead_duck(self):
return QuackCounter(RedheadDuck())
def create_duck_call(self):
return QuackCounter(DuckCall())
def create_rubber_duck(self):
return QuackCounter(RubberDuck())
###############################################################################
# Flock
###############################################################################
class Flock(Quackable):
def __init__(self):
self.__ducks = []
def add(self, duck):
self.__ducks.append(duck)
def quack(self):
for duck in self.__ducks:
duck.quack()
def register_observer(self, observer):
for duck in self.__ducks:
duck.register_observer(observer)
def notify_observers(self):
pass
def __str__(self):
return "Flock of Ducks"
class DuckSimulator:
def simulate_factory(self, duck_factory):
print "\nDuck Simulator: With Composite - Flocks"
flock_of_ducks = Flock()
flock_of_ducks.add(duck_factory.create_redhead_duck())
flock_of_ducks.add(duck_factory.create_duck_call())
flock_of_ducks.add(duck_factory.create_rubber_duck())
flock_of_ducks.add(GooseAdapter(Goose()))
flock_of_mallards = Flock()
for i in range(4):
flock_of_mallards.add(duck_factory.create_mallard_duck())
flock_of_ducks.add(flock_of_mallards)
print "\nDuck Simulator: With Observer"
quackologist = Quackologist()
flock_of_ducks.register_observer(quackologist)
self.simulate_duck(flock_of_ducks)
print "The ducks quacked " + str(QuackCounter.get_quacks()) + " times"
def simulate_duck(self, duck):
duck.quack()
if __name__ == '__main__':
simulator = DuckSimulator()
duck_factory = CountingDuckFactory()
simulator.simulate_factory(duck_factory)
|
gpl-3.0
| 939,939,761,361,892,700
| 21.365994
| 79
| 0.541039
| false
| 3.943598
| false
| false
| false
|
pattisdr/osf.io
|
api/meetings/views.py
|
1
|
9146
|
from rest_framework import generics, permissions as drf_permissions
from rest_framework.exceptions import NotFound
from django.db.models import Q, Count, Subquery, OuterRef, Case, When, Value, CharField, F, IntegerField
from django.db.models.functions import Length, Substr, Coalesce
from django.contrib.contenttypes.models import ContentType
from addons.osfstorage.models import OsfStorageFile
from api.base import permissions as base_permissions
from api.base.exceptions import InvalidFilterOperator
from api.base.filters import ListFilterMixin
from api.base.views import JSONAPIBaseView
from api.base.utils import get_object_or_error
from api.base.versioning import PrivateVersioning
from api.meetings.serializers import MeetingSerializer, MeetingSubmissionSerializer
from api.meetings.permissions import IsPublic
from api.nodes.views import NodeMixin
from framework.auth.oauth_scopes import CoreScopes
from osf.models import AbstractNode, Conference, Contributor, Tag, PageCounter
from website import settings
class MeetingMixin(object):
"""Mixin with convenience method get_meeting
"""
meeting_lookup_url_kwarg = 'meeting_id'
def get_meeting(self):
meeting = get_object_or_error(
Conference,
Q(endpoint=self.kwargs[self.meeting_lookup_url_kwarg]),
self.request,
display_name='meeting',
)
return meeting
class BaseMeetingView(JSONAPIBaseView, MeetingMixin):
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.MEETINGS_READ]
required_write_scopes = [CoreScopes.NULL]
model = Conference
# This view goes under the _/ namespace
versioning_class = PrivateVersioning
serializer_class = MeetingSerializer
view_category = 'meetings'
class MeetingList(BaseMeetingView, generics.ListAPIView, ListFilterMixin):
view_name = 'meeting-list'
ordering = ('-modified', ) # default ordering
ordering_fields = ('name', 'submissions_count', 'location', 'start_date',)
# overrides ListFilterMixin
def get_default_queryset(self):
tags = Tag.objects.filter(
abstractnode_tagged__is_public=True,
abstractnode_tagged__is_deleted=False,
).annotate(
num_nodes=Count(F('abstractnode_tagged')),
).filter(name=OuterRef('endpoint'))
conferences = Conference.objects.filter(is_meeting=True).annotate(
submissions_count=Subquery(
tags.values('num_nodes')[:1], output_field=IntegerField(),
),
)
return conferences.filter(submissions_count__gte=settings.CONFERENCE_MIN_COUNT)
# overrides ListAPIView
def get_queryset(self):
return self.get_queryset_from_request()
class MeetingDetail(BaseMeetingView, generics.RetrieveAPIView):
view_name = 'meeting-detail'
def get_object(self):
# No minimum submissions count for accessing meeting directly
return self.get_meeting()
class BaseMeetingSubmission(JSONAPIBaseView, MeetingMixin):
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
IsPublic,
)
required_read_scopes = [CoreScopes.MEETINGS_READ, CoreScopes.NODE_BASE_READ]
required_write_scopes = [CoreScopes.NULL]
model = AbstractNode
# This view goes under the _/ namespace
versioning_class = PrivateVersioning
serializer_class = MeetingSubmissionSerializer
view_category = 'meetings'
def get_serializer_context(self):
context = super(BaseMeetingSubmission, self).get_serializer_context()
context['meeting'] = self.get_meeting()
return context
class MeetingSubmissionList(BaseMeetingSubmission, generics.ListAPIView, ListFilterMixin):
view_name = 'meeting-submissions'
ordering = ('-created', ) # default ordering
ordering_fields = ('title', 'meeting_category', 'author_name', 'download_count', 'created', )
# overrides ListFilterMixin
def get_default_queryset(self):
meeting = self.get_meeting()
return self.annotate_queryset_for_filtering_and_sorting(meeting, meeting.submissions)
# overrides ListAPIView
def get_queryset(self):
return self.get_queryset_from_request()
def build_query_from_field(self, field_name, operation):
if field_name == 'author_name':
if operation['op'] != 'eq':
raise InvalidFilterOperator(value=operation['op'], valid_operators=['eq'])
return Q(author_name__icontains=operation['value'])
if field_name == 'meeting_category':
if operation['op'] != 'eq':
raise InvalidFilterOperator(value=operation['op'], valid_operators=['eq'])
return Q(meeting_category__icontains=operation['value'])
return super(MeetingSubmissionList, self).build_query_from_field(field_name, operation)
def annotate_queryset_for_filtering_and_sorting(self, meeting, queryset):
queryset = self.annotate_queryset_with_meeting_category(meeting, queryset)
queryset = self.annotate_queryset_with_author_name(queryset)
queryset = self.annotate_queryset_with_download_count(queryset)
return queryset
def annotate_queryset_with_meeting_category(self, meeting, queryset):
"""
Annotates queryset with meeting_category - if submission1 tag exists, use this,
otherwise assume default submission2 tag
"""
# Setup meeting category subquery (really existence of certain tags)
category_1 = meeting.field_names.get('submission1', 'poster')
category_2 = meeting.field_names.get('submission2', 'talk')
tag_subquery = Tag.objects.filter(
abstractnode_tagged=OuterRef('pk'),
name=category_1,
).values_list('name', flat=True)
queryset = queryset.annotate(cat_one_count=Count(Subquery(tag_subquery))).annotate(
meeting_category=Case(
When(cat_one_count=1, then=Value(category_1)),
default=Value(category_2),
output_field=CharField(),
),
)
return queryset
def annotate_queryset_with_author_name(self, queryset):
"""
Annotates queryset with author_name - the first visible contributor's family_name
if it exists, otherwise their fullname
"""
# Setup author name subquery (really first bibliographic contributor)
contributors = Contributor.objects.filter(
visible=True,
node_id=OuterRef('pk'),
).order_by('_order')
queryset = queryset.annotate(
author_family_name=Subquery(contributors.values(('user__family_name'))[:1]),
author_full_name=Subquery(contributors.values(('user__fullname'))[:1]),
author_id=Subquery(contributors.values(('user__guids___id'))[:1]),
).annotate(
author_name=Case(
When(author_family_name='', then=F('author_full_name')),
default=F('author_family_name'),
output_field=CharField(),
),
)
return queryset
def annotate_queryset_with_download_count(self, queryset):
"""
Annotates queryset with download count of first osfstorage file
NOTE: This is a brittle way to do this. PageCounter _ids are of the form
<file_action>:<node__id>:<file__id>:<sometimes version>.
- Assumes the "download" file action is the only action with that many letters
- Assumes node and file guids are a consistent length
- ENG-122 would get rid of this string matching behavior
"""
pages = PageCounter.objects.annotate(
node_id=Substr('_id', 10, 5),
file_id=Substr('_id', 16),
_id_length=Length('_id'),
).filter(
_id__icontains='download',
node_id=OuterRef('guids___id'),
file_id=OuterRef('file_id'),
).exclude(_id_length__gt=39)
file_subqs = OsfStorageFile.objects.filter(
target_content_type_id=ContentType.objects.get_for_model(AbstractNode),
target_object_id=OuterRef('pk'),
).order_by('created')
queryset = queryset.annotate(
file_id=Subquery(file_subqs.values('_id')[:1]),
).annotate(
download_count=Coalesce(Subquery(pages.values('total')[:1]), Value(0)),
)
return queryset
class MeetingSubmissionDetail(BaseMeetingSubmission, generics.RetrieveAPIView, NodeMixin):
view_name = 'meeting-submission-detail'
serializer_class = MeetingSubmissionSerializer
node_lookup_url_kwarg = 'submission_id'
def get_object(self):
meeting = self.get_meeting()
node = self.get_node()
# Submission must be associated with the Conference
if meeting.endpoint not in node.tags.values_list('name', flat=True):
raise NotFound('This is not a submission to {}.'.format(meeting.name))
return node
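# Illustrative API usage (URL patterns and values are assumptions inferred from the
# views above; these endpoints live under the private "_/" namespace):
#
#   GET /_/meetings/                                          -> MeetingList
#   GET /_/meetings/<meeting_id>/                             -> MeetingDetail
#   GET /_/meetings/<meeting_id>/submissions/?filter[author_name]=smith&sort=-download_count
#   GET /_/meetings/<meeting_id>/submissions/<submission_id>/ -> MeetingSubmissionDetail
#
# Submissions can be filtered on author_name and meeting_category (equality only) and
# sorted on title, meeting_category, author_name, download_count or created.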
|
apache-2.0
| -3,653,973,906,636,123,000
| 36.63786
| 104
| 0.663459
| false
| 4.083036
| false
| false
| false
|
duonys/deep-learning-chainer
|
dlchainer/SdA.py
|
1
|
5225
|
#-*- coding: utf-8 -*-
from abc import ABCMeta, abstractmethod
import copy
import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin
from sklearn.externals.six import with_metaclass
from chainer import Variable, FunctionSet, optimizers, cuda
import chainer.functions as F
from .dA import dA
from . import utils
class SdAMixin(with_metaclass(ABCMeta, BaseEstimator)):
"""
Stacked Denoising Autoencoder
References:
http://deeplearning.net/tutorial/SdA.html
https://github.com/pfnet/chainer/blob/master/examples/mnist/train_mnist.py
"""
def __init__(self, n_input, n_hiddens, n_output, noise_levels=None, dropout_ratios=None, do_pretrain=True,
batch_size=100, n_epoch_pretrain=20, n_epoch_finetune=20, optimizer=optimizers.Adam(),
activation_func=F.relu, verbose=False, gpu=-1):
self.n_input = n_input
self.n_hiddens = n_hiddens
self.n_output = n_output
self.do_pretrain = do_pretrain
self.batch_size = batch_size
self.n_epoch_pretrain = n_epoch_pretrain
self.n_epoch_finetune = n_epoch_finetune
self.optimizer = optimizer
self.dAs = \
[dA(self.n_input, self.n_hiddens[0],
self._check_var(noise_levels, 0), self._check_var(dropout_ratios, 0), self.batch_size,
self.n_epoch_pretrain, copy.deepcopy(optimizer),
activation_func, verbose, gpu)] + \
[dA(self.n_hiddens[i], self.n_hiddens[i + 1],
self._check_var(noise_levels, i + 1), self._check_var(dropout_ratios, i + 1), self.batch_size,
self.n_epoch_pretrain, copy.deepcopy(optimizer),
activation_func, verbose, gpu) for i in range(len(n_hiddens) - 1)]
self.verbose = verbose
self.gpu = gpu
def _check_var(self, var, index, default_val=0.0):
return var[index] if var is not None else default_val
def fit(self, X, y):
if self.do_pretrain:
self._pretrain(X)
self._finetune(X, y)
def _pretrain(self, X):
for layer, dA in enumerate(self.dAs):
utils.disp('*** pretrain layer: {} ***'.format(layer + 1), self.verbose)
if layer == 0:
layer_input = X
else:
layer_input = self.dAs[layer - 1].encode(Variable(layer_input), train=False).data
dA.fit(layer_input)
def _finetune(self, X, y):
utils.disp('*** finetune ***', self.verbose)
# construct model and setup optimizer
params = {'l{}'.format(layer + 1): dA.encoder for layer, dA in enumerate(self.dAs)}
params.update({'l{}'.format(len(self.dAs) + 1): F.Linear(self.dAs[-1].n_hidden, self.n_output)})
self.model = FunctionSet(**params)
self.optimizer.setup(self.model)
if self.gpu >= 0:
cuda.get_device(self.gpu).use()
self.model.to_gpu()
xp = cuda.cupy if self.gpu >= 0 else np
n = len(X)
for epoch in range(self.n_epoch_finetune):
utils.disp('epoch: {}'.format(epoch + 1), self.verbose)
perm = np.random.permutation(n)
sum_loss = 0
for i in range(0, n, self.batch_size):
X_batch = xp.asarray(X[perm[i: i + self.batch_size]])
y_batch = xp.asarray(y[perm[i: i + self.batch_size]])
self.optimizer.zero_grads()
y_var = self._forward(X_batch)
loss = self._loss_func(y_var, Variable(y_batch))
loss.backward()
self.optimizer.update()
sum_loss += float(loss.data) * len(X_batch)
utils.disp('fine tune mean loss={}'.format(sum_loss / n), self.verbose)
def _forward(self, X, train=True):
X_var = Variable(X)
output = X_var
for dA in self.dAs:
output = dA.encode(output, train)
y_var = self.model['l{}'.format(len(self.dAs) + 1)](output)
return y_var
@abstractmethod
def _loss_func(self, y_var, t_var):
pass
class SdAClassifier(SdAMixin, ClassifierMixin):
"""
References:
http://scikit-learn.org/stable/developers/#rolling-your-own-estimator
"""
def _loss_func(self, y_var, t_var):
return F.softmax_cross_entropy(y_var, t_var)
def fit(self, X, y):
assert X.dtype == np.float32 and y.dtype == np.int32
super().fit(X, y)
def transform(self, X):
return self._forward(X, train=False).data
def predict(self, X):
return np.apply_along_axis(lambda x: np.argmax(x), arr=self.transform(X), axis=1)
class SdARegressor(SdAMixin, RegressorMixin):
"""
References:
http://scikit-learn.org/stable/developers/#rolling-your-own-estimator
"""
def _loss_func(self, y_var, t_var):
y_var = F.reshape(y_var, [len(y_var)])
return F.mean_squared_error(y_var, t_var)
def fit(self, X, y):
assert X.dtype == np.float32 and y.dtype == np.float32
super().fit(X, y)
def transform(self, X):
return self._forward(X, train=False).data
def predict(self, X):
return self.transform(X)
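# Minimal usage sketch (illustrative only; shapes and hyper-parameters are made up):
#
#     import numpy as np
#     X = np.random.rand(1000, 784).astype(np.float32)
#     y = np.random.randint(0, 10, 1000).astype(np.int32)
#     clf = SdAClassifier(n_input=784, n_hiddens=[500, 250], n_output=10,
#                         noise_levels=[0.1, 0.1], n_epoch_pretrain=5,
#                         n_epoch_finetune=5, verbose=True)
#     clf.fit(X, y)              # greedy layer-wise pretraining, then supervised finetuning
#     predictions = clf.predict(X)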
|
mit
| -6,527,209,594,631,249,000
| 32.280255
| 110
| 0.587368
| false
| 3.347213
| false
| false
| false
|
SecPi/SecPi
|
worker/temperature_sensor.py
|
1
|
2642
|
from tools.sensor import Sensor
import glob
import logging
import os
import threading
import time
class TemperatureSensor(Sensor): #DS18B20 digital temperature sensor
def __init__(self, id, params, worker):
super(TemperatureSensor, self).__init__(id, params, worker)
#self.active = False
try:
self.min = int(params["min"])
self.max = int(params["max"])
self.bouncetime = int(params["bouncetime"])
self.device_id = params["device_id"]
except ValueError as ve: # if one configuration parameter can't be parsed as int
logging.error("TemperatureSensor: Wasn't able to initialize the sensor, please check your configuration: %s" % ve)
self.corrupted = True
return
except KeyError as ke: # if config parameters are missing
logging.error("TemperatureSensor: Wasn't able to initialize the sensor, it seems there is a config parameter missing: %s" % ke)
self.corrupted = True
return
os.system('modprobe w1-gpio')
os.system('modprobe w1-therm')
base_dir = '/sys/bus/w1/devices/'
#device_folder = glob.glob(base_dir + '28*')[0]
self.device_file = base_dir + self.device_id + '/w1_slave'
if not os.path.isfile(self.device_file): # if there is no slave file which contains the temperature
self.corrupted = True
logging.error("TemperatureSensor: Wasn't able to find temperature file at %s" % self.device_file)
return
logging.debug("TemperatureSensor: Sensor initialized")
def activate(self):
if not self.corrupted:
self.stop_thread = False
self.checker_thread = threading.Thread(name="thread-checker-%s" % self.device_id,
target=self.check_temperature)
self.checker_thread.start()
else:
logging.error("TemperatureSensor: Sensor couldn't be activated")
def deactivate(self):
if not self.corrupted:
self.stop_thread = True
else:
logging.error("TemperatureSensor: Sensor couldn't be deactivated")
def check_temperature(self):
while True:
if self.stop_thread: #exit thread when flag is set
return
current_temp = self.read_temp()
if current_temp < self.min or current_temp > self.max:
self.alarm("Temperature is not in valid range: %s" % current_temp)
time.sleep(self.bouncetime)
continue
time.sleep(3)
def read_temp_raw(self):
f = open(self.device_file, 'r')
lines = f.readlines()
f.close()
return lines
def read_temp(self):
lines = self.read_temp_raw()
while lines[0].strip()[-3:] != 'YES':
time.sleep(0.2)
lines = self.read_temp_raw()
equals_pos = lines[1].find('t=')
if equals_pos != -1:
temp_string = lines[1][equals_pos+2:]
temp_c = float(temp_string) / 1000.00
return temp_c
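# For reference, a DS18B20 w1_slave file typically contains two lines like these
# (values illustrative):
#
#   72 01 4b 46 7f ff 0e 10 57 : crc=57 YES
#   72 01 4b 46 7f ff 0e 10 57 t=23125
#
# read_temp_raw() returns both lines; read_temp() waits until the CRC check says
# "YES", then parses the value after "t=" and divides by 1000 (23.125 deg C here).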
|
gpl-3.0
| 729,324,569,803,099,900
| 30.452381
| 130
| 0.696064
| false
| 3.214112
| false
| false
| false
|
openstates/openstates
|
openstates/ct/events.py
|
1
|
1993
|
import datetime
import json
from pupa.scrape import Scraper, Event
import pytz
from .utils import open_csv
class CTEventScraper(Scraper):
_tz = pytz.timezone("US/Eastern")
def __init__(self, *args, **kwargs):
super(CTEventScraper, self).__init__(*args, **kwargs)
def scrape(self):
for (code, name) in self.get_comm_codes():
yield from self.scrape_committee_events(code, name)
def scrape_committee_events(self, code, name):
events_url = (
"http://www.cga.ct.gov/basin/fullcalendar/commevents.php?"
"comm_code={}".format(code)
)
events_data = self.get(events_url).text
events = json.loads(events_data)
DATETIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
for info in events:
if info["title"] is None:
self.warning("Event found with no title; it will be skipped")
continue
elif info["title"].startswith("CANCELLED:"):
self.info(
"Cancelled event found; it will be skipped: {}".format(
info["title"]
)
)
continue
when = datetime.datetime.strptime(info["start"], DATETIME_FORMAT)
# end = datetime.datetime.strptime(info['end'], DATETIME_FORMAT)
where = "{0} {1}".format(info["building"].strip(), info["location"].strip())
# end_time=self._tz.localize(end),
event = Event(
start_date=self._tz.localize(when),
location_name=where,
name=info["title"],
description=info["title"],
)
event.add_source(events_url)
yield event
def get_comm_codes(self):
url = "ftp://ftp.cga.ct.gov/pub/data/committee.csv"
page = self.get(url)
page = open_csv(page)
return [(row["comm_code"].strip(), row["comm_name"].strip()) for row in page]
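# For orientation, scrape_committee_events() expects each item of the commevents.php
# JSON to look roughly like this (field values are illustrative, not real data):
#
#   {"title": "Public Hearing", "start": "2019-03-01T10:00:00Z",
#    "end": "2019-03-01T12:00:00Z", "building": "LOB", "location": "Room 2C"}
#
# Only "title", "start", "building" and "location" are used above; events whose title
# starts with "CANCELLED:" are skipped.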
|
gpl-3.0
| -1,077,329,166,467,352,700
| 31.145161
| 88
| 0.537883
| false
| 3.869903
| false
| false
| false
|
frink182/stevostat
|
pir.py
|
1
|
1322
|
#!/usr/bin/env python
from time import sleep
from time import strftime
import RPi.GPIO as GPIO
import os
import paho.mqtt.publish as publish
import paho.mqtt.client as mqtt
from datetime import datetime
PIR=26
SCREEN_TIMEOUT=300
SCREEN='/sys/class/backlight/rpi_backlight/bl_power'
ON=0
OFF=1
TOPIC="presence/PIR"
GPIO.setmode(GPIO.BCM)
GPIO.setup(PIR, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
def my_callback(channel):
sleep(0.5) # confirm the movement by waiting 0.5 sec
if GPIO.input(PIR): # and check again the input
publishMqtt()
screenOn()
# stop detection for a while
GPIO.remove_event_detect(PIR)
sleep(60)
GPIO.add_event_detect(PIR, GPIO.RISING, callback=my_callback, bouncetime=300)
GPIO.add_event_detect(PIR, GPIO.RISING, callback=my_callback, bouncetime=300)
def publishMqtt():
message = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
publish.single(TOPIC, message, qos=0, retain=True, hostname="slug")
def screenOn():
toggleScreen(ON)
def screenOff():
toggleScreen(OFF)
def toggleScreen(value):
with open(SCREEN, 'r') as screen_file:
    current_status = int(screen_file.read(1))
if current_status != value:
# print strftime("%d %b %H:%M:%S") + " toggle screen to " + str(value)
os.system("echo " + str(value) + " > " + SCREEN)
# you can continue doing other stuff here
while True:
sleep(60)
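# To inspect the retained presence messages from another machine, the mosquitto CLI
# can be used (the broker name "slug" comes from the publish call above):
#
#   mosquitto_sub -h slug -t presence/PIR -v
#
# Each confirmed movement publishes a "YYYY-MM-DD HH:MM:SS" timestamp to that topic.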
|
gpl-2.0
| -4,928,873,503,737,569,000
| 23.036364
| 79
| 0.712557
| false
| 2.794926
| false
| false
| false
|
rane-hs/fabric-py3
|
tests/Python26SocketServer.py
|
1
|
22074
|
"""Generic socket server classes.
This module tries to capture the various aspects of defining a server:
For socket-based servers:
- address family:
- AF_INET{,6}: IP (Internet Protocol) sockets (default)
- AF_UNIX: Unix domain sockets
- others, e.g. AF_DECNET are conceivable (see <socket.h>
- socket type:
- SOCK_STREAM (reliable stream, e.g. TCP)
- SOCK_DGRAM (datagrams, e.g. UDP)
For request-based servers (including socket-based):
- client address verification before further looking at the request
(This is actually a hook for any processing that needs to look
at the request before anything else, e.g. logging)
- how to handle multiple requests:
- synchronous (one request is handled at a time)
- forking (each request is handled by a new process)
- threading (each request is handled by a new thread)
The classes in this module favor the server type that is simplest to
write: a synchronous TCP/IP server. This is bad class design, but
save some typing. (There's also the issue that a deep class hierarchy
slows down method lookups.)
There are five classes in an inheritance diagram, four of which represent
synchronous servers of four types:
+------------+
| BaseServer |
+------------+
|
v
+-----------+ +------------------+
| TCPServer |------->| UnixStreamServer |
+-----------+ +------------------+
|
v
+-----------+ +--------------------+
| UDPServer |------->| UnixDatagramServer |
+-----------+ +--------------------+
Note that UnixDatagramServer derives from UDPServer, not from
UnixStreamServer -- the only difference between an IP and a Unix
stream server is the address family, which is simply repeated in both
unix server classes.
Forking and threading versions of each type of server can be created
using the ForkingMixIn and ThreadingMixIn mix-in classes. For
instance, a threading UDP server class is created as follows:
class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass
The Mix-in class must come first, since it overrides a method defined
in UDPServer! Setting the various member variables also changes
the behavior of the underlying server mechanism.
To implement a service, you must derive a class from
BaseRequestHandler and redefine its handle() method. You can then run
various versions of the service by combining one of the server classes
with your request handler class.
The request handler class must be different for datagram or stream
services. This can be hidden by using the request handler
subclasses StreamRequestHandler or DatagramRequestHandler.
Of course, you still have to use your head!
For instance, it makes no sense to use a forking server if the service
contains state in memory that can be modified by requests (since the
modifications in the child process would never reach the initial state
kept in the parent process and passed to each child). In this case,
you can use a threading server, but you will probably have to use
locks to prevent two nearly simultaneous requests from applying
conflicting changes to the server state.
On the other hand, if you are building e.g. an HTTP server, where all
data is stored externally (e.g. in the file system), a synchronous
class will essentially render the service "deaf" while one request is
being handled -- which may be for a very long time if a client is slow
to read all the data it has requested. Here a threading or forking
server is appropriate.
In some cases, it may be appropriate to process part of a request
synchronously, but to finish processing in a forked child depending on
the request data. This can be implemented by using a synchronous
server and doing an explicit fork in the request handler class
handle() method.
Another approach to handling multiple simultaneous requests in an
environment that supports neither threads nor fork (or where these are
too expensive or inappropriate for the service) is to maintain an
explicit table of partially finished requests and to use select() to
decide which request to work on next (or whether to handle a new
incoming request). This is particularly important for stream services
where each client can potentially be connected for a long time (if
threads or subprocesses cannot be used).
Future work:
- Standard classes for Sun RPC (which uses either UDP or TCP)
- Standard mix-in classes to implement various authentication
and encryption schemes
- Standard framework for select-based multiplexing
XXX Open problems:
- What to do with out-of-band data?
BaseServer:
- split generic "request" functionality out into BaseServer class.
Copyright (C) 2000 Luke Kenneth Casson Leighton <lkcl@samba.org>
example: read entries from a SQL database (requires overriding
get_request() to return a table entry from the database).
entry is processed by a RequestHandlerClass.
"""
# This file copyright (c) 2001-2015 Python Software Foundation; All Rights Reserved
# Author of the BaseServer patch: Luke Kenneth Casson Leighton
# XXX Warning!
# There is a test suite for this module, but it cannot be run by the
# standard regression test.
# To run it manually, run Lib/test/test_socketserver.py.
__version__ = "0.4"
import socket
import select
import sys
import os
try:
import threading
except ImportError:
import dummy_threading as threading
__all__ = ["TCPServer", "UDPServer", "ForkingUDPServer", "ForkingTCPServer",
"ThreadingUDPServer", "ThreadingTCPServer", "BaseRequestHandler",
"StreamRequestHandler", "DatagramRequestHandler",
"ThreadingMixIn", "ForkingMixIn"]
if hasattr(socket, "AF_UNIX"):
__all__.extend(["UnixStreamServer", "UnixDatagramServer",
"ThreadingUnixStreamServer",
"ThreadingUnixDatagramServer"])
class BaseServer:
"""Base class for server classes.
Methods for the caller:
- __init__(server_address, RequestHandlerClass)
- serve_forever(poll_interval=0.5)
- shutdown()
- handle_request() # if you do not use serve_forever()
- fileno() -> int # for select()
Methods that may be overridden:
- server_bind()
- server_activate()
- get_request() -> request, client_address
- handle_timeout()
- verify_request(request, client_address)
- server_close()
- process_request(request, client_address)
- close_request(request)
- handle_error()
Methods for derived classes:
- finish_request(request, client_address)
Class variables that may be overridden by derived classes or
instances:
- timeout
- address_family
- socket_type
- allow_reuse_address
Instance variables:
- RequestHandlerClass
- socket
"""
timeout = None
def __init__(self, server_address, RequestHandlerClass):
"""Constructor. May be extended, do not override."""
self.server_address = server_address
self.RequestHandlerClass = RequestHandlerClass
self.__is_shut_down = threading.Event()
self.__serving = False
def server_activate(self):
"""Called by constructor to activate the server.
May be overridden.
"""
pass
def serve_forever(self, poll_interval=0.5):
"""Handle one request at a time until shutdown.
Polls for shutdown every poll_interval seconds. Ignores
self.timeout. If you need to do periodic tasks, do them in
another thread.
"""
self.__serving = True
self.__is_shut_down.clear()
while self.__serving:
# XXX: Consider using another file descriptor or
# connecting to the socket to wake this up instead of
# polling. Polling reduces our responsiveness to a
# shutdown request and wastes cpu at all other times.
r, w, e = select.select([self], [], [], poll_interval)
if r:
self._handle_request_noblock()
self.__is_shut_down.set()
def shutdown(self):
"""Stops the serve_forever loop.
Blocks until the loop has finished. This must be called while
serve_forever() is running in another thread, or it will
deadlock.
"""
self.__serving = False
self.__is_shut_down.wait()
# The distinction between handling, getting, processing and
# finishing a request is fairly arbitrary. Remember:
#
# - handle_request() is the top-level call. It calls
# select, get_request(), verify_request() and process_request()
# - get_request() is different for stream or datagram sockets
# - process_request() is the place that may fork a new process
# or create a new thread to finish the request
# - finish_request() instantiates the request handler class;
# this constructor will handle the request all by itself
def handle_request(self):
"""Handle one request, possibly blocking.
Respects self.timeout.
"""
# Support people who used socket.settimeout() to escape
# handle_request before self.timeout was available.
timeout = self.socket.gettimeout()
if timeout is None:
timeout = self.timeout
elif self.timeout is not None:
timeout = min(timeout, self.timeout)
fd_sets = select.select([self], [], [], timeout)
if not fd_sets[0]:
self.handle_timeout()
return
self._handle_request_noblock()
def _handle_request_noblock(self):
"""Handle one request, without blocking.
I assume that select.select has returned that the socket is
readable before this function was called, so there should be
no risk of blocking in get_request().
"""
try:
request, client_address = self.get_request()
except socket.error:
return
if self.verify_request(request, client_address):
try:
self.process_request(request, client_address)
except:
self.handle_error(request, client_address)
self.close_request(request)
def handle_timeout(self):
"""Called if no new request arrives within self.timeout.
Overridden by ForkingMixIn.
"""
pass
def verify_request(self, request, client_address):
"""Verify the request. May be overridden.
Return True if we should proceed with this request.
"""
return True
def process_request(self, request, client_address):
"""Call finish_request.
Overridden by ForkingMixIn and ThreadingMixIn.
"""
self.finish_request(request, client_address)
self.close_request(request)
def server_close(self):
"""Called to clean-up the server.
May be overridden.
"""
pass
def finish_request(self, request, client_address):
"""Finish one request by instantiating RequestHandlerClass."""
self.RequestHandlerClass(request, client_address, self)
def close_request(self, request):
"""Called to clean up an individual request."""
pass
def handle_error(self, request, client_address):
"""Handle an error gracefully. May be overridden.
The default is to print a traceback and continue.
"""
print(('-' * 40))
print(('Exception happened during processing of request from %s' % (client_address,)))
import traceback
traceback.print_exc() # XXX But this goes to stderr!
print(('-' * 40))
class TCPServer(BaseServer):
"""Base class for various socket-based server classes.
Defaults to synchronous IP stream (i.e., TCP).
Methods for the caller:
- __init__(server_address, RequestHandlerClass, bind_and_activate=True)
- serve_forever(poll_interval=0.5)
- shutdown()
- handle_request() # if you don't use serve_forever()
- fileno() -> int # for select()
Methods that may be overridden:
- server_bind()
- server_activate()
- get_request() -> request, client_address
- handle_timeout()
- verify_request(request, client_address)
- process_request(request, client_address)
- close_request(request)
- handle_error()
Methods for derived classes:
- finish_request(request, client_address)
Class variables that may be overridden by derived classes or
instances:
- timeout
- address_family
- socket_type
- request_queue_size (only for stream sockets)
- allow_reuse_address
Instance variables:
- server_address
- RequestHandlerClass
- socket
"""
address_family = socket.AF_INET
socket_type = socket.SOCK_STREAM
request_queue_size = 5
allow_reuse_address = False
def __init__(self, server_address, RequestHandlerClass,
bind_and_activate=True):
"""Constructor. May be extended, do not override."""
BaseServer.__init__(self, server_address, RequestHandlerClass)
self.socket = socket.socket(self.address_family,
self.socket_type)
if bind_and_activate:
self.server_bind()
self.server_activate()
def server_bind(self):
"""Called by constructor to bind the socket.
May be overridden.
"""
if self.allow_reuse_address:
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind(self.server_address)
self.server_address = self.socket.getsockname()
def server_activate(self):
"""Called by constructor to activate the server.
May be overridden.
"""
self.socket.listen(self.request_queue_size)
def server_close(self):
"""Called to clean-up the server.
May be overridden.
"""
self.socket.close()
def fileno(self):
"""Return socket file number.
Interface required by select().
"""
return self.socket.fileno()
def get_request(self):
"""Get the request and client address from the socket.
May be overridden.
"""
return self.socket.accept()
def close_request(self, request):
"""Called to clean up an individual request."""
request.close()
class UDPServer(TCPServer):
"""UDP server class."""
allow_reuse_address = False
socket_type = socket.SOCK_DGRAM
max_packet_size = 8192
def get_request(self):
data, client_addr = self.socket.recvfrom(self.max_packet_size)
return (data, self.socket), client_addr
def server_activate(self):
# No need to call listen() for UDP.
pass
def close_request(self, request):
# No need to close anything.
pass
class ForkingMixIn:
"""Mix-in class to handle each request in a new process."""
timeout = 300
active_children = None
max_children = 40
def collect_children(self):
"""Internal routine to wait for children that have exited."""
if self.active_children is None:
return
while len(self.active_children) >= self.max_children:
# XXX: This will wait for any child process, not just ones
# spawned by this library. This could confuse other
# libraries that expect to be able to wait for their own
# children.
try:
pid, status = os.waitpid(0, 0)
except os.error:
pid = None
if pid not in self.active_children:
continue
self.active_children.remove(pid)
# XXX: This loop runs more system calls than it ought
# to. There should be a way to put the active_children into a
# process group and then use os.waitpid(-pgid) to wait for any
# of that set, but I couldn't find a way to allocate pgids
# that couldn't collide.
for child in self.active_children:
try:
pid, status = os.waitpid(child, os.WNOHANG)
except os.error:
pid = None
if not pid:
continue
try:
self.active_children.remove(pid)
except ValueError as e:
raise ValueError('%s. x=%d and list=%r' % \
(e.message, pid, self.active_children))
def handle_timeout(self):
"""Wait for zombies after self.timeout seconds of inactivity.
May be extended, do not override.
"""
self.collect_children()
def process_request(self, request, client_address):
"""Fork a new subprocess to process the request."""
self.collect_children()
pid = os.fork()
if pid:
# Parent process
if self.active_children is None:
self.active_children = []
self.active_children.append(pid)
self.close_request(request)
return
else:
# Child process.
# This must never return, hence os._exit()!
try:
self.finish_request(request, client_address)
os._exit(0)
except:
try:
self.handle_error(request, client_address)
finally:
os._exit(1)
class ThreadingMixIn:
"""Mix-in class to handle each request in a new thread."""
# Decides how threads will act upon termination of the
# main process
daemon_threads = False
def process_request_thread(self, request, client_address):
"""Same as in BaseServer but as a thread.
In addition, exception handling is done here.
"""
try:
self.finish_request(request, client_address)
self.close_request(request)
except:
self.handle_error(request, client_address)
self.close_request(request)
def process_request(self, request, client_address):
"""Start a new thread to process the request."""
t = threading.Thread(target=self.process_request_thread,
args=(request, client_address))
if self.daemon_threads:
t.setDaemon(1)
t.start()
class ForkingUDPServer(ForkingMixIn, UDPServer):
pass
class ForkingTCPServer(ForkingMixIn, TCPServer):
pass
class ThreadingUDPServer(ThreadingMixIn, UDPServer):
pass
class ThreadingTCPServer(ThreadingMixIn, TCPServer):
pass
if hasattr(socket, 'AF_UNIX'):
class UnixStreamServer(TCPServer):
address_family = socket.AF_UNIX
class UnixDatagramServer(UDPServer):
address_family = socket.AF_UNIX
class ThreadingUnixStreamServer(ThreadingMixIn, UnixStreamServer):
pass
class ThreadingUnixDatagramServer(ThreadingMixIn, UnixDatagramServer):
pass
class BaseRequestHandler:
"""Base class for request handler classes.
This class is instantiated for each request to be handled. The
constructor sets the instance variables request, client_address
and server, and then calls the handle() method. To implement a
specific service, all you need to do is to derive a class which
defines a handle() method.
The handle() method can find the request as self.request, the
client address as self.client_address, and the server (in case it
needs access to per-server information) as self.server. Since a
separate instance is created for each request, the handle() method
can define arbitrary other instance variables.
"""
def __init__(self, request, client_address, server):
self.request = request
self.client_address = client_address
self.server = server
try:
self.setup()
self.handle()
self.finish()
finally:
pass  # Python 2 set sys.exc_traceback = None here to help garbage collection
def setup(self):
pass
def handle(self):
pass
def finish(self):
pass
# The following two classes make it possible to use the same service
# class for stream or datagram servers.
# Each class sets up these instance variables:
# - rfile: a file object from which the request is read
# - wfile: a file object to which the reply is written
# When the handle() method returns, wfile is flushed properly
class StreamRequestHandler(BaseRequestHandler):
"""Define self.rfile and self.wfile for stream sockets."""
# Default buffer sizes for rfile, wfile.
# We default rfile to buffered because otherwise it could be
# really slow for large data (a getc() call per byte); we make
# wfile unbuffered because (a) often after a write() we want to
# read and we need to flush the line; (b) big writes to unbuffered
# files are typically optimized by stdio even when big reads
# aren't.
rbufsize = -1
wbufsize = 0
def setup(self):
self.connection = self.request
self.rfile = self.connection.makefile('rb', self.rbufsize)
self.wfile = self.connection.makefile('wb', self.wbufsize)
def finish(self):
if not self.wfile.closed:
self.wfile.flush()
self.wfile.close()
self.rfile.close()
class DatagramRequestHandler(BaseRequestHandler):
# XXX Regrettably, I cannot get this working on Linux;
# s.recvfrom() doesn't return a meaningful client address.
"""Define self.rfile and self.wfile for datagram sockets."""
def setup(self):
# Python 2 fell back from cStringIO to StringIO here; Python 3 only has io.StringIO.
from io import StringIO
self.packet, self.socket = self.request
self.rfile = StringIO(self.packet)
self.wfile = StringIO()
def finish(self):
self.socket.sendto(self.wfile.getvalue(), self.client_address)
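# Illustrative usage sketch (not part of the original module): a minimal threaded TCP
# echo server built from the classes above. Host and port are arbitrary example values.
if __name__ == '__main__':
    class EchoHandler(StreamRequestHandler):
        def handle(self):
            # Read one line from the client and echo it back unchanged.
            self.wfile.write(self.rfile.readline())

    server = ThreadingTCPServer(('127.0.0.1', 8000), EchoHandler)
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        server.server_close()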
|
bsd-2-clause
| 8,849,255,045,800,621,000
| 30.670014
| 94
| 0.642339
| false
| 4.378893
| false
| false
| false
|
StefGou/Kijiji-Repost-Headless
|
kijiji_repost_headless/kijiji_cmd.py
|
1
|
4487
|
import argparse
import os
import sys
from time import sleep
import kijiji_api
if sys.version_info < (3, 0):
raise Exception("This program requires Python 3.0 or greater")
def main():
##Start here
#Takes credentials (username/password) plus one sub-command:
#  post <adPostingFile> | repost <adPostingFile>
#  folder <folderName> | repost_folder <folderName>
#  show | delete <adId> | nuke
parser = argparse.ArgumentParser(
description="Post ads on Kijiji")
parser.add_argument('-u', '--username', help='username of your kijiji account')
parser.add_argument('-p', '--password', help='password of your kijiji account')
subparsers = parser.add_subparsers(help ='sub-command help')
postParser = subparsers.add_parser('post', help='post a new ad')
postParser.add_argument('inf_file', type=str, help='.inf file containing posting details')
postParser.set_defaults(function=post_ad)
folderParser = subparsers.add_parser('folder', help='post ad from folder')
folderParser.add_argument('folderName', type=str, help='folder containing ad details')
folderParser.set_defaults(function=post_folder)
repostFolderParser = subparsers.add_parser('repost_folder', help='post ad from folder')
repostFolderParser.add_argument('folderName', type=str, help='folder containing ad details')
repostFolderParser.set_defaults(function=repost_folder)
showParser = subparsers.add_parser('show', help='show currently listed ads')
showParser.set_defaults(function=show_ads)
deleteParser = subparsers.add_parser('delete', help='delete a listed ad')
deleteParser.add_argument('id',type=str, help='id of the ad you wish to delete')
deleteParser.set_defaults(function=delete_ad)
nukeParser = subparsers.add_parser('nuke', help='delete all ads')
nukeParser.set_defaults(function=nuke)
repostParser = subparsers.add_parser('repost', help='repost an existing ad')
repostParser.add_argument('inf_file', type = str,help = '.inf file containing posting details')
repostParser.set_defaults(function=repost_ad)
args = parser.parse_args()
#try:
args.function(args)
#except AttributeError as err:
# print(err)
# parser.print_help()
#HELPER FUNCTIONS
def get_folder_data(args):
args.inf_file = "item.inf"
cred_file = args.folderName+"/login.inf"
f = open(cred_file, 'r')
creds = [line.strip() for line in f]
args.username = creds[0]
args.password = creds[1]
def get_inf_details(inf_file):
with open(inf_file, 'rt') as infFileLines:
data = {key: val for line in infFileLines for (key, val) in (line.strip().split("="),)}
files = [open(picture, 'rb').read() for picture in data['imageCsv'].split(",")]
return [data, files]
##Actual Functions called from main
def post_folder(args):
get_folder_data(args)
os.chdir(args.folderName)
post_ad(args)
def post_ad(args):
[data, imageFiles] = get_inf_details(args.inf_file)
api = kijiji_api.KijijiApi()
api.login(args.username, args.password)
api.post_ad_using_data(data, imageFiles)
def show_ads(args):
api = kijiji_api.KijijiApi()
api.login(args.username, args.password)
[print("{} '{}'".format(adId, adName)) for adName, adId in api.get_all_ads()]
def delete_ad(args):
api = kijiji_api.KijijiApi()
api.login(args.username, args.password)
api.delete_ad(args.id)
def delete_ad_using_title(name):
api = kijiji_api.KijijiApi()
api.delete_ad_using_title(name)
#Try to delete ad with same name if possible
#post new ad
def repost_ad(args):
api = kijiji_api.KijijiApi()
api.login(args.username, args.password)
delAdName = ""
for line in open(args.inf_file, 'rt'):
[key, val] = line.strip().rstrip("\n").split("=")
if key =='postAdForm.title':
delAdName = val
try:
api.delete_ad_using_title(delAdName)
print("Existing ad deleted before reposting")
except kijiji_api.DeleteAdException:
print("Did not find an existing ad with matching title, skipping ad deletion")
pass
# Must wait a bit before posting the same ad even after deleting it, otherwise Kijiji will automatically remove it
sleep(180)
post_ad(args)
def repost_folder(args):
get_folder_data(args)
os.chdir(args.folderName)
repost_ad(args)
def nuke(args):
api = kijiji_api.KijijiApi()
api.login(args.username, args.password)
allAds = api.get_all_ads()
[api.delete_ad(adId) for adName, adId in allAds]
if __name__ == "__main__":
main()
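# Example invocations (illustrative; file and folder names are placeholders):
#
#   python kijiji_cmd.py -u me@example.com -p secret post my_ad.inf
#   python kijiji_cmd.py -u me@example.com -p secret show
#   python kijiji_cmd.py -u me@example.com -p secret delete 123456789
#   python kijiji_cmd.py folder my_ad_folder          # credentials read from my_ad_folder/login.inf
#   python kijiji_cmd.py repost_folder my_ad_folder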
|
mit
| -5,459,380,662,855,144,000
| 32.485075
| 118
| 0.682416
| false
| 3.253807
| false
| false
| false
|
Ophiuchus1312/enigma2-master
|
Navigation.py
|
1
|
7217
|
from enigma import eServiceCenter, eServiceReference, eTimer, pNavigation, getBestPlayableServiceReference, iPlayableService
from Components.ParentalControl import parentalControl
from Components.config import config
from Tools.BoundFunction import boundFunction
from Tools.StbHardware import setFPWakeuptime, getFPWakeuptime, getFPWasTimerWakeup
from time import time
import RecordTimer
import PowerTimer
import Screens.Standby
import NavigationInstance
import ServiceReference
from Screens.InfoBar import InfoBar, MoviePlayer
from os import path
# TODO: remove pNavigation, eNavigation and rewrite this stuff in python.
class Navigation:
def __init__(self, nextRecordTimerAfterEventActionAuto=False, nextPowerManagerAfterEventActionAuto=False):
if NavigationInstance.instance is not None:
raise NavigationInstance.instance
NavigationInstance.instance = self
self.ServiceHandler = eServiceCenter.getInstance()
import Navigation as Nav
Nav.navcore = self
self.pnav = pNavigation()
self.pnav.m_event.get().append(self.dispatchEvent)
self.pnav.m_record_event.get().append(self.dispatchRecordEvent)
self.event = [ ]
self.record_event = [ ]
self.currentlyPlayingServiceReference = None
self.currentlyPlayingServiceOrGroup = None
self.currentlyPlayingService = None
self.RecordTimer = RecordTimer.RecordTimer()
self.PowerTimer = PowerTimer.PowerTimer()
self.__wasTimerWakeup = False
self.__wasRecTimerWakeup = False
self.__wasPowerTimerWakeup = False
if getFPWasTimerWakeup():
self.__wasTimerWakeup = True
if nextRecordTimerAfterEventActionAuto and abs(self.RecordTimer.getNextRecordingTime() - time()) <= 360:
self.__wasRecTimerWakeup = True
print 'RECTIMER: wakeup to standby detected.'
f = open("/tmp/was_rectimer_wakeup", "w")
f.write('1')
f.close()
# as we woke the box to record, place the box in standby.
self.standbytimer = eTimer()
self.standbytimer.callback.append(self.gotostandby)
self.standbytimer.start(15000, True)
elif nextPowerManagerAfterEventActionAuto:
self.__wasPowerTimerWakeup = True
print 'POWERTIMER: wakeup to standby detected.'
f = open("/tmp/was_powertimer_wakeup", "w")
f.write('1')
f.close()
# as a PowerTimer WakeToStandby was actiond to it.
self.standbytimer = eTimer()
self.standbytimer.callback.append(self.gotostandby)
self.standbytimer.start(15000, True)
def wasTimerWakeup(self):
return self.__wasTimerWakeup
def wasRecTimerWakeup(self):
return self.__wasRecTimerWakeup
def wasPowerTimerWakeup(self):
return self.__wasPowerTimerWakeup
def gotostandby(self):
print 'TIMER: now entering standby'
from Tools import Notifications
Notifications.AddNotification(Screens.Standby.Standby)
def dispatchEvent(self, i):
for x in self.event:
x(i)
if i == iPlayableService.evEnd:
self.currentlyPlayingServiceReference = None
self.currentlyPlayingServiceOrGroup = None
self.currentlyPlayingService = None
def dispatchRecordEvent(self, rec_service, event):
# print "record_event", rec_service, event
for x in self.record_event:
x(rec_service, event)
def playService(self, ref, checkParentalControl = True, forceRestart = False):
oldref = self.currentlyPlayingServiceReference
if ref and oldref and ref == oldref and not forceRestart:
print "ignore request to play already running service(1)"
return 0
print "playing", ref and ref.toString()
if path.exists("/proc/stb/lcd/symbol_signal") and config.lcd.mode.getValue() == '1':
try:
if ref.toString().find('0:0:0:0:0:0:0:0:0') == -1:
signal = 1
else:
signal = 0
f = open("/proc/stb/lcd/symbol_signal", "w")
f.write(str(signal))
f.close()
except:
f = open("/proc/stb/lcd/symbol_signal", "w")
f.write("0")
f.close()
elif path.exists("/proc/stb/lcd/symbol_signal") and config.lcd.mode.getValue() == '0':
f = open("/proc/stb/lcd/symbol_signal", "w")
f.write("0")
f.close()
if ref is None:
self.stopService()
return 0
InfoBarInstance = InfoBar.instance
if not checkParentalControl or parentalControl.isServicePlayable(ref, boundFunction(self.playService, checkParentalControl = False)):
if ref.flags & eServiceReference.isGroup:
if not oldref:
oldref = eServiceReference()
playref = getBestPlayableServiceReference(ref, oldref)
print "playref", playref
if playref and oldref and playref == oldref and not forceRestart:
print "ignore request to play already running service(2)"
return 0
if not playref or (checkParentalControl and not parentalControl.isServicePlayable(playref, boundFunction(self.playService, checkParentalControl = False))):
self.stopService()
return 0
else:
playref = ref
if self.pnav:
self.pnav.stopService()
self.currentlyPlayingServiceReference = playref
self.currentlyPlayingServiceOrGroup = ref
if InfoBarInstance is not None:
InfoBarInstance.servicelist.servicelist.setCurrent(ref)
if self.pnav.playService(playref):
print "Failed to start", playref
self.currentlyPlayingServiceReference = None
self.currentlyPlayingServiceOrGroup = None
return 0
elif oldref:
InfoBarInstance.servicelist.servicelist.setCurrent(oldref)
return 1
def getCurrentlyPlayingServiceReference(self):
return self.currentlyPlayingServiceReference
def getCurrentlyPlayingServiceOrGroup(self):
return self.currentlyPlayingServiceOrGroup
def isMovieplayerActive(self):
MoviePlayerInstance = MoviePlayer.instance
if MoviePlayerInstance is not None and self.currentlyPlayingServiceReference.toString().find('0:0:0:0:0:0:0:0:0') != -1:
from Screens.InfoBarGenerics import setResumePoint
setResumePoint(MoviePlayer.instance.session)
MoviePlayerInstance.close()
def recordService(self, ref, simulate=False):
service = None
if not simulate: print "recording service: %s" % (str(ref))
if isinstance(ref, ServiceReference.ServiceReference):
ref = ref.ref
if ref:
if ref.flags & eServiceReference.isGroup:
ref = getBestPlayableServiceReference(ref, eServiceReference(), simulate)
service = ref and self.pnav and self.pnav.recordService(ref, simulate)
if service is None:
print "record returned non-zero"
return service
def stopRecordService(self, service):
ret = self.pnav and self.pnav.stopRecordService(service)
return ret
def getRecordings(self, simulate=False):
return self.pnav and self.pnav.getRecordings(simulate)
def getCurrentService(self):
if not self.currentlyPlayingService:
self.currentlyPlayingService = self.pnav and self.pnav.getCurrentService()
return self.currentlyPlayingService
def stopService(self):
if self.pnav:
self.pnav.stopService()
self.currentlyPlayingServiceReference = None
self.currentlyPlayingServiceOrGroup = None
if path.exists("/proc/stb/lcd/symbol_signal"):
f = open("/proc/stb/lcd/symbol_signal", "w")
f.write("0")
f.close()
def pause(self, p):
return self.pnav and self.pnav.pause(p)
def shutdown(self):
self.RecordTimer.shutdown()
self.PowerTimer.shutdown()
self.ServiceHandler = None
self.pnav = None
def stopUserServices(self):
self.stopService()
|
gpl-2.0
| 6,904,893,535,751,126,000
| 34.033981
| 159
| 0.749758
| false
| 3.361435
| false
| false
| false
|
john5223/bottle-auth
|
auth/controllers/user_controller.py
|
1
|
2915
|
import logging
logger = logging.getLogger(__name__)
from bottle import route, get, post, delete
from bottle import request, response
def error(code, message):
response.status = code
message['status'] = code
return message
get_user_table = lambda db: db.get_table('users', primary_id='userid', primary_type='String(100)')
@get('/users/<userid>')
def get_user(db, userid):
user_table = get_user_table(db)
user = user_table.find_one(userid=userid)
if not user:
return error(404, {'error': 'Not a valid user'})
else:
group_table = db.get_table('groups')
groups = group_table.distinct('name', userid=userid)
user['groups'] = sorted([x['name'] for x in groups])
return user
@delete('/users/<userid>')
def delete_user(db, userid):
user_table = get_user_table(db)
user = user_table.find_one(userid=userid)
if not user:
return error(404, {'error': 'userid not found'})
else:
user_table.delete(userid=userid)
return {'status': 200}
@route('/users/<userid>', method=['POST', 'PUT'])
def create_update_user(db, userid):
data = request.json
data_keys = data.keys()
required_fields = ['first_name', 'last_name', 'userid', 'groups']
missing_fields = [x for x in required_fields if x not in data_keys]
extra_fields = [x for x in data_keys if x not in required_fields]
if missing_fields:
return error(400, {'error': 'Missing fields (%s)' % (','.join(missing_fields)) })
if extra_fields:
return error(400, {'error': 'Extra fields (%s)' % (','.join(extra_fields)) })
user_table = get_user_table(db)
existing_user = user_table.find_one(userid=data['userid'])
if request.method == 'POST' and existing_user:
return error(409, {'error': 'User already exists'})
if request.method == 'PUT' and not existing_user:
return error(404, {'error': 'User does not exist'})
#update this user's group membership
userid = data.get('userid')
groups = data.pop('groups')
groups_table = db.get_table('groups')
if request.method == 'POST':
user_insert = user_table.insert(data)
elif request.method == 'PUT':
user_update = user_table.update(data, ['userid'])
for name in groups:
groups_table.upsert(dict(name=name, userid=userid), ['name','userid'])
if request.method == 'PUT':
#get rid of any old groups for this user
params = {}
for counter, group in enumerate(groups,1):
params["group_name" + str(counter)] = group
counter += 1
where_clause = 'name NOT IN(:' + ",:".join(params.keys()) + ')' # b/c sqlalchemy can't use a list!?
params['userid'] = userid
q = '''DELETE FROM groups WHERE userid=:userid AND ''' + where_clause
db.executable.execute(q, params)
return {'status': 200, 'user': get_user(db, userid)}
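# Worked sketch of the parameterised NOT IN clause built above (values are
# hypothetical; dict key order may vary on older Python versions):
#
#   groups = ['admins', 'users']
#   params -> {'group_name1': 'admins', 'group_name2': 'users', 'userid': 'jdoe'}
#   q      -> "DELETE FROM groups WHERE userid=:userid AND name NOT IN(:group_name1,:group_name2)"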
|
gpl-2.0
| -8,540,600,454,075,682,000
| 34.120482
| 107
| 0.616467
| false
| 3.576687
| false
| false
| false
|
ProfessorX/Config
|
.PyCharm30/system/python_stubs/-1247971765/PyKDE4/kdeui/KPixmapCache.py
|
1
|
2878
|
# encoding: utf-8
# module PyKDE4.kdeui
# from /usr/lib/python3/dist-packages/PyKDE4/kdeui.cpython-34m-x86_64-linux-gnu.so
# by generator 1.135
# no doc
# imports
import PyKDE4.kdecore as __PyKDE4_kdecore
import PyQt4.QtCore as __PyQt4_QtCore
import PyQt4.QtGui as __PyQt4_QtGui
import PyQt4.QtSvg as __PyQt4_QtSvg
class KPixmapCache(): # skipped bases: <class 'sip.wrapper'>
# no doc
def cacheLimit(self, *args, **kwargs): # real signature unknown
pass
def deleteCache(self, *args, **kwargs): # real signature unknown
pass
def discard(self, *args, **kwargs): # real signature unknown
pass
def ensureInited(self, *args, **kwargs): # real signature unknown
pass
def find(self, *args, **kwargs): # real signature unknown
pass
def insert(self, *args, **kwargs): # real signature unknown
pass
def isEnabled(self, *args, **kwargs): # real signature unknown
pass
def isValid(self, *args, **kwargs): # real signature unknown
pass
def loadCustomData(self, *args, **kwargs): # real signature unknown
pass
def loadCustomIndexHeader(self, *args, **kwargs): # real signature unknown
pass
def loadFromFile(self, *args, **kwargs): # real signature unknown
pass
def loadFromSvg(self, *args, **kwargs): # real signature unknown
pass
def recreateCacheFiles(self, *args, **kwargs): # real signature unknown
pass
def removeEntries(self, *args, **kwargs): # real signature unknown
pass
def removeEntryStrategy(self, *args, **kwargs): # real signature unknown
pass
def setCacheLimit(self, *args, **kwargs): # real signature unknown
pass
def setRemoveEntryStrategy(self, *args, **kwargs): # real signature unknown
pass
def setTimestamp(self, *args, **kwargs): # real signature unknown
pass
def setUseQPixmapCache(self, *args, **kwargs): # real signature unknown
pass
def setValid(self, *args, **kwargs): # real signature unknown
pass
def size(self, *args, **kwargs): # real signature unknown
pass
def timestamp(self, *args, **kwargs): # real signature unknown
pass
def useQPixmapCache(self, *args, **kwargs): # real signature unknown
pass
def writeCustomData(self, *args, **kwargs): # real signature unknown
pass
def writeCustomIndexHeader(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
RemoveLeastRecentlyUsed = 2
RemoveOldest = 0
RemoveSeldomUsed = 1
RemoveStrategy = None # (!) real value is ''
|
gpl-2.0
| -1,374,738,902,669,333,000
| 26.941748
| 101
| 0.646977
| false
| 3.915646
| false
| false
| false
|
EclipseXuLu/DataHouse
|
DataHouse/crawler/university_spider.py
|
1
|
3941
|
import requests
from bs4 import BeautifulSoup
from lxml import etree
import pandas as pd
from io import StringIO, BytesIO
university_list = []
class University():
def __init__(self, name='', is_985=False, is_211=False, has_institute=False, location='', orgnization='',
education_level='', education_type='', university_type=''):
self.name = name
self.is_985 = is_985
self.is_211 = is_211
self.has_institute = has_institute
self.location = location
self.orgnization = orgnization
self.education_level = education_level
self.education_type = education_type
self.university_type = university_type
def __str__(self):
return "{ " + str(self.name) + " ;" + str(self.is_985) + " ;" + str(self.is_211) + " ;" + str(
self.has_institute) + " ;" + self.location + " ;" + self.orgnization + " ;" + self.education_level + " ;" \
+ self.education_type + " ;" + self.university_type + " }"
def crawl(page_url):
headers = {
'Host': 'gaokao.chsi.com.cn',
'Upgrade-Insecure-Requests': '1',
'Referer': 'http://gaokao.chsi.com.cn/sch/search--ss-on,searchType-1,option-qg,start-0.dhtml',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/59.0.3071.115 Safari/537.36'
}
response = requests.get(page_url, timeout=20, headers=headers)
if response.status_code == 200:
html_raw = response.text
soup = BeautifulSoup(html_raw, 'html5lib')
parser = etree.HTMLParser()
tree = etree.parse(StringIO(html_raw), parser)
for tr in soup.find_all(bgcolor="#E1E1E1")[0].find_all('tr', attrs={'bgcolor': '#FFFFFF'}):
try:
name = tr.td.a.text.strip() # university name
detail_url = 'http://gaokao.chsi.com.cn' + tr.td.a['href'] # detail page URL
is_985 = True if tr.td.find(class_='a211985 span985') is not None else False # 985
is_211 = True if tr.td.find(class_='a211985 span211') is not None else False # 211
has_institute = True if tr.td.find(class_='a211985 spanyan') is not None else False # graduate school
location = tr.find_all('td')[1].get_text().strip() # school location
orgnization = tr.find_all('td')[2].get_text().strip() # affiliated authority
education_level = tr.find_all('td')[3].get_text().strip() # education level
education_type = tr.find_all('td')[4].get_text().strip() # education type
university_type = tr.find_all('td')[5].get_text().strip() # institution type
university = University(name, is_985, is_211, has_institute, location, orgnization, education_level,
education_type, university_type)
print(university)
university_list.append([name, is_985, is_211, has_institute, location, orgnization, education_level,
education_type, university_type])
except:
pass
else:
print('Error!!')
def output(some_list, filepath):
col = [
u'院校名称',
u'985',
u'211',
u'研究生院',
u'所在地',
u'院校隶属',
u'学历层次',
u'办学类型',
u'院校类型']
df = pd.DataFrame(some_list, columns=col)
df.to_excel(filepath, '大学', index=False)
if __name__ == '__main__':
page_urllist = ['http://gaokao.chsi.com.cn/sch/search--ss-on,searchType-1,option-qg,start-%d.dhtml'
% _ for _ in range(0, 2660, 20)]
# crawl('http://gaokao.chsi.com.cn/sch/search--ss-on,searchType-1,option-qg,start-0.dhtml')
for page_url in page_urllist:
crawl(page_url)
output(university_list, './大学.xlsx')
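# Each row appended to university_list (and written out by output()) follows the
# column order defined above:
#   [name, is_985, is_211, has_institute, location, orgnization,
#    education_level, education_type, university_type]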
|
mit
| -7,579,131,018,589,852,000
| 40.423913
| 119
| 0.558908
| false
| 3.000787
| false
| false
| false
|
vinhqdang/algorithms_for_interviews
|
chapter1/problem1_3.py
|
1
|
1291
|
# given a sorted array and a key
# find the index of the first element that is not smaller than k (see tests below)
# return -1 if there is no such element
from problem0 import binary_search
def test_find_first_larger_1 ():
assert (find_first_larger([1,2,3,4,5,5,6],7) == -1)
def test_find_first_larger_2 ():
assert (find_first_larger([1,2,3,4,5,6,7,8],4) == 3)
def test_find_first_larger_3 ():
assert (find_first_larger([1,2,3,4,4,4,4,4,4,4,4,4,4,4,4,4,4,5,6,7,8],4) == 3)
def test_find_first_larger_4 ():
assert (find_first_larger([1,2,3,4,4,4,4,4,4,4,4,4,4,4,4,4,4,5,6,7,8],5) == 17)
def find_first_larger (_arr, _k):
if (len(_arr) == 0):
return -1
if _k < _arr[0] or _k >_arr[-1]:
return -1
l = 0
u = len(_arr) - 1
new_k = _k - 0.5
while u>l:
m = l + int ((u-l)/2)
if _arr[m] == new_k:
return m #never happen
elif _arr[m] < new_k:
l=m+1
elif _arr[m] > new_k:
u=m-1
m = l + int ((u-l)/2)
# print (u)
# print (l)
# print (m)
# print (_arr[m])
# print (_k)
if _arr[m] == _k:
return m
if _arr[m] < _k:
return m+1
if _arr[m] > _k:
return m-1
if __name__ == '__main__':
print (find_first_larger([1,2,3,4,5,6,7,8],4) == 3)
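# Worked trace of the k - 0.5 trick above (illustrative, not part of the module):
# searching for 3.5 in [1,2,3,4,5,6,7,8] can never hit an element, so the loop
# converges on the boundary between values < 4 and values >= 4:
#   l=0, u=7 -> m=3 (4 > 3.5, so u=2) -> m=1 (2 < 3.5, so l=2) -> loop ends, m=2
#   _arr[2] = 3 < 4, so the function returns m + 1 = 3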
|
gpl-3.0
| 55,772,695,387,996,910
| 24.333333
| 83
| 0.492641
| false
| 2.338768
| false
| false
| false
|
iynaix/manga-downloader-flask
|
manga/spiders/animea.py
|
1
|
1663
|
import datetime
from scrapy.selector import Selector
from .base import BaseSpider as Spider
from manga.items import MangaItem, MangaChapterItem
from utils import extract_link
class AnimeA(Spider):
name = "animea"
allowed_domains = ["animea.net"]
start_urls = [
"http://manga.animea.net/series_old.php",
]
def parse(self, resp):
hxs = Selector(resp)
for manga in hxs.css("a.tooltip_manga"):
item = MangaItem()
item['name'], item['link'] = extract_link(manga)
yield item
class AnimeAChapterSpider(Spider):
name = "animea_chapter"
allowed_domains = ["animea.net"]
# parses the date format
def parsedate(self, s):
# date is in number of days / weeks / months / years ago
s = s.strip().lower().split()
val = int(s[0])
unit = s[1]
if "day" in unit:
delta = val
elif "week" in unit:
delta = val * 7
elif "month" in unit:
delta = val * 30
elif "year" in unit:
delta = val * 365
else:
raise ValueError("Unrecognised unit: %s" % unit)
return datetime.date.today() - datetime.timedelta(delta)
def parse(self, resp):
hxs = Selector(resp)
for row in hxs.css("ul.chapterlistfull > li"):
item = MangaChapterItem()
try:
item["name"], item["link"] = extract_link(row.xpath("a")[0])
dt = row.css("span.date::text")
item["date"] = self.parsedate(dt.extract()[0])
except IndexError:
continue
yield item
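# Minimal sketch of the relative-date parsing in AnimeAChapterSpider.parsedate
# (illustrative only; "today" shifts, so expected values are computed the same way):
#
#   parsedate("3 weeks ago") -> datetime.date.today() - datetime.timedelta(21)
#   parsedate("1 month ago") -> datetime.date.today() - datetime.timedelta(30)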
|
mit
| -8,047,761,475,780,702,000
| 26.716667
| 76
| 0.549008
| false
| 3.662996
| false
| false
| false
|
libravatar/libravatar
|
libravatar/account/urls.py
|
1
|
5296
|
# Copyright (C) 2011, 2013, 2015, 2016 Francois Marier <francois@libravatar.org>
# Copyright (C) 2010 Francois Marier <francois@libravatar.org>
# Jonathan Harker <jon@jon.geek.nz>
# Brett Wilkins <bushido.katana@gmail.com>
#
# This file is part of Libravatar
#
# Libravatar is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Libravatar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Libravatar. If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls import url, patterns
# pylint: disable=invalid-name
urlpatterns = patterns('',
url('login/$', 'django.contrib.auth.views.login',
{'template_name': 'account/login.html'},
name='login'),
url('logout/$', 'django.contrib.auth.views.logout',
{'next_page': '/'},
name='logout'), # must be the last pattern using this view!
url('password_change/$',
'django.contrib.auth.views.password_change',
{'template_name': 'account/password_change.html'},
name='password_change'),
url('password_change_done/$',
'django.contrib.auth.views.password_change_done',
{'template_name': 'account/password_change_done.html'},
name='password_change_done'),
url('password_set/$',
'libravatar.account.views.password_set'),
url('add_email/$',
'libravatar.account.views.add_email'),
url('add_openid/$',
'libravatar.account.views.add_openid'),
url('confirm_email/$',
'libravatar.account.views.confirm_email'),
url(r'^(?P<openid_id>\d+)/confirm_openid/$',
'libravatar.account.views.confirm_openid'),
url(r'^(?P<openid_id>\d+)/redirect_openid/$',
'libravatar.account.views.redirect_openid'),
url(r'^(?P<email_id>\d+)/remove_confirmed_email/$',
'libravatar.account.views.remove_confirmed_email'),
url(r'^(?P<email_id>\d+)/remove_unconfirmed_email/$',
'libravatar.account.views.remove_unconfirmed_email'),
url(r'^(?P<openid_id>\d+)/remove_confirmed_openid/$',
'libravatar.account.views.remove_confirmed_openid'),
url(r'^(?P<openid_id>\d+)/remove_unconfirmed_openid/$',
'libravatar.account.views.remove_unconfirmed_openid'),
url('delete/$', 'libravatar.account.views.delete'),
url('export/$', 'libravatar.account.views.export'),
url('new/$', 'libravatar.account.views.new'),
url('password_reset/$',
'libravatar.account.views.password_reset',
name='password_reset'),
url('password_reset_confirm/$',
'libravatar.account.views.password_reset_confirm',
name='password_reset_confirm'),
url('profile/$', 'libravatar.account.views.profile'),
url('profile_success/$',
'libravatar.account.views.successfully_authenticated'),
url(r'^(?P<email_id>\d+)/assign_photo_email/$',
'libravatar.account.views.assign_photo_email'),
url(r'^(?P<openid_id>\d+)/assign_photo_openid/$',
'libravatar.account.views.assign_photo_openid'),
url(r'^(?P<user_id>\d+)/import_photo/$',
'libravatar.account.views.import_photo'),
url('upload_photo/$',
'libravatar.account.views.upload_photo'),
url('crop_photo/$',
'libravatar.account.views.crop_photo'),
url(r'^(?P<photo_id>\d+)/crop_photo/?$',
'libravatar.account.views.crop_photo'),
url(r'^(?P<photo_id>\d+)/auto_crop/?$',
'libravatar.account.views.auto_crop'),
url(r'^(?P<photo_id>\d+)/delete_photo/$',
'libravatar.account.views.delete_photo'),
# Default page
url(r'^$', 'libravatar.account.views.profile'),
)
|
agpl-3.0
| -5,768,908,466,926,725,000
| 55.946237
| 87
| 0.498301
| false
| 4.52263
| false
| false
| false
|
natsheh/semantic_query
|
api.py
|
1
|
4747
|
# -*- coding: utf-8 -*-
#
# This file is part of semantic_query.
# Copyright (C) 2016 CIAPPLE.
#
# This is a free software; you can redistribute it and/or modify it
# under the terms of the Revised BSD License; see LICENSE file for
# more details.
# Semantic Query API
# Author: Hussein AL-NATSHEH <h.natsheh@ciapple.com>
# Affiliation: CIAPPLE, Jordan
import os, argparse, pickle, json
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.pipeline import Pipeline
from collections import OrderedDict
from itertools import islice
from bs4 import BeautifulSoup
from flask import Flask, request, make_response
from flask_httpauth import HTTPBasicAuth
from flask_restful import Resource, Api, reqparse
def top(n, sorted_results):
return list(islice(sorted_results.iteritems(), n))
def query_by_text(transformer, transformed, documents, index, query_text, url, n_results=10):
query = transformer.transform(query_text)
sims = cosine_similarity(query.reshape(1,-1), transformed)
scores = sims[0][:].reshape(-1,1)
results= dict()
for i in range(len(transformed)):
results[i] = scores[i]
sorted_results = OrderedDict(sorted(results.items(), key=lambda k: k[1], reverse=True))
topn = top(n_results, sorted_results)
results = np.array(range(n_results), dtype=np.object)
for rank, (answer, score) in enumerate(topn):
title = documents[answer].split('\n__')[0]
title_t = title.replace (" ", "_")
doc_id = str(index[answer])
reference = url + title_t
results[rank] = {'reference': reference, 'score': str(score), 'doc_id': doc_id, 'title': title, 'answer': documents[answer]}
return results.tolist()
class Query(Resource):
def post(self):
try:
parser = reqparse.RequestParser()
parser.add_argument('question', type=str, required=True, help='Query text')
parser.add_argument('userId', type=str, required=False, help='User ID')
parser.add_argument('questionId', type=str, required=False, help='Question ID')
parser.add_argument('limit', type=int, required=False, help='Size of the returned results')
args = parser.parse_args()
q = request.args.get('question')
question = BeautifulSoup(q, "lxml").p.contents
try:
size = request.args.get('limit')
n_results = int(size)
if n_results > 100:
n_results = 100
except:
n_results = 3
user_id = request.args.get('userId')
question_id = request.args.get('questionId')
response = {}
response['userId'] = user_id
response['questionId'] = question_id
response['limit'] = n_results
response['interesteId'] = 'future_feature'
response['results'] = query_by_text(transformer, transformed, documents, index, question, url, n_results=n_results)
if str(type(question)) == "<type 'list'>":
question = question[0]
response['question'] = question
resp = make_response()
resp.headers['Access-Control-Allow-Origin'] = '*'
resp.headers['content-type'] = 'application/json'
resp.data = response
return response
except Exception as e:
return {'error': str(e)}
def get(self):
try:
q = request.args.get('question')
question = BeautifulSoup(q, "lxml").p.contents
try:
user_id = request.args.get('userId')
except:
user_id = 'uid1'
try:
question_id = request.args.get('questionId')
except:
question_id = 'qid1'
try:
size = request.args.get('limit')
n_results = int(size)
if n_results > 100:
n_results = 100
except:
n_results = 3
response = dict()
response['userId'] = user_id
response['questionId'] = question_id
response['limit'] = n_results
response['interesteId'] = 'future_feature'
results = query_by_text(transformer, transformed, documents, index, question, url, n_results=n_results)
response['results'] = results
if str(type(question)) == "<type 'list'>":
question = question[0]
response['question'] = question
return response
except Exception as e:
return {'error': str(e)}
app = Flask(__name__, static_url_path="")
auth = HTTPBasicAuth()
api = Api(app)
api.add_resource(Query, '/Query/')
if __name__ == '__main__':
transformed_file = 'transformed.pickle'
docs_file = 'documents.pickle'
index_file = 'index.pickle'
transformer_file = 'transformer.pickle'
transformed = np.load(transformed_file)
index = pickle.load(open(index_file,'rb'))
documents = pickle.load(open(docs_file,'rb'))
print 'number of documents :', len(index)
transformer = pickle.load(open(transformer_file,'rb'))
url_config = json.load(open('url_config.json', 'r'))
url = url_config['url']
print 'Ready to call!!'
app.run(host='0.0.0.0', threaded=True)
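# Hypothetical client-side sketch for the endpoint registered above; host and
# port are assumptions based on Flask's defaults for app.run():
#
#   import requests
#   r = requests.get('http://localhost:5000/Query/',
#                    params={'question': 'How are documents ranked?', 'limit': 3})
#   print(r.json()['results'])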
|
bsd-3-clause
| 6,488,388,960,674,293,000
| 30.230263
| 127
| 0.688224
| false
| 3.168892
| false
| false
| false
|
SitiBanc/1061_NCTU_IOMDS
|
1108/HW7/HW7.py
|
1
|
3177
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 8 20:25:18 2017
@author: sitibanc
"""
import numpy as np
from scipy import signal
from PIL import Image
def gen2DGaussian(stdv, mean, h, w):
x, y = np.meshgrid(np.linspace(-1, 1, w), np.linspace(-1, 1, h))
d = np.sqrt(x ** 2 + y ** 2)
sigma, mu = stdv, mean
g = np.exp(-((d - mu) ** 2 / (2.0 * sigma ** 2)))
return g
def applyMask(M, I_array):
R = I_array[:, :, 0]
R2 = signal.convolve2d(R, M, mode = 'same', boundary = 'symm')
G = I_array[:, :, 1]
G2 = signal.convolve2d(G, M, mode = 'same', boundary = 'symm')
B = I_array[:, :, 2]
B2 = signal.convolve2d(B, M, mode = 'same', boundary = 'symm')
data = I_array.copy()
data[:, :, 0] = R2.astype('uint8')
data[:, :, 1] = G2.astype('uint8')
data[:, :, 2] = B2.astype('uint8')
return data
# 讀圖
I = Image.open('sample.jpg')
data = np.asarray(I)
# =============================================================================
# HW7-1: Gaussian Blur
# =============================================================================
# Generate 2D Gaussian Array
M1 = gen2DGaussian(1.0, 0.0, 10, 10)
M1 = M1 / M1.sum()
# Apply Mask
masked1 = applyMask(M1, data)
I1 = Image.fromarray(masked1.astype('uint8'), 'RGB')
I1.show()
# =============================================================================
# HW7-2: Motion Blur
# =============================================================================
M2 = np.ones((20, 1))
M2 = M2 / M2.sum()
# Apply Mask
masked2 = applyMask(M2, data)
I2 = Image.fromarray(masked2.astype('uint8'), 'RGB')
I2.show()
# =============================================================================
# HW7-3: Sharpen Filter (difference of two Gaussians with different standard deviations)
# =============================================================================
# Generate Mask
#sig1 = gen2DGaussian(1.0, 0.0, 3, 3)
#sig2 = gen2DGaussian(2.0, 0.0, 3, 3)
#M3 = sig1 - sig2
#M3 = M3 / M3.sum()
# Another Mask
M3 = np.array([[-1, -1, -1], [-1, 16, -1], [-1, -1, -1]])
M3 = M3 / 8
# Apply Mask
masked3 = applyMask(M3, data)
I3 = Image.fromarray(masked3.astype('uint8'), 'RGB')
I3.show()
# =============================================================================
# HW7-4: Sobel Filter (edge enhancement, sketch-like effect)
# =============================================================================
# Gray-scale image
I0 = I.convert('L')
data0 = np.asarray(I0)
# Generate Mask
sobel_x = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])
sobel_y = np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]])
# Apply Mask
Ix = signal.convolve2d(data0, sobel_x, mode = 'same', boundary = 'symm')
Iy = signal.convolve2d(data0, sobel_y, mode = 'same', boundary = 'symm')
masked4 = Ix ** 2 + Iy ** 2
# Adjust Color
tmp = masked4.flatten()
tmp[::-1].sort() # sorting in descending order
n = 0.2
idx = int(len(tmp) * n)
for h in range(masked4.shape[0]):
for w in range(masked4.shape[1]):
if masked4[h, w] >= tmp[idx]:
masked4[h, w] = 0
else:
masked4[h, w] = 255
I4 = Image.fromarray(masked4.astype('uint8'), 'L')
I4.show()
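# Note on the Sobel step above: masked4 holds the squared gradient magnitude
# Ix**2 + Iy**2; an equivalent formulation using NumPy's np.hypot would be
# np.hypot(Ix, Iy) ** 2. The thresholding loop then paints the strongest ~20%
# of responses black (edges) and everything else white.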
|
apache-2.0
| 7,698,695,683,274,534,000
| 30.2
| 79
| 0.460083
| false
| 2.820072
| false
| false
| false
|
wufangjie/leetcode
|
015. 3Sum.py
|
1
|
1747
|
'''
Given an array S of n integers, are there elements a, b, c in S such that a + b + c = 0? Find all unique triplets in the array which gives the sum of zero.
Note: The solution set must not contain duplicate triplets.
For example, given array S = [-1, 0, 1, 2, -1, -4],
A solution set is:
[
[-1, 0, 1],
[-1, -1, 2]
]
'''
def _move_right(sorted_list, lo, hi, val):
while lo < hi:
lo += 1
if sorted_list[lo] != val:
break
return lo
def _move_left(sorted_list, lo, hi, val):
while lo < hi:
hi -= 1
if sorted_list[hi] != val:
break
return hi
def twoSum(sorted_list, lo, hi, theSum):
while lo < hi:
test = sorted_list[lo] + sorted_list[hi]
if test == theSum:
yield [sorted_list[lo], sorted_list[hi]]
lo = _move_right(sorted_list, lo, hi, sorted_list[lo])
hi = _move_left(sorted_list, lo, hi, sorted_list[hi])
elif test > theSum:
hi = _move_left(sorted_list, lo, hi, sorted_list[hi])
else:
lo = _move_right(sorted_list, lo, hi, sorted_list[lo])
class Solution(object):
def threeSum(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
nums = sorted(nums)
theMax = len(nums) - 1
pre = float('inf')
results = []
for i, a in enumerate(nums[:-2], 1):
if a > 0:
break
if a != pre:
pre = a
for comb in twoSum(nums, i, theMax, -a):
results.append([a] + comb)
return results
if __name__ == '__main__':
assert sorted(Solution().threeSum([-1, 0, 1, 2, -1, -4])) == sorted([[-1, 0, 1], [-1, -1, 2]])
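# Sketch of the duplicate-skipping helpers above on nums = [-4, -1, -1, 0, 1, 2]
# (illustrative values):
#   _move_right(nums, 1, 5, -1) -> 3   # skips the second -1
#   _move_left(nums, 1, 5, 2)   -> 4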
|
gpl-3.0
| 6,233,434,110,019,889,000
| 25.876923
| 155
| 0.502003
| false
| 3.253259
| false
| false
| false
|
satterly/alerta5
|
alerta/app/database/backends/mongodb/base.py
|
1
|
40457
|
import json
import pytz
import re
from datetime import datetime, timedelta
from flask import current_app, g
from pymongo import MongoClient, ASCENDING, TEXT, ReturnDocument
from pymongo.errors import ConnectionFailure
from alerta.app.models import status_code
from alerta.app.utils.format import DateTime
from alerta.app import severity
from alerta.app.exceptions import NoCustomerMatch, ApiError
# See https://github.com/MongoEngine/flask-mongoengine/blob/master/flask_mongoengine/__init__.py
# See https://github.com/dcrosta/flask-pymongo/blob/master/flask_pymongo/__init__.py
class Backend:
def connect(self, config):
conn = MongoClient(config.get('MONGO_URI', 'mongodb://localhost:27017/monitoring'))
if config.get('MONGO_DATABASE', None):
db = conn[config['MONGO_DATABASE']]
else:
db = conn.get_database()
# create unique indexes
db.alerts.create_index(
[('environment', ASCENDING), ('customer', ASCENDING), ('resource', ASCENDING), ('event', ASCENDING)],
unique=True
)
db.alerts.create_index([('$**', TEXT)])
db.heartbeats.create_index([('origin', ASCENDING), ('customer', ASCENDING)], unique=True)
db.metrics.create_index([('group', ASCENDING), ('name', ASCENDING)], unique=True)
return conn, db
@property
def cx(self):
return current_app.extensions['mongodb'][0]
@property
def db(self):
return current_app.extensions['mongodb'][1]
@property
def version(self):
return self.db.client.server_info()['version']
@property
def is_alive(self):
try:
self.db.client.admin.command('ismaster')
except ConnectionFailure:
return False
return True
def close(self):
self.db.close()
def destroy(self, name=None):
name = name or self.db.name
self.cx.drop_database(name)
def build_query(self, params):
query_time = datetime.utcnow()
# q
if params.get('q', None):
query = json.loads(params.pop('q'))
else:
query = dict()
# customer
if g.get('customer', None):
query['customer'] = g.get('customer')
# from-date, to-date
from_date = params.get('from-date', default=None, type=DateTime.parse)
to_date = params.get('to-date', default=query_time, type=DateTime.parse)
if from_date and to_date:
query['lastReceiveTime'] = {'$gt': from_date.replace(tzinfo=pytz.utc), '$lte': to_date.replace(tzinfo=pytz.utc)}
elif to_date:
query['lastReceiveTime'] = {'$lte': to_date.replace(tzinfo=pytz.utc)}
# duplicateCount, repeat
if params.get('duplicateCount', None):
query['duplicateCount'] = params.get('duplicateCount', int)
if params.get('repeat', None):
query['repeat'] = params.get('repeat', default=True, type=lambda x: x == 'true')
# sort-by
sort = list()
direction = 1
if params.get('reverse', None):
direction = -1
if params.get('sort-by', None):
for sort_by in params.getlist('sort-by'):
if sort_by in ['createTime', 'receiveTime', 'lastReceiveTime']:
sort.append((sort_by, -direction)) # reverse chronological
else:
sort.append((sort_by, direction))
else:
sort.append(('lastReceiveTime', -direction))
# group-by
group = params.getlist('group-by')
# page, page-size, limit (deprecated)
page = params.get('page', 1, int)
limit = params.get('limit', current_app.config['DEFAULT_PAGE_SIZE'], int)
page_size = params.get('page-size', limit, int)
# id
ids = params.getlist('id')
if len(ids) == 1:
query['$or'] = [{'_id': {'$regex': '^' + ids[0]}}, {'lastReceiveId': {'$regex': '^' + ids[0]}}]
elif ids:
query['$or'] = [{'_id': {'$regex': re.compile('|'.join(['^' + i for i in ids]))}},
{'lastReceiveId': {'$regex': re.compile('|'.join(['^' + i for i in ids]))}}]
EXCLUDE_QUERY = ['q', 'id', 'from-date', 'to-date', 'repeat', 'sort-by', 'reverse', 'group-by', 'page', 'page-size', 'limit']
# fields
for field in params:
if field in EXCLUDE_QUERY:
continue
value = params.getlist(field)
if len(value) == 1:
value = value[0]
if field.endswith('!'):
if value.startswith('~'):
query[field[:-1]] = dict()
query[field[:-1]]['$not'] = re.compile(value[1:], re.IGNORECASE)
else:
query[field[:-1]] = dict()
query[field[:-1]]['$ne'] = value
else:
if value.startswith('~'):
query[field] = dict()
query[field]['$regex'] = re.compile(value[1:], re.IGNORECASE)
else:
query[field] = value
else:
if field.endswith('!'):
if '~' in [v[0] for v in value]:
value = '|'.join([v.lstrip('~') for v in value])
query[field[:-1]] = dict()
query[field[:-1]]['$not'] = re.compile(value, re.IGNORECASE)
else:
query[field[:-1]] = dict()
query[field[:-1]]['$nin'] = value
else:
if '~' in [v[0] for v in value]:
value = '|'.join([v.lstrip('~') for v in value])
query[field] = dict()
query[field]['$regex'] = re.compile(value, re.IGNORECASE)
else:
query[field] = dict()
query[field]['$in'] = value
return query, sort, group, page, page_size, query_time
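# Illustrative sketch of how build_query() above maps request parameters to a
# MongoDB filter (the request below is hypothetical):
#
#   GET /alerts?status=open&severity=~major|critical&sort-by=severity
#
#   query -> {'status': 'open',
#             'severity': {'$regex': re.compile('major|critical', re.IGNORECASE)},
#             'lastReceiveTime': {'$lte': <query_time>}}
#   sort  -> [('severity', 1)]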
#### ALERTS
def get_severity(self, alert):
"""
Get severity of correlated alert. Used to determine previous severity.
"""
query = {
"environment": alert.environment,
"resource": alert.resource,
'$or': [
{
"event": alert.event,
"severity": {'$ne': alert.severity}
},
{
"event": {'$ne': alert.event},
"correlate": alert.event
}],
"customer": alert.customer
}
return self.db.alerts.find_one(query, projection={"severity": 1, "_id": 0})['severity']
def get_status(self, alert):
"""
Get status of correlated or duplicate alert. Used to determine previous status.
"""
query = {
"environment": alert.environment,
"resource": alert.resource,
'$or': [
{
"event": alert.event
},
{
"correlate": alert.event,
}
],
"customer": alert.customer
}
return self.db.alerts.find_one(query, projection={"status": 1, "_id": 0})['status']
def is_duplicate(self, alert):
query = {
"environment": alert.environment,
"resource": alert.resource,
"event": alert.event,
"severity": alert.severity,
"customer": alert.customer
}
return bool(self.db.alerts.find_one(query))
def is_correlated(self, alert):
query = {
"environment": alert.environment,
"resource": alert.resource,
'$or': [
{
"event": alert.event,
"severity": {'$ne': alert.severity}
},
{
"event": {'$ne': alert.event},
"correlate": alert.event
}],
"customer": alert.customer
}
return bool(self.db.alerts.find_one(query))
def is_flapping(self, alert, window=1800, count=2):
"""
Return true if alert severity has changed more than X times in Y seconds
"""
pipeline = [
{'$match': {"environment": alert.environment, "resource": alert.resource, "event": alert.event}},
{'$unwind': '$history'},
{'$match': {
"history.updateTime": {'$gt': datetime.utcnow() - timedelta(seconds=window)},
"history.type": "severity"
}},
{
'$group': {
"_id": '$history.type',
"count": {'$sum': 1}
}
}
]
responses = self.db.alerts.aggregate(pipeline)
for r in responses:
if r['count'] > count:
return True
return False
def dedup_alert(self, alert):
"""
Update alert value, text and rawData, increment duplicate count and set repeat=True, and
keep track of last receive id and time but don't append to history unless status changes.
"""
previous_status = self.get_status(alert)
if alert.status != status_code.UNKNOWN and alert.status != previous_status:
status = alert.status
else:
status = status_code.status_from_severity(alert.severity, alert.severity, previous_status)
query = {
"environment": alert.environment,
"resource": alert.resource,
"event": alert.event,
"severity": alert.severity,
"customer": alert.customer
}
now = datetime.utcnow()
update = {
'$set': {
"status": status,
"value": alert.value,
"text": alert.text,
"rawData": alert.raw_data,
"repeat": True,
"lastReceiveId": alert.id,
"lastReceiveTime": now
},
'$addToSet': {"tags": {'$each': alert.tags}},
'$inc': {"duplicateCount": 1}
}
# only update those attributes that are specifically defined
attributes = {'attributes.'+k: v for k, v in alert.attributes.items()}
update['$set'].update(attributes)
if status != previous_status:
update['$push'] = {
"history": {
'$each': [{
"event": alert.event,
"status": status,
"type": "status",
"text": "duplicate alert status change",
"id": alert.id,
"updateTime": now
}],
'$slice': -abs(current_app.config['HISTORY_LIMIT'])
}
}
return self.db.alerts.find_one_and_update(
query,
update=update,
projection={"history": 0},
return_document=ReturnDocument.AFTER
)
def correlate_alert(self, alert):
"""
Update alert key attributes, reset duplicate count and set repeat=False, keep track of last
receive id and time, appending all to history. Append to history again if status changes.
"""
previous_severity = self.get_severity(alert)
previous_status = self.get_status(alert)
trend_indication = severity.trend(previous_severity, alert.severity)
if alert.status == status_code.UNKNOWN:
status = status_code.status_from_severity(previous_severity, alert.severity, previous_status)
else:
status = alert.status
query = {
"environment": alert.environment,
"resource": alert.resource,
'$or': [
{
"event": alert.event,
"severity": {'$ne': alert.severity}
},
{
"event": {'$ne': alert.event},
"correlate": alert.event
}],
"customer": alert.customer
}
now = datetime.utcnow()
update = {
'$set': {
"event": alert.event,
"severity": alert.severity,
"status": status,
"value": alert.value,
"text": alert.text,
"createTime": alert.create_time,
"rawData": alert.raw_data,
"duplicateCount": 0,
"repeat": False,
"previousSeverity": previous_severity,
"trendIndication": trend_indication,
"receiveTime": now,
"lastReceiveId": alert.id,
"lastReceiveTime": now
},
'$addToSet': {"tags": {'$each': alert.tags}},
'$push': {
"history": {
'$each': [{
"event": alert.event,
"severity": alert.severity,
"value": alert.value,
"type": "severity",
"text": alert.text,
"id": alert.id,
"updateTime": now
}],
'$slice': -abs(current_app.config['HISTORY_LIMIT'])
}
}
}
# only update those attributes that are specifically defined
attributes = {'attributes.'+k: v for k, v in alert.attributes.items()}
update['$set'].update(attributes)
if status != previous_status:
update['$push']['history']['$each'].append({
"event": alert.event,
"status": status,
"type": "status",
"text": "correlated alert status change",
"id": alert.id,
"updateTime": now
})
return self.db.alerts.find_one_and_update(
query,
update=update,
projection={"history": 0},
return_document=ReturnDocument.AFTER
)
def create_alert(self, alert):
data = {
"_id": alert.id,
"resource": alert.resource,
"event": alert.event,
"environment": alert.environment,
"severity": alert.severity,
"correlate": alert.correlate,
"status": alert.status,
"service": alert.service,
"group": alert.group,
"value": alert.value,
"text": alert.text,
"tags": alert.tags,
"attributes": alert.attributes,
"origin": alert.origin,
"type": alert.event_type,
"createTime": alert.create_time,
"timeout": alert.timeout,
"rawData": alert.raw_data,
"customer": alert.customer,
"duplicateCount": alert.duplicate_count,
"repeat": alert.repeat,
"previousSeverity": alert.previous_severity,
"trendIndication": alert.trend_indication,
"receiveTime": alert.receive_time,
"lastReceiveId": alert.last_receive_id,
"lastReceiveTime": alert.last_receive_time,
"history": [h.serialize for h in alert.history]
}
if self.db.alerts.insert_one(data).inserted_id == alert.id:
return data
def get_alert(self, id, customer=None):
if len(id) == 8:
query = {'$or': [{'_id': {'$regex': '^' + id}}, {'lastReceiveId': {'$regex': '^' + id}}]}
else:
query = {'$or': [{'_id': id}, {'lastReceiveId': id}]}
if customer:
query['customer'] = customer
return self.db.alerts.find_one(query)
#### STATUS, TAGS, ATTRIBUTES
def set_status(self, id, status, text=None):
"""
Set status and update history.
"""
query = {'_id': {'$regex': '^' + id}}
event = self.db.alerts.find_one(query, projection={"event": 1, "_id": 0})['event']
if not event:
return False
now = datetime.utcnow()
update = {
'$set': {"status": status},
'$push': {
"history": {
'$each': [{
"event": event,
"status": status,
"type": "status",
"text": text,
"id": id,
"updateTime": now
}],
'$slice': -abs(current_app.config['HISTORY_LIMIT'])
}
}
}
return self.db.alerts.find_one_and_update(
query,
update=update,
projection={"history": 0},
return_document=ReturnDocument.AFTER
)
def tag_alert(self, id, tags):
"""
Append tags to tag list. Don't add same tag more than once.
"""
response = self.db.alerts.update_one({'_id': {'$regex': '^' + id}}, {'$addToSet': {"tags": {'$each': tags}}})
return response.matched_count > 0
def untag_alert(self, id, tags):
"""
Remove tags from tag list.
"""
response = self.db.alerts.update_one({'_id': {'$regex': '^' + id}}, {'$pullAll': {"tags": tags}})
return response.matched_count > 0
def update_attributes(self, id, attrs):
"""
Set all attributes (including private attributes) and unset attributes by using a value of 'null'.
"""
update = dict()
set_value = {'attributes.' + k: v for k, v in attrs.items() if v is not None}
if set_value:
update['$set'] = set_value
unset_value = {'attributes.' + k: v for k, v in attrs.items() if v is None}
if unset_value:
update['$unset'] = unset_value
response = self.db.alerts.update_one({'_id': {'$regex': '^' + id}}, update=update)
return response.matched_count > 0
def delete_alert(self, id):
response = self.db.alerts.delete_one({'_id': {'$regex': '^' + id}})
return True if response.deleted_count == 1 else False
#### SEARCH & HISTORY
def get_alerts(self, query=None, sort=None, page=1, page_size=0):
return self.db.alerts.find(query, sort=sort).skip((page-1)*page_size).limit(page_size)
def get_history(self, query=None, fields=None):
if not fields:
fields = {
"resource": 1,
"event": 1,
"environment": 1,
"customer": 1,
"service": 1,
"group": 1,
"tags": 1,
"attributes": 1,
"origin": 1,
"type": 1,
"history": 1
}
pipeline = [
{'$match': query},
{'$unwind': '$history'},
{'$project': fields},
{'$limit': current_app.config['HISTORY_LIMIT']},
{'$sort': {'history.updateTime': 1}}
]
responses = self.db.alerts.aggregate(pipeline)
history = list()
for response in responses:
if 'severity' in response['history']:
history.append(
{
"id": response['_id'], # or response['history']['id']
"resource": response['resource'],
"event": response['history']['event'],
"environment": response['environment'],
"severity": response['history']['severity'],
"service": response['service'],
"group": response['group'],
"value": response['history']['value'],
"text": response['history']['text'],
"tags": response['tags'],
"attributes": response['attributes'],
"origin": response['origin'],
"updateTime": response['history']['updateTime'],
"type": response['history'].get('type', 'unknown'),
"customer": response.get('customer', None)
}
)
elif 'status' in response['history']:
history.append(
{
"id": response['_id'], # or response['history']['id']
"resource": response['resource'],
"event": response['event'],
"environment": response['environment'],
"status": response['history']['status'],
"service": response['service'],
"group": response['group'],
"text": response['history']['text'],
"tags": response['tags'],
"attributes": response['attributes'],
"origin": response['origin'],
"updateTime": response['history']['updateTime'],
"type": response['history'].get('type', 'unknown'),
"customer": response.get('customer', None)
}
)
return history
#### COUNTS
def get_count(self, query=None):
"""
Return total number of alerts that meet the query filter.
"""
return self.db.alerts.find(query).count()
def get_counts(self, query=None, fields=None, group=None):
pipeline = [
{'$match': query},
{'$project': fields or {}},
{'$group': {"_id": "$" + group, "count": {'$sum': 1}}}
]
responses = self.db.alerts.aggregate(pipeline)
counts = dict()
for response in responses:
counts[response['_id']] = response['count']
return counts
def get_counts_by_severity(self, query=None):
return self.get_counts(query, fields={"severity": 1}, group="severity")
def get_counts_by_status(self, query=None):
return self.get_counts(query, fields={"status": 1}, group="status")
def get_topn_count(self, query=None, group="event", topn=10):
pipeline = [
{'$match': query},
{'$unwind': '$service'},
{
'$group': {
"_id": "$%s" % group,
"count": {'$sum': 1},
"duplicateCount": {'$sum': "$duplicateCount"},
"environments": {'$addToSet': "$environment"},
"services": {'$addToSet': "$service"},
"resources": {'$addToSet': {"id": "$_id", "resource": "$resource"}}
}
},
{'$sort': {"count": -1, "duplicateCount": -1}},
{'$limit': topn}
]
responses = self.db.alerts.aggregate(pipeline)
top = list()
for response in responses:
top.append(
{
"%s" % group: response['_id'],
"environments": response['environments'],
"services": response['services'],
"resources": response['resources'],
"count": response['count'],
"duplicateCount": response['duplicateCount']
}
)
return top
def get_topn_flapping(self, query=None, group="event", topn=10):
pipeline = [
{'$match': query},
{'$unwind': '$service'},
{'$unwind': '$history'},
{'$match': {"history.type": "severity"}},
{
'$group': {
"_id": "$%s" % group,
"count": {'$sum': 1},
"duplicateCount": {'$max': "$duplicateCount"},
"environments": {'$addToSet': "$environment"},
"services": {'$addToSet': "$service"},
"resources": {'$addToSet': {"id": "$_id", "resource": "$resource"}}
}
},
{'$sort': {"count": -1, "duplicateCount": -1}},
{'$limit': topn}
]
responses = self.db.alerts.aggregate(pipeline)
top = list()
for response in responses:
top.append(
{
"%s" % group: response['_id'],
"environments": response['environments'],
"services": response['services'],
"resources": response['resources'],
"count": response['count'],
"duplicateCount": response['duplicateCount']
}
)
return top
#### ENVIRONMENTS
def get_environments(self, query=None, topn=100):
pipeline = [
{'$match': query},
{'$project': {"environment": 1}},
{'$limit': topn},
{'$group': {"_id": "$environment", "count": {'$sum': 1}}}
]
responses = self.db.alerts.aggregate(pipeline)
environments = list()
for response in responses:
environments.append(
{
"environment": response['_id'],
"count": response['count']
}
)
return environments
#### SERVICES
def get_services(self, query=None, topn=100):
pipeline = [
{'$unwind': '$service'},
{'$match': query},
{'$project': {"environment": 1, "service": 1}},
{'$limit': topn},
{'$group': {"_id": {"environment": "$environment", "service": "$service"}, "count": {'$sum': 1}}}
]
responses = self.db.alerts.aggregate(pipeline)
services = list()
for response in responses:
services.append(
{
"environment": response['_id']['environment'],
"service": response['_id']['service'],
"count": response['count']
}
)
return services
#### BLACKOUTS
def create_blackout(self, blackout):
data = {
"_id": blackout.id,
"priority": blackout.priority,
"environment": blackout.environment,
"startTime": blackout.start_time,
"endTime": blackout.end_time,
"duration": blackout.duration
}
if blackout.service:
data["service"] = blackout.service
if blackout.resource:
data["resource"] = blackout.resource
if blackout.event:
data["event"] = blackout.event
if blackout.group:
data["group"] = blackout.group
if blackout.tags:
data["tags"] = blackout.tags
if blackout.customer:
data["customer"] = blackout.customer
if self.db.blackouts.insert_one(data).inserted_id == blackout.id:
return data
def get_blackout(self, id, customer=None):
query = {'_id': id}
if customer:
query['customer'] = customer
return self.db.blackouts.find_one(query)
def get_blackouts(self, query=None, page=1, page_size=0):
return self.db.blackouts.find(query).skip((page - 1) * page_size).limit(page_size)
def is_blackout_period(self, alert):
now = datetime.utcnow()
query = dict()
query['startTime'] = {'$lte': now}
query['endTime'] = {'$gt': now}
query['environment'] = alert.environment
query['$or'] = [
{
"resource": {'$exists': False},
"service": {'$exists': False},
"event": {'$exists': False},
"group": {'$exists': False},
"tags": {'$exists': False}
},
{
"resource": alert.resource,
"service": {'$exists': False},
"event": {'$exists': False},
"group": {'$exists': False},
"tags": {'$exists': False}
},
{
"resource": {'$exists': False},
"service": {"$not": {"$elemMatch": {"$nin": alert.service}}},
"event": {'$exists': False},
"group": {'$exists': False},
"tags": {'$exists': False}
},
{
"resource": {'$exists': False},
"service": {'$exists': False},
"event": alert.event,
"group": {'$exists': False},
"tags": {'$exists': False}
},
{
"resource": {'$exists': False},
"service": {'$exists': False},
"event": {'$exists': False},
"group": alert.group,
"tags": {'$exists': False}
},
{
"resource": alert.resource,
"service": {'$exists': False},
"event": alert.event,
"group": {'$exists': False},
"tags": {'$exists': False}
},
{
"resource": {'$exists': False},
"service": {'$exists': False},
"event": {'$exists': False},
"group": {'$exists': False},
"tags": {"$not": {"$elemMatch": {"$nin": alert.tags}}}
}
]
if self.db.blackouts.find_one(query):
return True
if current_app.config['CUSTOMER_VIEWS']:
query['customer'] = alert.customer
if self.db.blackouts.find_one(query):
return True
return False
def delete_blackout(self, id):
response = self.db.blackouts.delete_one({"_id": id})
return True if response.deleted_count == 1 else False
#### HEARTBEATS
def upsert_heartbeat(self, heartbeat):
return self.db.heartbeats.find_one_and_update(
{
"origin": heartbeat.origin,
"customer": heartbeat.customer
},
{
'$setOnInsert': {
"_id": heartbeat.id
},
'$set': {
"origin": heartbeat.origin,
"tags": heartbeat.tags,
"type": heartbeat.event_type,
"createTime": heartbeat.create_time,
"timeout": heartbeat.timeout,
"receiveTime": heartbeat.receive_time,
"customer": heartbeat.customer
}
},
upsert=True,
return_document=ReturnDocument.AFTER
)
def get_heartbeat(self, id, customer=None):
if len(id) == 8:
query = {'_id': {'$regex': '^' + id}}
else:
query = {'_id': id}
if customer:
query['customer'] = customer
return self.db.heartbeats.find_one(query)
def get_heartbeats(self, query=None, page=1, page_size=0):
return self.db.heartbeats.find(query).skip((page - 1) * page_size).limit(page_size)
def delete_heartbeat(self, id):
response = self.db.heartbeats.delete_one({'_id': {'$regex': '^' + id}})
return True if response.deleted_count == 1 else False
#### API KEYS
# save
def create_key(self, key):
data = {
"_id": key.key,
"user": key.user,
"scopes": key.scopes,
"text": key.text,
"expireTime": key.expire_time,
"count": key.count,
"lastUsedTime": key.last_used_time
}
if key.customer:
data['customer'] = key.customer
if self.db.keys.insert_one(data).inserted_id == key.key:
return data
# get
def get_key(self, key, customer=None):
query = {'$or': [{'key': key}, {'_id': key}]}
if customer:
query['customer'] = customer
return self.db.keys.find_one(query)
# list
def get_keys(self, query=None, page=1, page_size=0):
return self.db.keys.find(query).skip((page - 1) * page_size).limit(page_size)
# update
def update_key_last_used(self, key):
return self.db.keys.update_one(
{'$or': [{'key': key}, {'_id': key}]},
{
'$set': {"lastUsedTime": datetime.utcnow()},
'$inc': {"count": 1}
}
).matched_count == 1
# delete
def delete_key(self, key):
query = {'$or': [{'key': key}, {'_id': key}]}
response = self.db.keys.delete_one(query)
return True if response.deleted_count == 1 else False
#### USERS
def create_user(self, user):
data = {
"_id": user.id,
"name": user.name,
"password": user.password,
"email": user.email,
"createTime": user.create_time,
"lastLogin": user.last_login,
"text": user.text,
"email_verified": user.email_verified
}
if self.db.users.insert_one(data).inserted_id == user.id:
return data
# get
def get_user(self, id, customer=None):
query = {'_id': id}
if customer:
query['customer'] = customer
return self.db.users.find_one(query)
# list
def get_users(self, query=None, page=1, page_size=0):
return self.db.users.find(query).skip((page - 1) * page_size).limit(page_size)
def get_user_by_email(self, email):
query = {"email": email}
return self.db.users.find_one(query)
def get_user_by_hash(self, hash):
query = {"hash": hash}
return self.db.users.find_one(query)
def get_user_password(self, id):
return
def update_last_login(self, id):
return self.db.users.update_one(
{"_id": id},
update={'$set': {"lastLogin": datetime.utcnow()}}
).matched_count == 1
def set_email_hash(self, id, hash):
return self.db.users.update_one(
{"_id": id},
update={'$set': {'hash': hash, 'updateTime': datetime.utcnow()}}
).matched_count == 1
def update_user(self, id, **kwargs):
return self.db.users.find_one_and_update(
{"_id": id},
update={'$set': kwargs},
return_document=ReturnDocument.AFTER
)
def delete_user(self, id):
response = self.db.users.delete_one({"_id": id})
return True if response.deleted_count == 1 else False
#### PERMISSIONS
def create_perm(self, perm):
data = {
"_id": perm.id,
"match": perm.match,
"scopes": perm.scopes
}
if self.db.perms.insert_one(data).inserted_id == perm.id:
return data
def get_perm(self, id):
query = {'_id': id}
return self.db.perms.find_one(query)
def get_perms(self, query=None, page=1, page_size=0):
return self.db.perms.find(query).skip((page - 1) * page_size).limit(page_size)
def delete_perm(self, id):
response = self.db.perms.delete_one({"_id": id})
return True if response.deleted_count == 1 else False
def get_scopes_by_match(self, login, matches):
if login in current_app.config['ADMIN_USERS']:
return ['admin', 'read', 'write']
scopes = list()
for match in matches:
response = self.db.perms.find_one({"match": match}, projection={"scopes": 1, "_id": 0})
if response:
scopes.extend(response['scopes'])
return set(scopes) or current_app.config['USER_DEFAULT_SCOPES']
#### CUSTOMERS
def create_customer(self, customer):
data = {
"_id": customer.id,
"match": customer.match,
"customer": customer.customer
}
if self.db.customers.insert_one(data).inserted_id == customer.id:
return data
def get_customer(self, id):
query = {'_id': id}
return self.db.customers.find_one(query)
def get_customers(self, query=None, page=1, page_size=0):
return self.db.customers.find(query).skip((page - 1) * page_size).limit(page_size)
def delete_customer(self, id):
response = self.db.customers.delete_one({"_id": id})
return True if response.deleted_count == 1 else False
def get_customers_by_match(self, login, matches):
if login in current_app.config['ADMIN_USERS']:
return '*' # all customers
for match in [login] + matches:
response = self.db.customers.find_one({"match": match}, projection={"customer": 1, "_id": 0})
if response:
return response['customer']
raise NoCustomerMatch("No customer lookup configured for user '%s' or '%s'" % (login, ','.join(matches)))
#### METRICS
def get_metrics(self, type=None):
query = {"type": type} if type else {}
return list(self.db.metrics.find(query, {"_id": 0}))
def set_gauge(self, group, name, title=None, description=None, value=0):
return self.db.metrics.find_one_and_update(
{
"group": group,
"name": name
},
{
'$set': {
"group": group,
"name": name,
"title": title,
"description": description,
"value": value,
"type": "gauge"
}
},
upsert=True,
return_document=ReturnDocument.AFTER
)['value']
def get_gauges(self):
from alerta.app.models.metrics import Gauge
return [
Gauge(
group=g.get('group'),
name=g.get('name'),
title=g.get('title', ''),
description=g.get('description', ''),
value=g.get('value', 0)
) for g in self.db.metrics.find({"type": "gauge"}, {"_id": 0})
]
def inc_counter(self, group, name, title=None, description=None, count=1):
return self.db.metrics.find_one_and_update(
{
"group": group,
"name": name
},
{
'$set': {
"group": group,
"name": name,
"title": title,
"description": description,
"type": "counter"
},
'$inc': {"count": count}
},
upsert=True,
return_document=ReturnDocument.AFTER
)['count']
def get_counters(self):
from alerta.app.models.metrics import Counter
return [
Counter(
group=c.get('group'),
name=c.get('name'),
title=c.get('title', ''),
description=c.get('description', ''),
count=c.get('count', 0)
) for c in self.db.metrics.find({"type": "counter"}, {"_id": 0})
]
def update_timer(self, group, name, title=None, description=None, count=1, duration=0):
return self.db.metrics.find_one_and_update(
{
"group": group,
"name": name
},
{
'$set': {
"group": group,
"name": name,
"title": title,
"description": description,
"type": "timer"
},
'$inc': {"count": count, "totalTime": duration}
},
upsert=True,
return_document=ReturnDocument.AFTER
)
def get_timers(self):
from alerta.app.models.metrics import Timer
return [
Timer(
group=t.get('group'),
name=t.get('name'),
title=t.get('title', ''),
description=t.get('description', ''),
count=t.get('count', 0),
total_time=t.get('totalTime', 0)
) for t in self.db.metrics.find({"type": "timer"}, {"_id": 0})
]
|
apache-2.0
| -2,205,730,669,139,260,000
| 33.756873
| 133
| 0.470994
| false
| 4.378463
| false
| false
| false
|
ckan/ckanext-archiver
|
ckanext/archiver/lib.py
|
1
|
1725
|
import os
import logging
import ckan.plugins as p
from ckanext.archiver.tasks import update_package, update_resource
log = logging.getLogger(__name__)
def compat_enqueue(name, fn, queue, args=None):
u'''
Enqueue a background job using Celery or RQ.
'''
try:
# Try to use RQ
from ckan.plugins.toolkit import enqueue_job
enqueue_job(fn, args=args, queue=queue)
except ImportError:
# Fallback to Celery
import uuid
from ckan.lib.celery_app import celery
celery.send_task(name, args=args + [queue], task_id=str(uuid.uuid4()))
def create_archiver_resource_task(resource, queue):
from pylons import config
if p.toolkit.check_ckan_version(max_version='2.2.99'):
# earlier CKANs had ResourceGroup
package = resource.resource_group.package
else:
package = resource.package
ckan_ini_filepath = os.path.abspath(config['__file__'])
compat_enqueue('archiver.update_resource', update_resource, queue, [ckan_ini_filepath, resource.id])
log.debug('Archival of resource put into celery queue %s: %s/%s url=%r',
queue, package.name, resource.id, resource.url)
def create_archiver_package_task(package, queue):
from pylons import config
ckan_ini_filepath = os.path.abspath(config['__file__'])
compat_enqueue('archiver.update_package', update_package, queue, [ckan_ini_filepath, package.id])
log.debug('Archival of package put into celery queue %s: %s',
queue, package.name)
def get_extra_from_pkg_dict(pkg_dict, key, default=None):
for extra in pkg_dict.get('extras', []):
if extra['key'] == key:
return extra['value']
return default
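# Hypothetical usage sketch of the helper above (pkg_dict mirrors the shape of
# CKAN's package_show output; the extra key is illustrative):
#
#   pkg_dict = {'extras': [{'key': 'archiver_status', 'value': 'ok'}]}
#   get_extra_from_pkg_dict(pkg_dict, 'archiver_status')          # -> 'ok'
#   get_extra_from_pkg_dict(pkg_dict, 'missing', default='n/a')   # -> 'n/a'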
|
mit
| 7,860,166,736,346,997,000
| 30.944444
| 104
| 0.663188
| false
| 3.571429
| false
| false
| false
|
jhermann/rituals
|
src/rituals/util/shell.py
|
1
|
2127
|
# -*- coding: utf-8 -*-
# pylint: disable=bad-continuation
""" Shell command calls.
"""
# Copyright ⓒ 2015 Jürgen Hermann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# The full LICENSE file and source are available at
# https://github.com/jhermann/rituals
from __future__ import absolute_import, unicode_literals, print_function
import sys
from invoke import run as invoke_run
from invoke import exceptions
from . import notify
def capture(cmd, **kw):
"""Run a command and return its stripped captured output."""
kw = kw.copy()
kw['hide'] = 'out'
if not kw.get('echo', False):
kw['echo'] = False
ignore_failures = kw.pop('ignore_failures', False)
try:
return invoke_run(cmd, **kw).stdout.strip()
except exceptions.Failure as exc:
if not ignore_failures:
notify.error("Command `{}` failed with RC={}!".format(cmd, exc.result.return_code,))
raise
def run(cmd, **kw):
"""Run a command and flush its output."""
kw = kw.copy()
kw.setdefault('warn', False) # make extra sure errors don't get silenced
report_error = kw.pop('report_error', True)
runner = kw.pop('runner', invoke_run)
try:
return runner(cmd, **kw)
except exceptions.Failure as exc:
sys.stdout.flush()
sys.stderr.flush()
if report_error:
notify.error("Command `{}` failed with RC={}!".format(cmd, exc.result.return_code,))
raise
finally:
sys.stdout.flush()
sys.stderr.flush()
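# Hedged usage sketch (not part of the original module): capture() returns a
# command's stripped stdout, run() executes a command and flushes its output;
# both re-raise invoke failures. The git command below is only an example.
def _example_shell_usage():
    branch = capture("git rev-parse --abbrev-ref HEAD")
    run("echo 'on branch {0}'".format(branch), echo=True)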
|
gpl-2.0
| 1,718,451,169,136,169,700
| 31.676923
| 96
| 0.669021
| false
| 3.840868
| false
| false
| false
|
Nth-iteration-labs/streamingbandit
|
app/handlers/evalhandlers.py
|
1
|
3985
|
# -*- coding: utf-8 -*-
import json
import numpy as np
from handlers.basehandler import BaseHandler, ExceptionHandler
from core.experiment import Experiment
global numpy
global random
class Simulate(BaseHandler):
def get(self, exp_id):
""" Simulate your experiment based on four scripts, which create a closed feedback loop.
        +----------------------------------------------------------------+
        | Example                                                        |
        +================================================================+
        | http://example.com/eval/EXP_ID/simulate?N=1000&log_stats=True  |
        | &verbose=True&seed=10                                          |
        +----------------------------------------------------------------+
:requires: A secure cookie, obtained by logging in.
:param int exp_id: Experiment ID as specified in the url.
:param int N: The number of simulation draws.
:param bool log_stats: Flag for logging the results in the database (default is False)
:param bool verbose: Flag for displaying the results in the returning JSON object (default is True)
:param int seed (optional): Set numpy seed.
:returns: A JSON indicating success when verbose flag is False, and a JSON with all the data when verbose flag is True.
:raises 400: If the experiment does not belong to this user or the exp_id is wrong.
:raises 401: If user is not logged in or if there is no secure cookie available.
"""
if self.get_current_user():
if self.validate_user_experiment(exp_id):
N = int(self.get_argument("N", default = 1000))
log_stats = self.get_argument("log_stats", default = False)
verbose = self.get_argument("verbose", default = True)
seed = self.get_argument("seed", default = None)
if seed is None:
seed = np.random.randint(2**32-1, dtype=np.uint32)
if verbose == "True":
verbose = True
else:
verbose = False
if log_stats == "True":
log_stats = True
else:
log_stats = False
__EXP__ = Experiment(exp_id)
data = {}
with self.temp_seed(int(seed)):
for i in range(N):
# Generate context
context = __EXP__.run_context_code()
# Get action
action = __EXP__.run_action_code(context, {})
# Generate reward
reward = __EXP__.run_get_reward_code(context, action)
# Set reward
__EXP__.run_reward_code(context, action, reward)
# Get theta
theta = __EXP__.get_theta()
# Save stats
data[str(i)] = {'context' : context.copy(), 'action' : action.copy(), 'reward' : reward.copy(), 'theta' : theta.copy()}
context.clear()
action.clear()
reward.clear()
if log_stats == True:
__EXP__.log_simulation_data(data.copy())
data_tmp = data.copy()
data.clear()
if verbose == True:
self.write(json.dumps({'simulate':'success', 'experiment':exp_id, 'data':data_tmp}))
else:
self.write(json.dumps({'simulate':'success', 'experiment':exp_id, 'theta':theta}))
else:
raise ExceptionHandler(reason="Experiment could not be validated.", status_code=401)
else:
raise ExceptionHandler(reason="Could not validate user.", status_code=401)
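# Hedged usage sketch (not part of the original handler): a client-side call to
# the simulate endpoint described in the docstring above. The host, experiment
# id placeholder (EXP_ID) and cookie value are assumptions; in practice the
# secure cookie is obtained by logging in first.
def _example_simulate_request():
    import requests  # assumed to be available on the client side
    url = "http://example.com/eval/EXP_ID/simulate"
    params = {"N": 1000, "log_stats": "True", "verbose": "True", "seed": 10}
    response = requests.get(url, params=params, cookies={"user": "<secure-cookie>"})
    return response.json()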
|
mit
| -3,311,606,737,012,722,000
| 42.791209
| 143
| 0.473275
| false
| 5.018892
| false
| false
| false
|
brettcs/diffoscope
|
diffoscope/comparators/utils/archive.py
|
1
|
3833
|
# -*- coding: utf-8 -*-
#
# diffoscope: in-depth comparison of files, archives, and directories
#
# Copyright © 2016 Chris Lamb <lamby@debian.org>
#
# diffoscope is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# diffoscope is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with diffoscope. If not, see <https://www.gnu.org/licenses/>.
import abc
import logging
from diffoscope.profiling import profile
from diffoscope.tempfiles import get_temporary_directory
from ..missing_file import MissingFile
from .file import File
from .container import Container
logger = logging.getLogger(__name__)
class Archive(Container, metaclass=abc.ABCMeta):
def __new__(cls, source, *args, **kwargs):
if isinstance(source, MissingFile):
return super(Container, MissingArchive).__new__(MissingArchive)
else:
return super(Container, cls).__new__(cls)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
with profile('open_archive', self):
self._archive = self.open_archive()
def __del__(self):
with profile('close_archive', self):
self.close_archive()
@property
def archive(self):
return self._archive
@abc.abstractmethod
def open_archive(self):
raise NotImplementedError()
@abc.abstractmethod
def close_archive(self):
raise NotImplementedError()
@abc.abstractmethod
def get_member_names(self):
raise NotImplementedError()
@abc.abstractmethod
def extract(self, member_name, dest_dir):
raise NotImplementedError()
def get_member(self, member_name):
return ArchiveMember(self, member_name)
class ArchiveMember(File):
def __init__(self, container, member_name):
super().__init__(container=container)
self._name = member_name
self._temp_dir = None
self._path = None
@property
def path(self):
if self._path is None:
logger.debug("Unpacking %s", self._name)
assert self._temp_dir is None
self._temp_dir = get_temporary_directory()
with profile('container_extract', self.container):
self._path = self.container.extract(self._name, self._temp_dir.name)
return self._path
def cleanup(self):
if self._path is not None:
self._path = None
if self._temp_dir is not None:
self._temp_dir.cleanup()
self._temp_dir = None
super().cleanup()
def is_directory(self):
return False
def is_symlink(self):
return False
def is_device(self):
return False
class MissingArchiveLikeObject(object):
def getnames(self):
return []
def list(self, *args, **kwargs):
return ''
def close(self):
pass
class MissingArchive(Archive):
@property
def source(self):
return None
def open_archive(self):
return MissingArchiveLikeObject()
def close_archive(self):
pass
def get_member_names(self):
return []
def extract(self, member_name, dest_dir):
# should never be called
raise NotImplementedError()
def get_member(self, member_name):
return MissingFile('/dev/null')
# Be nice to gzip and the likes
@property
def path(self):
return '/dev/null'
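# Hedged sketch (not part of diffoscope): a minimal concrete Archive subclass,
# shown only to illustrate the abstract contract above. It assumes the container
# exposes its source file via `self.source.path`; real diffoscope comparators
# add file-type detection and richer extraction logic.
import os
import zipfile
class ExampleZipArchive(Archive):
    def open_archive(self):
        return zipfile.ZipFile(self.source.path, 'r')
    def close_archive(self):
        self._archive.close()
    def get_member_names(self):
        return self.archive.namelist()
    def extract(self, member_name, dest_dir):
        self.archive.extract(member_name, path=dest_dir)
        return os.path.join(dest_dir, member_name)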
|
gpl-3.0
| 5,791,410,520,121,028,000
| 25.611111
| 84
| 0.63857
| false
| 4.12931
| false
| false
| false
|
edx/pyrasite
|
pyrasite/inspector.py
|
1
|
1168
|
# This file is part of pyrasite.
#
# pyrasite is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyrasite is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyrasite. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright (C) 2011, 2012 Red Hat, Inc., Luke Macken <lmacken@redhat.com>
import subprocess
def inspect(pid, address):
"Return the value of an object in a given process at the specified address"
cmd = ' '.join([
'gdb --quiet -p %s -batch' % pid,
'-eval-command="print (PyObject *)%s"' % address,
])
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
for line in p.communicate()[0].split('\n'):
if line.startswith('$1 = '):
return line[5:]
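# Hedged usage sketch (not part of the original module): inspecting an object in
# a running Python process. The pid and address below are placeholders; the
# address is typically obtained with id(obj) inside the target process, and gdb
# must be installed and allowed to attach (ptrace) for this to work.
if __name__ == '__main__':
    import sys
    target_pid, target_address = sys.argv[1], sys.argv[2]
    print(inspect(target_pid, target_address))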
|
gpl-3.0
| 1,011,668,733,586,748,900
| 39.275862
| 79
| 0.694349
| false
| 3.719745
| false
| false
| false
|
ojii/django-shop
|
tests/testapp/settings.py
|
1
|
4005
|
# Django settings for example project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('Christopher Glass', 'tribaal@gmail.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'test.sqlite', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/Zurich'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'h2%uf!luks79rw^4!5%q#v2znc87g_)@^jf1og!04@&&tsf7*9'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = [
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
]
import django
if django.VERSION[0] < 1 or django.VERSION[1] < 3:
MIDDLEWARE_CLASSES.append('cbv.middleware.DeferredRenderingMiddleware')
ROOT_URLCONF = 'testapp.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
'django.contrib.admindocs',
'polymorphic', # We need polymorphic installed for the shop
'shop', # The django SHOP application
'shop.addressmodel',
'project', # the test project application
)
# The shop settings:
SHOP_CART_MODIFIERS= ['shop.cart.modifiers.rebate_modifiers.BulkRebateModifier']
SHOP_SHIPPING_BACKENDS=['shop.shipping.backends.flat_rate.FlatRateShipping']
# Shop module settings
SHOP_SHIPPING_FLAT_RATE = '10' # That's just for the flat rate shipping backend
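# Hedged sketch (not part of the original settings file, and it would normally
# live outside settings): how another module might read the flat-rate shipping
# amount configured above. The Decimal conversion is an assumption about how a
# shipping backend would consume the string value.
def _example_read_shop_settings():
    from decimal import Decimal
    from django.conf import settings
    return Decimal(settings.SHOP_SHIPPING_FLAT_RATE)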
|
bsd-3-clause
| -8,624,996,170,369,027,000
| 34.758929
| 122
| 0.698127
| false
| 3.608108
| false
| false
| false
|
cgqyh/pyalgotrade-mod
|
pyalgotrade/tools/quandl.py
|
1
|
5711
|
# PyAlgoTrade
#
# Copyright 2011-2015 Gabriel Martin Becedillas Ruiz
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. moduleauthor:: Gabriel Martin Becedillas Ruiz <gabriel.becedillas@gmail.com>
"""
import datetime
import os
from pyalgotrade import bar
from pyalgotrade.barfeed import quandlfeed
from pyalgotrade.utils import dt
from pyalgotrade.utils import csvutils
import pyalgotrade.logger
# http://www.quandl.com/help/api
def download_csv(sourceCode, tableCode, begin, end, frequency, authToken):
url = "http://www.quandl.com/api/v1/datasets/%s/%s.csv" % (sourceCode, tableCode)
params = {
"trim_start": begin.strftime("%Y-%m-%d"),
"trim_end": end.strftime("%Y-%m-%d"),
"collapse": frequency
}
if authToken is not None:
params["auth_token"] = authToken
return csvutils.download_csv(url, params)
def download_daily_bars(sourceCode, tableCode, year, csvFile, authToken=None):
"""Download daily bars from Quandl for a given year.
:param sourceCode: The dataset's source code.
:type sourceCode: string.
:param tableCode: The dataset's table code.
:type tableCode: string.
:param year: The year.
:type year: int.
:param csvFile: The path to the CSV file to write.
:type csvFile: string.
:param authToken: Optional. An authentication token needed if you're doing more than 50 calls per day.
:type authToken: string.
"""
bars = download_csv(sourceCode, tableCode, datetime.date(year, 1, 1), datetime.date(year, 12, 31), "daily", authToken)
f = open(csvFile, "w")
f.write(bars)
f.close()
def download_weekly_bars(sourceCode, tableCode, year, csvFile, authToken=None):
"""Download weekly bars from Quandl for a given year.
:param sourceCode: The dataset's source code.
:type sourceCode: string.
:param tableCode: The dataset's table code.
:type tableCode: string.
:param year: The year.
:type year: int.
:param csvFile: The path to the CSV file to write.
:type csvFile: string.
:param authToken: Optional. An authentication token needed if you're doing more than 50 calls per day.
:type authToken: string.
"""
begin = dt.get_first_monday(year) - datetime.timedelta(days=1) # Start on a sunday
end = dt.get_last_monday(year) - datetime.timedelta(days=1) # Start on a sunday
bars = download_csv(sourceCode, tableCode, begin, end, "weekly", authToken)
f = open(csvFile, "w")
f.write(bars)
f.close()
def build_feed(sourceCode, tableCodes, fromYear, toYear, storage, frequency=bar.Frequency.DAY, timezone=None, skipErrors=False, noAdjClose=False, authToken=None):
"""Build and load a :class:`pyalgotrade.barfeed.quandlfeed.Feed` using CSV files downloaded from Quandl.
CSV files are downloaded if they haven't been downloaded before.
:param sourceCode: The dataset source code.
:type sourceCode: string.
:param tableCodes: The dataset table codes.
:type tableCodes: list.
:param fromYear: The first year.
:type fromYear: int.
:param toYear: The last year.
:type toYear: int.
:param storage: The path were the files will be loaded from, or downloaded to.
:type storage: string.
:param frequency: The frequency of the bars. Only **pyalgotrade.bar.Frequency.DAY** or **pyalgotrade.bar.Frequency.WEEK**
are supported.
:param timezone: The default timezone to use to localize bars. Check :mod:`pyalgotrade.marketsession`.
:type timezone: A pytz timezone.
:param skipErrors: True to keep on loading/downloading files in case of errors.
:type skipErrors: boolean.
:param noAdjClose: True if the instruments don't have adjusted close values.
:type noAdjClose: boolean.
:param authToken: Optional. An authentication token needed if you're doing more than 50 calls per day.
:type authToken: string.
:rtype: :class:`pyalgotrade.barfeed.quandlfeed.Feed`.
"""
logger = pyalgotrade.logger.getLogger("quandl")
ret = quandlfeed.Feed(frequency, timezone)
if noAdjClose:
ret.setNoAdjClose()
if not os.path.exists(storage):
logger.info("Creating %s directory" % (storage))
os.mkdir(storage)
for year in range(fromYear, toYear+1):
for tableCode in tableCodes:
fileName = os.path.join(storage, "%s-%s-%d-quandl.csv" % (sourceCode, tableCode, year))
if not os.path.exists(fileName):
logger.info("Downloading %s %d to %s" % (tableCode, year, fileName))
try:
if frequency == bar.Frequency.DAY:
download_daily_bars(sourceCode, tableCode, year, fileName, authToken)
elif frequency == bar.Frequency.WEEK:
download_weekly_bars(sourceCode, tableCode, year, fileName, authToken)
else:
raise Exception("Invalid frequency")
except Exception, e:
if skipErrors:
logger.error(str(e))
continue
else:
raise e
ret.addBarsFromCSV(tableCode, fileName)
return ret
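# Hedged usage sketch (not part of the original module): building a daily feed
# from Quandl CSVs. The source/table codes, year range and storage directory are
# placeholders; an auth token is only needed for more than 50 calls per day.
def _example_build_feed():
    feed = build_feed("WIKI", ["AAPL"], 2014, 2015, "./quandl-data",
                      frequency=bar.Frequency.DAY, authToken=None)
    return feed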
|
apache-2.0
| -7,145,280,726,221,592,000
| 38.386207
| 162
| 0.66941
| false
| 3.653871
| false
| false
| false
|
evancasey/demeter
|
demeter/unsup/common/image_pool.py
|
1
|
1090
|
import tensorflow as tf
import copy
class ImagePool:
def __init__(self, pool_size):
self.pool_size = pool_size
if self.pool_size > 0:
self.num_imgs = 0
self.images = []
def query(self, images):
if self.pool_size == 0:
return images
ret_imgs = []
for i in range(images.shape[0]):
image = tf.expand_dims(images[i], axis=0)
if self.num_imgs < self.pool_size:
self.num_imgs = self.num_imgs + 1
self.images.append(image)
ret_imgs.append(image)
else:
p = tf.random_uniform((1,), 0, 1).numpy()[0]
if p > 0.5:
random_id = tf.random_uniform((1,), 0, self.pool_size - 1).numpy()[0].astype(int)
tmp = copy.copy(self.images[random_id])
self.images[random_id] = image
ret_imgs.append(tmp)
else:
ret_imgs.append(image)
ret_imgs = tf.concat(ret_imgs, 0)
return ret_imgs
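# Hedged usage sketch (not part of the original module): querying the pool with a
# batch of generated images. This assumes eager execution is enabled (the class
# calls .numpy() on TensorFlow tensors); the batch shape is an arbitrary example.
def _example_image_pool():
    pool = ImagePool(pool_size=50)
    fake_batch = tf.random_uniform((4, 64, 64, 3))  # placeholder generator output
    mixed_batch = pool.query(fake_batch)
    return mixed_batch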
|
mit
| 6,230,843,943,123,238,000
| 33.0625
| 101
| 0.480734
| false
| 3.633333
| false
| false
| false
|
mvaled/sentry
|
src/sentry/south_migrations/0277_auto__add_commitfilechange__add_unique_commitfilechange_commit_filenam.py
|
1
|
92625
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'CommitFileChange'
db.create_table(
'sentry_commitfilechange', (
(
'id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(
primary_key=True
)
), (
'organization_id',
self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')(
db_index=True
)
), (
'commit', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(
to=orm['sentry.Commit']
)
), ('filename', self.gf('django.db.models.fields.CharField')(max_length=255)),
('type', self.gf('django.db.models.fields.CharField')(max_length=1)),
)
)
db.send_create_signal('sentry', ['CommitFileChange'])
# Adding unique constraint on 'CommitFileChange', fields ['commit', 'filename']
db.create_unique('sentry_commitfilechange', ['commit_id', 'filename'])
# Adding field 'Repository.url'
db.add_column(
'sentry_repository',
'url',
self.gf('django.db.models.fields.URLField')(max_length=200, null=True),
keep_default=False
)
# Adding field 'Repository.provider'
db.add_column(
'sentry_repository',
'provider',
self.gf('django.db.models.fields.CharField')(max_length=64, null=True),
keep_default=False
)
# Adding field 'Repository.external_id'
db.add_column(
'sentry_repository',
'external_id',
self.gf('django.db.models.fields.CharField')(max_length=64, null=True),
keep_default=False
)
# Adding field 'Repository.config'
db.add_column(
'sentry_repository',
'config',
self.gf('sentry.db.models.fields.jsonfield.JSONField')(default={}),
keep_default=False
)
# Adding field 'Repository.status'
db.add_column(
'sentry_repository',
'status',
self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')(
default=0, db_index=True
),
keep_default=False
)
# Adding unique constraint on 'Repository', fields ['organization_id',
# 'provider', 'external_id']
db.create_unique('sentry_repository', ['organization_id', 'provider', 'external_id'])
def backwards(self, orm):
# Removing unique constraint on 'Repository', fields ['organization_id',
# 'provider', 'external_id']
db.delete_unique('sentry_repository', ['organization_id', 'provider', 'external_id'])
# Removing unique constraint on 'CommitFileChange', fields ['commit', 'filename']
db.delete_unique('sentry_commitfilechange', ['commit_id', 'filename'])
# Deleting model 'CommitFileChange'
db.delete_table('sentry_commitfilechange')
# Deleting field 'Repository.url'
db.delete_column('sentry_repository', 'url')
# Deleting field 'Repository.provider'
db.delete_column('sentry_repository', 'provider')
# Deleting field 'Repository.external_id'
db.delete_column('sentry_repository', 'external_id')
# Deleting field 'Repository.config'
db.delete_column('sentry_repository', 'config')
# Deleting field 'Repository.status'
db.delete_column('sentry_repository', 'status')
models = {
'sentry.activity': {
'Meta': {
'object_name': 'Activity'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True'
}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
)
},
'sentry.apikey': {
'Meta': {
'object_name': 'ApiKey'
},
'allowed_origins':
('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '32'
}),
'label': (
'django.db.models.fields.CharField', [], {
'default': "'Default'",
'max_length': '64',
'blank': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Organization']"
}
),
'scopes': ('django.db.models.fields.BigIntegerField', [], {
'default': 'None'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.apitoken': {
'Meta': {
'object_name': 'ApiToken'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.ApiKey']",
'null': 'True'
}
),
'scopes': ('django.db.models.fields.BigIntegerField', [], {
'default': 'None'
}),
'token':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '64'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.auditlogentry': {
'Meta': {
'object_name': 'AuditLogEntry'
},
'actor': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'audit_actors'",
'null': 'True',
'to': "orm['sentry.User']"
}
),
'actor_key': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.ApiKey']",
'null': 'True',
'blank': 'True'
}
),
'actor_label': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ip_address': (
'django.db.models.fields.GenericIPAddressField', [], {
'max_length': '39',
'null': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'target_object':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'target_user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'audit_targets'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.authenticator': {
'Meta': {
'unique_together': "(('user', 'type'),)",
'object_name': 'Authenticator',
'db_table': "'auth_authenticator'"
},
'config': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {}),
'created_at':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'last_used_at': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.authidentity': {
'Meta': {
'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))",
'object_name': 'AuthIdentity'
},
'auth_provider': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.AuthProvider']"
}
),
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'last_synced':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'last_verified':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.authprovider': {
'Meta': {
'object_name': 'AuthProvider'
},
'config': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'default_global_access':
('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'default_role':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50'
}),
'default_teams': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Team']",
'symmetrical': 'False',
'blank': 'True'
}
),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '0'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']",
'unique': 'True'
}
),
'provider': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'sync_time':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
})
},
'sentry.broadcast': {
'Meta': {
'object_name': 'Broadcast'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'date_expires': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime(2016, 11, 29, 0, 0)',
'null': 'True',
'blank': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_active':
('django.db.models.fields.BooleanField', [], {
'default': 'True',
'db_index': 'True'
}),
'link': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.CharField', [], {
'max_length': '256'
}),
'title': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'upstream_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'blank': 'True'
}
)
},
'sentry.broadcastseen': {
'Meta': {
'unique_together': "(('broadcast', 'user'),)",
'object_name': 'BroadcastSeen'
},
'broadcast': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Broadcast']"
}
),
'date_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.commit': {
'Meta': {
'unique_together': "(('repository_id', 'key'),)",
'object_name': 'Commit',
'index_together': "(('repository_id', 'date_added'),)"
},
'author': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.CommitAuthor']",
'null': 'True'
}
),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'message': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'repository_id':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.commitauthor': {
'Meta': {
'unique_together': "(('organization_id', 'email'),)",
'object_name': 'CommitAuthor'
},
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
)
},
'sentry.commitfilechange': {
'Meta': {
'unique_together': "(('commit', 'filename'),)",
'object_name': 'CommitFileChange'
},
'commit': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Commit']"
}
),
'filename': ('django.db.models.fields.CharField', [], {
'max_length': '255'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'type': ('django.db.models.fields.CharField', [], {
'max_length': '1'
})
},
'sentry.counter': {
'Meta': {
'object_name': 'Counter',
'db_table': "'sentry_projectcounter'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'unique': 'True'
}
),
'value': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.dsymbundle': {
'Meta': {
'object_name': 'DSymBundle'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.DSymObject']"
}
),
'sdk': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.DSymSDK']"
}
)
},
'sentry.dsymobject': {
'Meta': {
'object_name': 'DSymObject'
},
'cpu_name': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object_path': ('django.db.models.fields.TextField', [], {
'db_index': 'True'
}),
'uuid':
('django.db.models.fields.CharField', [], {
'max_length': '36',
'db_index': 'True'
}),
'vmaddr':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
}),
'vmsize':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
})
},
'sentry.dsymsdk': {
'Meta': {
'object_name':
'DSymSDK',
'index_together':
"[('version_major', 'version_minor', 'version_patchlevel', 'version_build')]"
},
'dsym_type':
('django.db.models.fields.CharField', [], {
'max_length': '20',
'db_index': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'sdk_name': ('django.db.models.fields.CharField', [], {
'max_length': '20'
}),
'version_build': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'version_major': ('django.db.models.fields.IntegerField', [], {}),
'version_minor': ('django.db.models.fields.IntegerField', [], {}),
'version_patchlevel': ('django.db.models.fields.IntegerField', [], {})
},
'sentry.dsymsymbol': {
'Meta': {
'unique_together': "[('object', 'address')]",
'object_name': 'DSymSymbol'
},
'address':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'db_index': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.DSymObject']"
}
),
'symbol': ('django.db.models.fields.TextField', [], {})
},
'sentry.environment': {
'Meta': {
'unique_together': "(('project_id', 'name'),)",
'object_name': 'Environment'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.event': {
'Meta': {
'unique_together': "(('project_id', 'event_id'),)",
'object_name': 'Event',
'db_table': "'sentry_message'",
'index_together': "(('group_id', 'datetime'),)"
},
'data':
('sentry.db.models.fields.node.NodeField', [], {
'null': 'True',
'blank': 'True'
}),
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'event_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'db_column': "'message_id'"
}
),
'group_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'time_spent':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'null': 'True'
})
},
'sentry.eventmapping': {
'Meta': {
'unique_together': "(('project_id', 'event_id'),)",
'object_name': 'EventMapping'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event_id': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventtag': {
'Meta': {
'unique_together':
"(('event_id', 'key_id', 'value_id'),)",
'object_name':
'EventTag',
'index_together':
"(('project_id', 'key_id', 'value_id'), ('group_id', 'key_id', 'value_id'))"
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'group_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'value_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventuser': {
'Meta': {
'unique_together':
"(('project', 'ident'), ('project', 'hash'))",
'object_name':
'EventUser',
'index_together':
"(('project', 'email'), ('project', 'username'), ('project', 'ip_address'))"
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'null': 'True'
}),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
}),
'ip_address': (
'django.db.models.fields.GenericIPAddressField', [], {
'max_length': '39',
'null': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'username':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
})
},
'sentry.file': {
'Meta': {
'object_name': 'File'
},
'blob': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'legacy_blob'",
'null': 'True',
'to': "orm['sentry.FileBlob']"
}
),
'blobs': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.FileBlob']",
'through': "orm['sentry.FileBlobIndex']",
'symmetrical': 'False'
}
),
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '40',
'null': 'True'
}),
'headers': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'path': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'size':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'timestamp': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'type': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.fileblob': {
'Meta': {
'object_name': 'FileBlob'
},
'checksum':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '40'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'path': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'size':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'timestamp': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
)
},
'sentry.fileblobindex': {
'Meta': {
'unique_together': "(('file', 'blob', 'offset'),)",
'object_name': 'FileBlobIndex'
},
'blob': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.FileBlob']"
}
),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'offset': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.globaldsymfile': {
'Meta': {
'object_name': 'GlobalDSymFile'
},
'cpu_name': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'uuid':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '36'
})
},
'sentry.group': {
'Meta': {
'unique_together': "(('project', 'short_id'),)",
'object_name': 'Group',
'db_table': "'sentry_groupedmessage'",
'index_together': "(('project', 'first_release'),)"
},
'active_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'culprit': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'null': 'True',
'db_column': "'view'",
'blank': 'True'
}
),
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'first_release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']",
'null': 'True',
'on_delete': 'models.PROTECT'
}
),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_public': (
'django.db.models.fields.NullBooleanField', [], {
'default': 'False',
'null': 'True',
'blank': 'True'
}
),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'level': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '40',
'db_index': 'True',
'blank': 'True'
}
),
'logger': (
'django.db.models.fields.CharField', [], {
'default': "''",
'max_length': '64',
'db_index': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'null': 'True'
}
),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'resolved_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'short_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'time_spent_count':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'time_spent_total':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'times_seen': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '1',
'db_index': 'True'
}
)
},
'sentry.groupassignee': {
'Meta': {
'object_name': 'GroupAssignee',
'db_table': "'sentry_groupasignee'"
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'unique': 'True',
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'sentry_assignee_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.groupbookmark': {
'Meta': {
'unique_together': "(('project', 'user', 'group'),)",
'object_name': 'GroupBookmark'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'sentry_bookmark_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.groupemailthread': {
'Meta': {
'unique_together': "(('email', 'group'), ('email', 'msgid'))",
'object_name': 'GroupEmailThread'
},
'date': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'groupemail_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'msgid': ('django.db.models.fields.CharField', [], {
'max_length': '100'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'groupemail_set'",
'to': "orm['sentry.Project']"
}
)
},
'sentry.grouphash': {
'Meta': {
'unique_together': "(('project', 'hash'),)",
'object_name': 'GroupHash'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
)
},
'sentry.groupmeta': {
'Meta': {
'unique_together': "(('group', 'key'),)",
'object_name': 'GroupMeta'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupredirect': {
'Meta': {
'object_name': 'GroupRedirect'
},
'group_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'db_index': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'previous_group_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'unique': 'True'
})
},
'sentry.grouprelease': {
'Meta': {
'unique_together': "(('group_id', 'release_id', 'environment'),)",
'object_name': 'GroupRelease'
},
'environment':
('django.db.models.fields.CharField', [], {
'default': "''",
'max_length': '64'
}),
'first_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'project_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'release_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
)
},
'sentry.groupresolution': {
'Meta': {
'object_name': 'GroupResolution'
},
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'unique': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.grouprulestatus': {
'Meta': {
'unique_together': "(('rule', 'group'),)",
'object_name': 'GroupRuleStatus'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_active': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'rule': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Rule']"
}
),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {
'default': '0'
})
},
'sentry.groupseen': {
'Meta': {
'unique_together': "(('user', 'group'),)",
'object_name': 'GroupSeen'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'db_index': 'False'
}
)
},
'sentry.groupsnooze': {
'Meta': {
'object_name': 'GroupSnooze'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'unique': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'until': ('django.db.models.fields.DateTimeField', [], {})
},
'sentry.groupsubscription': {
'Meta': {
'unique_together': "(('group', 'user'),)",
'object_name': 'GroupSubscription'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'subscription_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'subscription_set'",
'to': "orm['sentry.Project']"
}
),
'reason':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.grouptagkey': {
'Meta': {
'unique_together': "(('project', 'group', 'key'),)",
'object_name': 'GroupTagKey'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'values_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.grouptagvalue': {
'Meta': {
'unique_together': "(('group', 'key', 'value'),)",
'object_name': 'GroupTagValue',
'db_table': "'sentry_messagefiltervalue'",
'index_together': "(('project', 'key', 'value', 'last_seen'),)"
},
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'grouptag'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'grouptag'",
'null': 'True',
'to': "orm['sentry.Project']"
}
),
'times_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.lostpasswordhash': {
'Meta': {
'object_name': 'LostPasswordHash'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'unique': 'True'
}
)
},
'sentry.option': {
'Meta': {
'object_name': 'Option'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '64'
}),
'last_updated':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {
'object_name': 'Organization'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'default_role':
('django.db.models.fields.CharField', [], {
'default': "'member'",
'max_length': '32'
}),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '1'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'members': (
'django.db.models.fields.related.ManyToManyField', [], {
'related_name': "'org_memberships'",
'symmetrical': 'False',
'through': "orm['sentry.OrganizationMember']",
'to': "orm['sentry.User']"
}
),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'slug':
('django.db.models.fields.SlugField', [], {
'unique': 'True',
'max_length': '50'
}),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.organizationaccessrequest': {
'Meta': {
'unique_together': "(('team', 'member'),)",
'object_name': 'OrganizationAccessRequest'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'member': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.OrganizationMember']"
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.organizationmember': {
'Meta': {
'unique_together': "(('organization', 'user'), ('organization', 'email'))",
'object_name': 'OrganizationMember'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': (
'django.db.models.fields.EmailField', [], {
'max_length': '75',
'null': 'True',
'blank': 'True'
}
),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '0'
}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'member_set'",
'to': "orm['sentry.Organization']"
}
),
'role':
('django.db.models.fields.CharField', [], {
'default': "'member'",
'max_length': '32'
}),
'teams': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Team']",
'symmetrical': 'False',
'through': "orm['sentry.OrganizationMemberTeam']",
'blank': 'True'
}
),
'token': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'unique': 'True',
'null': 'True',
'blank': 'True'
}
),
'type': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50',
'blank': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'sentry_orgmember_set'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.organizationmemberteam': {
'Meta': {
'unique_together': "(('team', 'organizationmember'),)",
'object_name': 'OrganizationMemberTeam',
'db_table': "'sentry_organizationmember_teams'"
},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'organizationmember': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.OrganizationMember']"
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.organizationonboardingtask': {
'Meta': {
'unique_together': "(('organization', 'task'),)",
'object_name': 'OrganizationOnboardingTask'
},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_completed':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'project_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'task': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
)
},
'sentry.organizationoption': {
'Meta': {
'unique_together': "(('organization', 'key'),)",
'object_name': 'OrganizationOption',
'db_table': "'sentry_organizationoptions'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.project': {
'Meta': {
'unique_together': "(('team', 'slug'), ('organization', 'slug'))",
'object_name': 'Project'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'first_event': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'forced_color': (
'django.db.models.fields.CharField', [], {
'max_length': '6',
'null': 'True',
'blank': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '200'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'public': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'slug': ('django.db.models.fields.SlugField', [], {
'max_length': '50',
'null': 'True'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.projectbookmark': {
'Meta': {
'unique_together': "(('project_id', 'user'),)",
'object_name': 'ProjectBookmark'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.projectdsymfile': {
'Meta': {
'unique_together': "(('project', 'uuid'),)",
'object_name': 'ProjectDSymFile'
},
'cpu_name': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'uuid': ('django.db.models.fields.CharField', [], {
'max_length': '36'
})
},
'sentry.projectkey': {
'Meta': {
'object_name': 'ProjectKey'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Project']"
}
),
'public_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'roles': ('django.db.models.fields.BigIntegerField', [], {
'default': '1'
}),
'secret_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.projectoption': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'ProjectOption',
'db_table': "'sentry_projectoptions'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.projectplatform': {
'Meta': {
'unique_together': "(('project_id', 'platform'),)",
'object_name': 'ProjectPlatform'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'platform': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.release': {
'Meta': {
'unique_together': "(('project', 'version'),)",
'object_name': 'Release'
},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'date_released':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'blank': 'True'
}),
'date_started':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'blank': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'new_groups':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'owner': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True',
'blank': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'ref': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'url': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
),
'version': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.releasecommit': {
'Meta': {
'unique_together': "(('release', 'commit'), ('release', 'order'))",
'object_name': 'ReleaseCommit'
},
'commit': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Commit']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'order': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'project_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
)
},
'sentry.releaseenvironment': {
'Meta': {
'unique_together': "(('project_id', 'release_id', 'environment_id'),)",
'object_name': 'ReleaseEnvironment',
'db_table': "'sentry_environmentrelease'"
},
'environment_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'first_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'project_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'release_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
)
},
'sentry.releasefile': {
'Meta': {
'unique_together': "(('release', 'ident'),)",
'object_name': 'ReleaseFile'
},
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'name': ('django.db.models.fields.TextField', [], {}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
)
},
'sentry.repository': {
'Meta': {
'unique_together':
"(('organization_id', 'name'), ('organization_id', 'provider', 'external_id'))",
'object_name':
'Repository'
},
'config': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'external_id':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '200'
}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'provider':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'url': ('django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True'
})
},
'sentry.rule': {
'Meta': {
'object_name': 'Rule'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.savedsearch': {
'Meta': {
'unique_together': "(('project', 'name'),)",
'object_name': 'SavedSearch'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_default': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'query': ('django.db.models.fields.TextField', [], {})
},
'sentry.savedsearchuserdefault': {
'Meta': {
'unique_together': "(('project', 'user'),)",
'object_name': 'SavedSearchUserDefault',
'db_table': "'sentry_savedsearch_userdefault'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'savedsearch': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.SavedSearch']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.tagkey': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'TagKey',
'db_table': "'sentry_filterkey'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'label':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'values_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.tagvalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value'),)",
'object_name': 'TagValue',
'db_table': "'sentry_filtervalue'"
},
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'times_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.team': {
'Meta': {
'unique_together': "(('organization', 'slug'),)",
'object_name': 'Team'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'slug': ('django.db.models.fields.SlugField', [], {
'max_length': '50'
}),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.user': {
'Meta': {
'object_name': 'User',
'db_table': "'auth_user'"
},
'date_joined':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'blank': 'True'
}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'is_managed': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_password_expired':
('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_staff': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'last_login':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'last_password_change': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'name': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'db_column': "'first_name'",
'blank': 'True'
}
),
'password': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'session_nonce':
('django.db.models.fields.CharField', [], {
'max_length': '12',
'null': 'True'
}),
'username':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '128'
})
},
'sentry.useravatar': {
'Meta': {
'object_name': 'UserAvatar'
},
'avatar_type':
('django.db.models.fields.PositiveSmallIntegerField', [], {
'default': '0'
}),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']",
'unique': 'True',
'null': 'True',
'on_delete': 'models.SET_NULL'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': (
'django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '32',
'db_index': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'avatar'",
'unique': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.useremail': {
'Meta': {
'unique_together': "(('user', 'email'),)",
'object_name': 'UserEmail'
},
'date_hash_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_verified': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'emails'",
'to': "orm['sentry.User']"
}
),
'validation_hash': (
'django.db.models.fields.CharField', [], {
'default': "u'nWSQmbINKkiwvRzlFaq4iWFfAr22O7g3'",
'max_length': '32'
}
)
},
'sentry.useroption': {
'Meta': {
'unique_together': "(('user', 'project', 'key'),)",
'object_name': 'UserOption'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.userreport': {
'Meta': {
'unique_together': "(('project', 'event_id'),)",
'object_name': 'UserReport',
'index_together': "(('project', 'event_id'), ('project', 'date_added'))"
},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'event_id': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
}
}
complete_apps = ['sentry']
|
bsd-3-clause
| -4,519,465,274,450,707,000
| 35.799762
| 97
| 0.40068
| false
| 4.716141
| false
| false
| false
|
sassoftware/mint
|
mint/buildtypes.py
|
1
|
14373
|
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pyflakes=ignore-file
import sys
from conary.deps import deps
validBuildTypes = {
'BOOTABLE_IMAGE' : 0,
'INSTALLABLE_ISO' : 1,
'STUB_IMAGE' : 2,
'RAW_FS_IMAGE' : 3,
'NETBOOT_IMAGE' : 4,
'TARBALL' : 5,
'LIVE_ISO' : 6,
'RAW_HD_IMAGE' : 7,
'VMWARE_IMAGE' : 8,
'VMWARE_ESX_IMAGE' : 9,
'VIRTUAL_PC_IMAGE' : 10,
'XEN_OVA' : 11,
'VIRTUAL_IRON' : 12,
'PARALLELS' : 13,
'AMI' : 14,
'UPDATE_ISO' : 15,
'APPLIANCE_ISO' : 16,
'IMAGELESS' : 17,
'VMWARE_OVF_IMAGE' : 18,
'WINDOWS_ISO' : 19,
'WINDOWS_WIM' : 20,
'DEFERRED_IMAGE' : 21,
'DOCKER_IMAGE' : 22,
}
TYPES = validBuildTypes.values()
# add all the defined image types directly to the module so that the standard
# approach of "buildtypes.IMAGE_TYPE" will result in the expected enum
sys.modules[__name__].__dict__.update(validBuildTypes)
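# Illustrative check (not part of the original module): after the namespace
# injection above, each build type is addressable both ways, e.g.
#   validBuildTypes['VMWARE_IMAGE'] == VMWARE_IMAGE == 8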
deprecatedBuildTypes = {
'QEMU_IMAGE' : RAW_HD_IMAGE
}
windowsBuildTypes = set([
WINDOWS_ISO,
WINDOWS_WIM,
])
#
# These are identifying pieces of information that we can extract from the
# flavor of a build, but not necessarily tied to any particular build type.
#
# These can sometimes be used as a buildType, indexes starting at 100.
#
flavorFlags = {
'XEN_DOMU': 100,
'APPLIANCE': 101,
}
FLAG_TYPES = flavorFlags.values()
flavorFlagsFromId = dict((x[1], x[0]) for x in flavorFlags.items())
sys.modules[__name__].__dict__.update(flavorFlags)
flavorFlagFlavors = {
XEN_DOMU: "use: xen, domU",
APPLIANCE: "use: appliance",
}
flavorFlagNames = {
XEN_DOMU: "DomU",
APPLIANCE: "Appliance",
}
# BOOTABLE_IMAGE should never get stored in the DB and therefore doesn't need a name
# NOTA BENE: Using Latin-1 here is harmful to XML-RPC which expects UTF-8
# Until we figure out the root cause, use "(R)" for registered trademark here.
typeNames = {
NETBOOT_IMAGE: "Netboot Image",
INSTALLABLE_ISO: "Installable CD/DVD",
RAW_FS_IMAGE: "Raw Filesystem Image",
STUB_IMAGE: "Stub Image",
RAW_HD_IMAGE: "Raw Hard Disk Image",
VMWARE_IMAGE: "VMware (R) Virtual Appliance",
VMWARE_ESX_IMAGE: "VMware (R) ESX Server Virtual Appliance",
VMWARE_OVF_IMAGE: "VMware (R) Virtual Appliance OVF",
LIVE_ISO: "Demo CD/DVD (Live CD/DVD)",
TARBALL: "Compressed Tar File",
VIRTUAL_PC_IMAGE: "VHD for Microsoft (R) Hyper-V",
XEN_OVA: "Citrix XenServer (TM) Appliance",
VIRTUAL_IRON: "Virtual Iron Virtual Appliance",
PARALLELS: "Parallels Virtual Appliance",
AMI: "Amazon Machine Image (EC2)",
UPDATE_ISO: "Update CD/DVD",
APPLIANCE_ISO: "Appliance Installable ISO",
DEFERRED_IMAGE: "Layered Image",
WINDOWS_ISO: "Windows Installable ISO",
WINDOWS_WIM: "Windows Imaging Format (WIM)",
IMAGELESS: "Online Update",
DOCKER_IMAGE: "Docker Image",
}
typeNamesShort = {
NETBOOT_IMAGE: "Netboot",
INSTALLABLE_ISO: "Inst CD/DVD",
RAW_FS_IMAGE: "Raw FS",
STUB_IMAGE: "Stub",
RAW_HD_IMAGE: "HDD",
VMWARE_IMAGE: "VMware (R)",
VMWARE_ESX_IMAGE: "VMware (R) ESX",
LIVE_ISO: "Demo CD/DVD",
TARBALL: "Tar",
VIRTUAL_PC_IMAGE: "Microsoft (R) Hyper-V",
XEN_OVA: "Citrix XenServer (TM)",
VIRTUAL_IRON: "Virtual Iron",
PARALLELS: "Parallels",
AMI: "AMI",
UPDATE_ISO: "Update CD/DVD",
APPLIANCE_ISO: "Appliance Inst",
DEFERRED_IMAGE: "Layered",
WINDOWS_ISO: "Windows Inst",
WINDOWS_WIM: "Windows WIM",
IMAGELESS: "Online Update",
VMWARE_OVF_IMAGE: "VMware (R) OVF",
DOCKER_IMAGE: "Docker",
}
# Used to map image type ids from the XML tag names
# used in the build definitions contained within the
# product definition.
#
# Note: Only supported image types are contained here.
# Thus you will not see XML tags for the following:
# - STUB_IMAGE
# - PARALLELS
#
# Furthermore, we don't support IMAGELESS builds
# in the context of a product definition.
#
xmlTagNameImageTypeMap = {
'amiImage': AMI,
'applianceIsoImage': APPLIANCE_ISO,
'deferredImage': DEFERRED_IMAGE,
'dockerImage': DOCKER_IMAGE,
'installableIsoImage': INSTALLABLE_ISO,
'liveIsoImage': LIVE_ISO,
'netbootImage': NETBOOT_IMAGE,
'rawFsImage': RAW_FS_IMAGE,
'rawHdImage': RAW_HD_IMAGE,
'tarballImage': TARBALL,
'updateIsoImage': UPDATE_ISO,
'vhdImage': VIRTUAL_PC_IMAGE,
'virtualIronImage': VIRTUAL_IRON,
'vmwareImage': VMWARE_IMAGE,
'vmwareEsxImage': VMWARE_ESX_IMAGE,
'vmwareOvfImage': VMWARE_OVF_IMAGE,
'xenOvaImage': XEN_OVA,
'imageless': IMAGELESS,
'windowsIsoImage': WINDOWS_ISO,
'wimImage': WINDOWS_WIM,
}
imageTypeXmlTagNameMap = dict([(v,k) for k,v in xmlTagNameImageTypeMap.iteritems()])
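# Illustrative example (not part of the original module): the two maps are
# inverses, so a product-definition tag round-trips through them, e.g.
#   xmlTagNameImageTypeMap['vmwareImage']  -> VMWARE_IMAGE (8)
#   imageTypeXmlTagNameMap[VMWARE_IMAGE]   -> 'vmwareImage'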
typeNamesMarketing = {
NETBOOT_IMAGE: "Netboot Image",
INSTALLABLE_ISO: "Legacy Installable CD/DVD",
RAW_FS_IMAGE: "Eucalyptus/Mountable Filesystem",
STUB_IMAGE: "Stub Image",
RAW_HD_IMAGE: "OpenStack/KVM/QEMU/Raw Hard Disk",
VMWARE_IMAGE: "VMware(R) Workstation/Fusion / Parallels(R) Virtual Appliance",
VMWARE_ESX_IMAGE: "VMware(R) ESX/VCD / Oracle(R) VirtualBox Virtual Appliance",
VMWARE_OVF_IMAGE: "VMware(R) Virtual Appliance OVF",
LIVE_ISO: "Demo CD/DVD (Live CD/DVD)",
TARBALL: "TAR File",
VIRTUAL_PC_IMAGE: "VHD for Microsoft(R) Hyper-V(R)",
XEN_OVA: "Citrix(R) XenServer(TM) Appliance",
VIRTUAL_IRON: "Virtual Iron Virtual Appliance",
PARALLELS: "Parallels(R) Virtual Appliance",
AMI: "Amazon Machine Image (EC2)",
UPDATE_ISO: "Update CD/DVD",
APPLIANCE_ISO: "Appliance Installable ISO",
DEFERRED_IMAGE: "Layered Image",
WINDOWS_ISO: "Installable CD/DVD (ISO)",
WINDOWS_WIM: "Windows Imaging Format (WIM)",
IMAGELESS: "Online Update",
DOCKER_IMAGE: "Docker Image",
# flavor flags here
XEN_DOMU: "DomU",
APPLIANCE: "Appliance",
}
buildTypeExtra = {
APPLIANCE_ISO: "This image type will not work without using "
"a version of anaconda-templates based on "
"rPath Linux 2.",
IMAGELESS: "Select this image type to mark a group for "
"later publishing to an Update Service."
}
buildTypeIcons = {
VMWARE_IMAGE: dict(
icon="get-vmware-player.png",
href="http://www.vmware.com/download/player/",
text="Download VMware Player"),
RAW_HD_IMAGE: dict(
icon="get-parallels.png",
href="http://www.parallels.com/",
text="Try Parallels Workstation 2.2"),
VIRTUAL_IRON: dict(
icon="get-virtual-iron.png",
href="http://www.virtualiron.com/free",
text="Virtual Iron: Download Now"),
XEN_OVA: dict(
icon="get-xen-express.gif",
href="http://www.citrix.com/xenserver/getexpress",
text="Citrix XenServer Express Edition: Download Now",
),
VIRTUAL_PC_IMAGE: dict(
icon="get-hyper-v.png",
href="http://www.microsoft.com/Hyper-V",
text="Learn more about Microsoft Hyper-V",
),
}
typeFlavorOverride = {
(RAW_HD_IMAGE, XEN_DOMU): dict(
marketingName="Raw Hard Disk Image",
icon=False,
),
}
# sizes are listed in bytes...
discSizes = {
'CD: 650 MB' : '681574400',
'CD: 700 MB' : '734003200',
'DVD: 4.7 GB' : '4700000000',
'DVD: 8.5 GB' : '8500000000',
}
buildDefinitionFlavorTypes = {
'BD_GENERIC_X86' : 0,
'BD_GENERIC_X86_64' : 1,
'BD_DOM0_X86' : 2,
'BD_DOM0_X86_64' : 3,
'BD_DOMU_X86' : 4,
'BD_DOMU_X86_64' : 5,
'BD_VMWARE_X86' : 6,
'BD_VMWARE_X86_64' : 7,
}
sys.modules[__name__].__dict__.update(buildDefinitionFlavorTypes)
buildDefinitionFlavorMap = {
BD_GENERIC_X86 : '!dom0, !domU, !xen, !vmware is: x86',
BD_GENERIC_X86_64 : '!dom0, !domU, !xen, !vmware is: x86_64',
BD_DOM0_X86 : 'dom0, !domU, xen, !vmware is: x86',
BD_DOM0_X86_64 : 'dom0, !domU, xen, !vmware is: x86_64',
BD_DOMU_X86 : '!dom0, domU, xen, !vmware is: x86',
BD_DOMU_X86_64 : '!dom0, domU, xen, !vmware is: x86_64',
BD_VMWARE_X86 : '!dom0, !domU, !xen, vmware is: x86',
BD_VMWARE_X86_64 : '!dom0, !domU, !xen, vmware is: x86_64',
}
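# Illustrative note (not part of the original module): these strings use
# conary's flavor syntax and parse with deps.parseFlavor (imported above), e.g.
#   deps.parseFlavor(buildDefinitionFlavorMap[BD_GENERIC_X86])
# yields a flavor requiring x86 with the dom0/domU/xen/vmware use flags negated.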
def alphabatizeBuildTypes(visibleBuildTypes):
sortedList = sorted([x for x in visibleBuildTypes if x != IMAGELESS],
key = lambda x: typeNames.get(x))
if IMAGELESS in visibleBuildTypes:
sortedList.insert(0, IMAGELESS)
return sortedList
def makeBuildFlavorMap(prd):
baseFlavor = prd.getBaseFlavor() or prd.getPlatformBaseFlavor() or ''
baseFlavor = deps.parseFlavor(baseFlavor)
flavorSets = prd.getFlavorSets()
architectures = prd.getArchitectures()
if prd.platform:
flavorSets += prd.platform.getFlavorSets()
architectures = prd.platform.getArchitectures()
res = {}
for flavorSet in flavorSets:
for architecture in architectures:
flv = deps.parseFlavor(flavorSet.flavor)
arch = deps.parseFlavor(architecture.flavor)
flavor = deps.overrideFlavor(baseFlavor, flv)
flavor = deps.overrideFlavor(flavor, arch)
res[str(flavor)] = \
"%s %s" % (flavorSet.displayName, architecture.displayName)
return res
def makeFlavorMap(prd):
flavorSets = prd.getFlavorSets()
architectures = prd.getArchitectures()
if prd.platform:
flavorSets += prd.platform.getFlavorSets()
architectures += prd.platform.getArchitectures()
return dict([("%s %s" % (x.displayName, y.displayName),
"%s,%s" % (x.name, y.name)) \
for x in flavorSets for y in architectures])
def makeFlavorsForBuild(prd, key):
# compose a flavor map much like above but filter illegal types
flavorSets = prd.getFlavorSets()
architectures = prd.getArchitectures()
buildTemplates = prd.getBuildTemplates()
if prd.platform:
flavorSets += prd.platform.getFlavorSets()
architectures += prd.platform.getArchitectures()
buildTemplates += prd.platform.getBuildTemplates()
containerTemplateRef = imageTypeXmlTagNameMap.get(key)
if not containerTemplateRef:
return makeFlavorMap(prd)
# for arch and flavorSet, if None is encountered, all available types
# are legal
arches = set([x.architectureRef for x in buildTemplates \
if x.containerTemplateRef == containerTemplateRef])
arches = [x for x in architectures if None in arches or x.name in arches]
flavors = set([x.flavorSetRef for x in buildTemplates \
if x.containerTemplateRef == containerTemplateRef])
flavors = [x for x in flavorSets if None in flavors or x.name in flavors]
return dict([("%s %s" % (x.displayName, y.displayName),
"%s,%s" % (x.name, y.name)) \
for x in flavors for y in arches])
# generate mapping of flavors to flavor names
buildDefinitionFlavorToFlavorMapRev = \
dict((x[1], x[0]) for x in buildDefinitionFlavorMap.iteritems())
buildDefinitionFlavorNameMap = {
BD_GENERIC_X86 : 'Generic x86 (32-bit)',
BD_GENERIC_X86_64 : 'Generic x86 (64-bit)',
BD_DOM0_X86 : 'dom0 x86 (32-bit)',
BD_DOM0_X86_64 : 'dom0 x86 (64-bit)',
BD_DOMU_X86 : 'domU x86 (32-bit)',
BD_DOMU_X86_64 : 'domU x86 (64-bit)',
BD_VMWARE_X86 : 'VMware x86 (32-bit)',
BD_VMWARE_X86_64 : 'VMware x86 (64-bit)',
}
# a mapping of build types to supported flavors. If a build type does not
# exist in this map, it is assumed it supports all flavors. The first flavor
# is assumed to be the default.
buildDefinitionSupportedFlavorsMap = {
VMWARE_IMAGE : [BD_VMWARE_X86, BD_VMWARE_X86_64],
VMWARE_ESX_IMAGE : [BD_VMWARE_X86, BD_VMWARE_X86_64],
XEN_OVA : [BD_DOMU_X86, BD_DOMU_X86_64],
AMI : [BD_DOMU_X86, BD_DOMU_X86_64],
}
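# Illustrative helper (not part of the original module), showing the lookup
# convention described above: a missing entry means every flavor is allowed,
# and index 0 of the list is the default.
def _exampleSupportedFlavors(buildType):
    flavors = buildDefinitionSupportedFlavorsMap.get(
        buildType, sorted(buildDefinitionFlavorNameMap))
    return flavors[0], flavors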
# code generator run by make to generate javascript constants
# should only be run by the makefile in mint/web/content/javascript
def codegen():
s = "// this Javascript was generated by mint/buildtypes.py\n"
s += "// do not edit or check into source control\n"
s += "var maxBuildType = %d;" % max(validBuildTypes.values())
s += "var buildTypeNames = {"
i = []
for k, v in typeNames.items():
i.append(" '%d': '%s'" % (k, v,))
s += ", ".join(i)
s += "};"
s += "var buildTypeNamesShort = {"
i = []
for k, v in typeNamesShort.items():
i.append(" '%d': '%s'" % (k, v,))
s += ", ".join(i)
s += "};"
s += "var buildTypeNamesMarketing = {"
i = []
for k, v in typeNamesMarketing.items():
i.append(" '%d': '%s'" % (k, v,))
s += ", ".join(i)
s += "};"
for k, v in validBuildTypes.items():
s += "%s = %d;\n" % (k, v)
return s
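# Illustrative note (not part of the original module): with the tables above,
# the generated snippet begins roughly like
#   // this Javascript was generated by mint/buildtypes.py
#   // do not edit or check into source control
#   var maxBuildType = 22;var buildTypeNames = { ... };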
if __name__ == "__main__": #pragma: no cover
if len(sys.argv) > 1 and sys.argv[1] == "--genjs":
print codegen()
sys.exit(0)
else:
sys.exit(1)
|
apache-2.0
| -8,518,906,341,440,424,000
| 33.970803
| 88
| 0.598066
| false
| 3.110366
| false
| false
| false
|
KIOS-Research/effinet-smart-water-game
|
test.py
|
1
|
6635
|
# -*- coding: cp1253 -*-
from tkinter import *
from time import sleep
def create(w, x1, y1):
w.place(x=x1, y=y1)
def erase(w):
w.destroy()
def reset(w):
w.destroy()
start()
def exit(w):
w.destroy()
def e_q1(root, counter, step):
TL = Toplevel()
w, h = TL.winfo_screenwidth(), TL.winfo_screenheight()
TL.overrideredirect(1)
TL.geometry("%dx%d+0+0" % (w, h))
a01 = 0
a02 = 0
a03 = 0
if step == 1:
question = "Question 1: How much of Earth's water is salty and undrinkable?"
a1 = "37%"
a2 = "97%"
a3 = "67%"
backfile = "1.gif" # effinet
solution = "1a.gif"
a02 = 1
elif step == 2:
question = "Question 2: How much water do Europeans use per day on average?"
a1 = "50 Liters"
a2 = "150 Liters"
a3 = "10 Liters"
solution = ""
backfile = "2.gif" # William Newman
a02 = 1
elif step == 3:
question = "Question 3: Which substance do water companies use to kill bacteria in water?"
a1 = "Soap"
a2 = "Citric Acid"
a3 = "Chlorine"
solution = ""
backfile = "3.gif" # Jacob Vanderheyden
a03 = 1
elif step == 4:
question = "Question 4: How much water is lost due to leakages in Cyprus?"
a1 = "Around 20%"
a2 = "Around 50%"
a3 = "Around 12%"
solution = ""
backfile = "4.gif" # Pete
a01 = 1
elif step == 5:
question = "Question 5: What is the energy cost to deliver water to consumers in Barcelona, Spain?"
a1 = "7 Million Euros"
a2 = "700,000 Euros"
a3 = "70 Million Euros"
solution = ""
backfile = "5.gif" #
a01 = 1
elif step == 6:
question = "Question 6: How water utilities detect leakages?"
a1 = "Using many sensors"
a2 = "Monitoring night flow increase"
a3 = "Consumer complaints"
solution = ""
backfile = "6.gif" #
a02 = 1
elif step == 7:
question = "Question 7: A water tank is equivalent to:"
a1 = "A battery"
a2 = "A lamp"
a3 = "A switch"
backfile = "7.gif" #
solution = ""
a01 = 1
elif step == 8:
question = "Question 8: The most energy consumption in a water network goes for"
a1 = "Disinfection System"
a2 = "ICT Functions"
a3 = "Pump operations"
solution = ""
backfile = "8.gif" #
a03 = 1
elif step == 9:
question = "Question 9: How can we reduce energy usage in water networks?"
a1 = "Use pumps during off-peak hours"
a2 = "Use ground water"
a3 = "Increase water prices"
solution = ""
backfile = "9.gif" #
a01 = 1
elif step == 10:
question = "Question 10: In the future, water utilities will"
a1 = "Communicate information to the consumers"
a2 = "Get information directly from the consumers"
a3 = "Both of the above"
solution = ""
backfile = "10.gif" #
a03 = 1
photo = PhotoImage(file=backfile)
wback = Label(TL, image=photo)
wback.photo = photo
wback.place(x=-5, y=-5)
photo = PhotoImage(file="logo2.gif")
wlogo = Label(TL, image=photo)
wlogo.photo = photo
wlogo.place(x=1050, y=100)
l = Label(TL, text=question, font="Verdana 20", bg="Plum", pady=10)
l.pack(side=TOP)
b2 = Button(TL, text=a1, bd=10, width=35, font="Verdana 11 bold", bg="Darkred", fg="White",
command=lambda: e_correct1(root, TL, a01, counter, step,solution))
b2.pack()
b2.place(x=500, y=250)
b3 = Button(TL, text=a2, bd=10, width=35, font="Verdana 11 bold", bg="Darkred", fg="White",
command=lambda: e_correct1(root, TL, a02, counter, step,solution))
b3.pack()
b3.place(x=500, y=340)
b2 = Button(TL, text=a3, bd=10, width=35, font="Verdana 11 bold", bg="Darkred", fg="White",
command=lambda: e_correct1(root, TL, a03, counter, step, solution))
b2.pack()
b2.place(x=500, y=430)
# ex = Button(window2, text="EXIT", bd=1, width=6, font="Verdana 10 bold", bg="red", fg="White",
# command=lambda: exit2(window1))
#ex.pack()
#ex.place(x=1168, y=725)
ex1 = Button(TL, text="RESET", bd=1, width=8, font="Verdana 10 bold", bg="red", fg="White",
command=lambda: TL.destroy())
ex1.pack()
ex1.place(x=1048, y=725)
def e_correct1(root, TL, a, counter, step, solution):
#t = Text(TL, text=solution, font="Verdana 20", bg="Plum")
#t.place(100,20)
#l = Label(TL, text=solution, font="Verdana 20", bg="Plum", pady=10)
#l.pack(side=BOTTOM)
    if solution:  # only question 1 ships a solution image; skip empty paths
        photo = PhotoImage(file=solution)
        wsol = Label(TL, image=photo)
        wsol.photo = photo
        wsol.place(x=100, y=100)
if a == 1:
counter += 1
photo = PhotoImage(file="cr.gif")
w = Label(TL, image=photo)
w.photo = photo
w.place(x=570, y=60)
else:
photo = PhotoImage(file="wr.gif")
w = Label(TL, image=photo)
w.photo = photo
w.place(x=570, y=60)
if step < 10:
TL.update()
sleep(3)
e_q1(root, counter, step + 1)
TL.destroy()
else:
sleep(0.5)
backfile = '0.gif'
photo = PhotoImage(file=backfile)
w = Label(TL, image=photo)
w.photo = photo
w.place(x=-5, y=-5)
ex = Button(TL, text="EXIT", bd=1, width=6, font="Verdana 10 bold", bg="red", fg="White",
command=lambda: root.destroy())
ex.pack()
ex.place(x=1168, y=725)
# t= lambda: reset(w)
#window2.after(1500, t)
def start():
root = Tk()
w, h = root.winfo_screenwidth(), root.winfo_screenheight()
root.overrideredirect(1)
root.geometry("%dx%d+0+0" % (w, h))
photo = PhotoImage(file="0.gif")
w = Label(root, image=photo)
w.photo = photo
w.place(x=-5, y=-5)
photo = PhotoImage(file="logo2.gif")
w = Label(root, image=photo)
w.photo = photo
w.place(x=1050, y=100)
counter = 0
step = 1
b2 = Button(root, text='Begin Smart Water Challenge!', bd=10, height=1, font="Verdana 14 bold", bg="Black",
fg="White", command=lambda: e_q1(root, counter, step), compound=CENTER)
b2.pack()
b2.place(x=500, y=350)
ex = Button(root, text="EXIT", bd=1, width=6, font="Verdana 10 bold", bg="red", fg="White",
command=lambda: root.destroy())
ex.pack()
ex.place(x=1168, y=725)
root.mainloop()
start()
|
bsd-2-clause
| 6,492,672,037,668,850,000
| 30.009346
| 111
| 0.547099
| false
| 3.049173
| false
| false
| false
|
bbengfort/inigo
|
inigo/image.py
|
1
|
7931
|
# inigo.image
# Handles data dealing with images, particularly EXIF for JPEG
#
# Author: Benjamin Bengfort <benjamin@bengfort.com>
# Created: Sun Jun 14 22:32:17 2015 -0400
#
# Copyright (C) 2015 Bengfort.com
# For license information, see LICENSE.txt
#
# ID: image.py [] benjamin@bengfort.com $
"""
Handles data dealing with images, particularly EXIF for JPEG
"""
##########################################################################
## Imports
##########################################################################
from inigo.fs import FileMeta
from PIL import Image, ExifTags
from datetime import datetime
from dateutil.tz import tzutc
from inigo.config import settings
from inigo.utils.timez import epochptime
from inigo.utils.decorators import memoized
from inigo.exceptions import PictureNotFound
from inigo.models import STYPE, create_session
from inigo.models import Picture, Storage
from inigo.utils.timez import tzaware_now
from sqlalchemy.sql import exists
from geopy.geocoders import GoogleV3
##########################################################################
## Module Constants
##########################################################################
EXIF_DATE_FORMAT = "%Y:%m:%d %H:%M:%S"
##########################################################################
## Helper functions
##########################################################################
def convert_to_degrees(value):
"""
Helper function to convert GPS coordinates stored in EXIF degrees to a
decimal float format, though this function does not take into account
N/S or E/W cardinality of the degree vector.
"""
deg = float(value[0][0]) / float(value[0][1])
mns = float(value[1][0]) / float(value[1][1])
sec = float(value[2][0]) / float(value[2][1])
return deg + (mns / 60.0) + (sec / 3600.0)
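# Illustrative example (not part of the original module): EXIF stores each
# component as a (numerator, denominator) rational pair, so
#   convert_to_degrees(((40, 1), (26, 1), (4620, 100)))
# is roughly 40.4462 (40 deg 26 min 46.2 sec); the N/S or E/W sign is applied
# by the caller (see ImageMeta.coordinates below).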
##########################################################################
## Image Node
##########################################################################
class ImageMeta(FileMeta):
"""
Wraps a path and provides image meta data.
"""
@property
def exif(self):
"""
Uses Pillow to extract the EXIF data
"""
if not hasattr(self, '_exif'):
self.read_image_data()
return self._exif
@property
def dimensions(self):
"""
Returns a tuple of the width and height of the image.
"""
if not hasattr(self, '_dimensions'):
self.read_image_data()
return self._dimensions
@memoized
def date_taken(self):
"""
Attempts to find the date taken. Returns any timestamp, even if it is
just the date created on the file meta. Current logic for the method:
1. Attempt to parse DateTimeOriginal from EXIF
2. Return st_ctime from os.stat
"""
dtorig = self.exif.get('DateTimeOriginal', None)
if dtorig:
return datetime.strptime(dtorig, EXIF_DATE_FORMAT).replace(tzinfo=tzutc())
return epochptime(self.stat().st_ctime)
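    # Illustrative note (not part of the original module): an EXIF value such
    # as '2015:06:14 22:32:17' parses with EXIF_DATE_FORMAT into a tz-aware
    # UTC datetime; files without EXIF fall back to the filesystem creation
    # time via epochptime(self.stat().st_ctime).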
@memoized
def coordinates(self):
"""
Returns the latitude and longitude as a tuple.
"""
lat = lon = None
# Decode the GPSInfo tags
if "GPSInfo" in self.exif:
self.exif["GPSInfo"] = {
ExifTags.GPSTAGS[k]: v
for k,v in self.exif["GPSInfo"].iteritems()
if k in ExifTags.GPSTAGS
}
# Gather GPS data points
gps_info = self.exif["GPSInfo"]
gps_lat = gps_info.get("GPSLatitude", None)
gps_lon = gps_info.get("GPSLongitude", None)
gps_lat_ref = gps_info.get("GPSLatitudeRef", None)
gps_lon_ref = gps_info.get("GPSLongitudeRef", None)
# Perform GPS conversions
if gps_lat and gps_lon and gps_lat_ref and gps_lon_ref:
lat = convert_to_degrees(gps_lat)
if gps_lat_ref != "N":
lat = 0 - lat
lon = convert_to_degrees(gps_lon)
if gps_lon_ref != "E":
lon = 0 - lon
return (lat, lon)
@memoized
def address(self):
"""
Reverses the address from the coordinates
"""
if not self.coordinates:
return
geocoder = GoogleV3(api_key=settings.geocode.apikey)
query = "{},{}".format(*self.coordinates)
result = geocoder.reverse(query, exactly_one=True, sensor=False)
if result:
return result.address
def read_image_data(self):
"""
Reads the image data and returns specific information.
"""
with Image.open(self.path) as img:
# Read size data
self._dimensions = img.size
# Read EXIF data
exifdata = img._getexif() if hasattr(img, "_getexif") else {}
self._exif = {
ExifTags.TAGS[k]: v
for k,v in exifdata.iteritems()
if k in ExifTags.TAGS
} if exifdata else {}
def save(self, session=None, commit=False):
"""
Stores the image information in the database along with the current
file path. Pass a session object in to use the same session for
multiple saves.
This method returns the session object. Will commit if required.
"""
session = session or create_session()
if not session.query(exists().where(
Picture.signature == self.signature
)).scalar():
session.add(Picture(
signature = self.signature,
date_taken = self.date_taken,
latitude = self.coordinates[0] if self.coordinates else None,
longitude = self.coordinates[1] if self.coordinates else None,
width = self.dimensions[0],
height = self.dimensions[1],
mimetype = unicode(self.mimetype),
bytes = self.filesize,
))
if commit:
session.commit()
return session
def save_storage(self, session=None, commit=False, **skwargs):
"""
Saves the storage associated with this image and file meta.
"""
session = session or create_session()
# Fetch the picture from the database
picture = session.query(Picture)
picture = picture.filter(Picture.signature == self.signature).first()
if not picture:
raise PictureNotFound(
"Must save the picture before assigning storages."
)
# Create the storage object
sdata = {
"stype": STYPE.ORIGINAL,
"hostname": unicode(self.hostname),
"filepath": unicode(self.path),
"memo": None,
"picture": picture,
"modified": tzaware_now(),
}
sdata.update(skwargs)
# Attempt to fetch the storage on the dependent keys
storage = session.query(Storage)
storage = storage.filter(Storage.stype == sdata['stype'])
storage = storage.filter(Storage.hostname == sdata['hostname'])
storage = storage.filter(Storage.filepath == sdata['filepath'])
storage = storage.filter(Storage.picture == sdata['picture'])
storage = storage.first() or Storage()
# Set the new values on the storage object
for key, val in sdata.iteritems():
setattr(storage, key, val)
session.add(storage)
if commit:
session.commit()
return session
if __name__ == '__main__':
import os
from inigo.config import PROJECT
img = ImageMeta(os.path.join(PROJECT, "fixtures/animals/land/cats/cat.jpg"))
print img.date_taken
print img.dimensions
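# Illustrative usage sketch (not part of the original module): persisting the
# metadata and its storage location reuses one SQLAlchemy session, e.g.
#   session = img.save(commit=True)
#   img.save_storage(session=session, commit=True)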
|
mit
| 2,563,230,144,697,206,000
| 30.724
| 86
| 0.536628
| false
| 4.31267
| false
| false
| false
|
macarthur-lab/xbrowse
|
seqr/views/apis/locus_list_api_tests.py
|
1
|
8486
|
import json
import mock
from django.test import TransactionTestCase
from django.urls.base import reverse
from seqr.models import LocusList, Project
from seqr.views.apis.locus_list_api import locus_lists, locus_list_info, create_locus_list_handler, \
update_locus_list_handler, delete_locus_list_handler, add_project_locus_lists, delete_project_locus_lists
from seqr.views.utils.orm_to_json_utils import get_project_locus_list_models
from seqr.views.utils.test_utils import _check_login
LOCUS_LIST_GUID = 'LL00049_pid_genes_autosomal_do'
PROJECT_GUID = 'R0001_1kg'
class LocusListAPITest(TransactionTestCase):
fixtures = ['users', '1kg_project', 'reference_data']
def test_locus_lists(self):
url = reverse(locus_lists)
_check_login(self, url)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
locus_lists_dict = response.json()['locusListsByGuid']
self.assertSetEqual(set(locus_lists_dict.keys()), {'LL00049_pid_genes_autosomal_do', 'LL00005_retina_proteome'})
locus_list = locus_lists_dict[LOCUS_LIST_GUID]
self.assertSetEqual(
set(locus_list.keys()),
{'locusListGuid', 'description', 'lastModifiedDate', 'numEntries', 'isPublic', 'createdBy', 'createdDate',
'canEdit', 'name'}
)
def test_locus_list_info(self):
url = reverse(locus_list_info, args=[LOCUS_LIST_GUID])
_check_login(self, url)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
response_json = response.json()
locus_lists_dict = response_json['locusListsByGuid']
self.assertListEqual(locus_lists_dict.keys(), [LOCUS_LIST_GUID])
locus_list = locus_lists_dict[LOCUS_LIST_GUID]
self.assertSetEqual(
set(locus_list.keys()),
{'locusListGuid', 'description', 'lastModifiedDate', 'numEntries', 'isPublic', 'createdBy', 'createdDate',
'canEdit', 'name', 'items', 'intervalGenomeVersion'}
)
self.assertSetEqual(
{item['geneId'] for item in locus_list['items'] if item.get('geneId')},
set(response_json['genesById'].keys())
)
def test_create_update_and_delete_locus_list(self):
create_locus_list_url = reverse(create_locus_list_handler)
_check_login(self, create_locus_list_url)
# send invalid requests to create locus_list
response = self.client.post(create_locus_list_url, content_type='application/json', data=json.dumps({}))
self.assertEqual(response.status_code, 400)
self.assertEqual(response.reason_phrase, '"Name" is required')
response = self.client.post(create_locus_list_url, content_type='application/json', data=json.dumps({
'name': 'new_locus_list', 'isPublic': True, 'rawItems': 'DDX11L1, foo 10:10-1 chr100:1-10 \n2:1234-5678',
}))
self.assertEqual(response.status_code, 400)
self.assertEqual(response.reason_phrase, 'This list contains invalid genes/ intervals. Update them, or select the "Ignore invalid genes and intervals" checkbox to ignore.')
self.assertListEqual(response.json()['invalidLocusListItems'], ['chr10:10-1', 'chr100:1-10', 'foo'])
# send valid request to create locus_list
response = self.client.post(create_locus_list_url, content_type='application/json', data=json.dumps({
'name': 'new_locus_list', 'isPublic': True, 'ignoreInvalidItems': True,
'rawItems': 'DDX11L1, foo chr100:1-1 \nchr2:1234-5678',
}))
self.assertEqual(response.status_code, 200)
new_locus_list_response = response.json()
self.assertEqual(len(new_locus_list_response['locusListsByGuid']), 1)
new_locus_list = new_locus_list_response['locusListsByGuid'].values()[0]
self.assertEqual(new_locus_list['name'], 'new_locus_list')
self.assertEqual(new_locus_list['isPublic'], True)
self.assertSetEqual(
{item['geneId'] for item in new_locus_list['items'] if item.get('geneId')},
set(new_locus_list_response['genesById'].keys())
)
self.assertListEqual(
new_locus_list['items'],
[
{'geneId': 'ENSG00000223972'},
{'chrom': '2', 'start': 1234, 'end': 5678, 'genomeVersion': '37', 'locusListIntervalGuid': mock.ANY}
]
)
guid = new_locus_list['locusListGuid']
gene_id = new_locus_list['items'][0]['geneId']
new_locus_list_model = LocusList.objects.filter(guid=guid).first()
self.assertIsNotNone(new_locus_list_model)
self.assertEqual(new_locus_list_model.name, new_locus_list['name'])
self.assertEqual(new_locus_list_model.is_public, new_locus_list['isPublic'])
self.assertEqual(new_locus_list_model.locuslistgene_set.count(), 1)
self.assertEqual(new_locus_list_model.locuslistgene_set.first().gene_id, gene_id)
self.assertEqual(new_locus_list_model.locuslistinterval_set.count(), 1)
new_interval = new_locus_list_model.locuslistinterval_set.first()
self.assertEqual(new_interval.chrom, '2')
self.assertEqual(new_interval.start, 1234)
# update the locus_list
update_locus_list_url = reverse(update_locus_list_handler, args=[guid])
response = self.client.post(update_locus_list_url, content_type='application/json', data=json.dumps(
{'name': 'updated_locus_list', 'isPublic': False, 'rawItems': 'DDX11L1 FAM138A'}))
self.assertEqual(response.status_code, 200)
updated_locus_list_response = response.json()
self.assertEqual(len(updated_locus_list_response['locusListsByGuid']), 1)
updated_locus_list = updated_locus_list_response['locusListsByGuid'].values()[0]
self.assertEqual(updated_locus_list['name'], 'updated_locus_list')
self.assertEqual(updated_locus_list['isPublic'], False)
self.assertEqual(len(updated_locus_list_response['genesById']), 2)
self.assertTrue(gene_id in updated_locus_list_response['genesById'])
new_gene_id = next(gid for gid in updated_locus_list_response['genesById'] if gid != gene_id)
self.assertSetEqual({item['geneId'] for item in updated_locus_list['items']}, {new_gene_id, gene_id})
updated_locus_list_model = LocusList.objects.filter(guid=guid).first()
self.assertIsNotNone(updated_locus_list_model)
self.assertEqual(updated_locus_list_model.name, updated_locus_list['name'])
self.assertEqual(updated_locus_list_model.is_public, updated_locus_list['isPublic'])
self.assertEqual(updated_locus_list_model.locuslistgene_set.count(), 2)
self.assertEqual(updated_locus_list_model.locuslistgene_set.last().gene_id, new_gene_id)
self.assertEqual(updated_locus_list_model.locuslistinterval_set.count(), 0)
# delete the locus_list
delete_locus_list_url = reverse(delete_locus_list_handler, args=[guid])
response = self.client.post(delete_locus_list_url, content_type='application/json')
self.assertEqual(response.status_code, 200)
# check that locus_list was deleted
new_locus_list = LocusList.objects.filter(guid=guid)
self.assertEqual(len(new_locus_list), 0)
def test_add_and_remove_project_locus_lists(self):
project = Project.objects.get(guid=PROJECT_GUID)
self.assertListEqual(list(get_project_locus_list_models(project)), [])
# add a locus list
url = reverse(add_project_locus_lists, args=[PROJECT_GUID])
_check_login(self, url)
response = self.client.post(url, content_type='application/json', data=json.dumps({'locusListGuids': [LOCUS_LIST_GUID]}))
self.assertEqual(response.status_code, 200)
self.assertListEqual(response.json()['locusListGuids'], [LOCUS_LIST_GUID])
self.assertListEqual(list(get_project_locus_list_models(project)), [LocusList.objects.get(guid=LOCUS_LIST_GUID)])
# remove a locus list
url = reverse(delete_project_locus_lists, args=[PROJECT_GUID])
response = self.client.post(url, content_type='application/json', data=json.dumps({'locusListGuids': [LOCUS_LIST_GUID]}))
self.assertEqual(response.status_code, 200)
self.assertListEqual(response.json()['locusListGuids'], [])
self.assertListEqual(list(get_project_locus_list_models(project)), [])
|
agpl-3.0
| 3,673,026,669,628,493,000
| 49.511905
| 180
| 0.665213
| false
| 3.397118
| true
| false
| false
|
bjtox/ec2ssh-manager
|
ec2ssh/ec2ssh.py
|
1
|
11122
|
import subprocess
import boto3
import sys
import configparser
from codecs import open
from os.path import expanduser
import os
import glob
import inquirer
import argparse
import libtmux
import time
class Connector:
def __init__(self, connection_name, profile):
self.hosts_folder = expanduser("~")
print(self.hosts_folder)
self.profile = profile
self.directory_to_save = self.hosts_folder+'/.ec2ssh/hosts/'
if not os.path.exists(self.directory_to_save):
os.makedirs(self.directory_to_save)
if connection_name != None:
self.connection_name = connection_name
self.config = self.read_config(connection_name)
if self.config != False:
self.port = self.config['Connection']['connection_port']
self.region_name = self.config['Connection']['region']
def open_tmux(self,selects,connection_name, region, profile, port):
server = libtmux.Server()
session = server.list_sessions()[0]
print(session)
window = session.new_window(attach=True, window_name=connection_name+str(round(time.time() * 1000)))
instances = len(selects)
print(instances)
print(instances % 2 == 0)
if instances % 2 == 0:
count = 1
else:
count = 0
while (count < instances):
window.split_window()
window.select_layout('tiled')
count += 1
selection = 1
for pane in window.list_panes():
pane.send_keys('ec2ssh connect -n {} -p {}'.format(connection_name,profile))
pane.send_keys(str(selection))
selection += 1
window.set_window_option('synchronize-panes', True)
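    # Illustrative note (not part of the original module): each pane re-runs
    # 'ec2ssh connect -n <connection_name> -p <profile>' and answers the
    # instance-selection prompt with its own index, so every chosen instance
    # ends up in its own pane; 'synchronize-panes' then mirrors keystrokes
    # across all of them.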
def printMenu(self):
print (30 * '-')
print (" M A I N - M E N U")
print (30 * '-')
print ("1. Direct Connect")
print ("2. Pass from Bastion Host")
print ("3. Autoscaling")
print (30 * '-')
def read_config(self,host):
if os.path.isfile(self.directory_to_save+host+'.ini'):
config = configparser.ConfigParser()
config.sections()
config.read(self.directory_to_save+host+'.ini')
return(config);
else:
return False
def query_yes_no(self,question, default="yes"):
valid = {"yes": True, "y": True, "ye": True,
"no": False, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
def addConfig(self):
config = configparser.ConfigParser()
self.printMenu()
valid_choise=0
usr_input = ''
while usr_input not in ['1', '2', '3']:
            if valid_choise:
                print("Not a valid choice")
valid_choise=1
usr_input = input("Input: ")
config['Connection']= {}
config['Connection']['region'] = input('Specify a Region:\n-> ')
config['Connection']['connection_port'] = input('Specify a connection port (for direct or for Bastion):\n-> ')
config['Connection']['profile'] = input('Specify which AWS profile use:\n-> ')
if not config['Connection']['profile']:
config['Connection']['profile'] = 'default'
if usr_input == "1":
config['Connection']['type'] = "direct"
config['EC2INSTANCE'] = {}
config['EC2INSTANCE']['pem_path'] = input('Enter a keyPair EC2 file path (absolute path):\n-> ')
config['EC2INSTANCE']['user'] = input('Enter a EC2 user (default "ec2-user"):\n-> ')
config['EC2INSTANCE']['ec2_instance_id'] = input('Enter a EC2 Instance ID:\n-> ')
if not config['EC2INSTANCE']['user']:
config['EC2INSTANCE']['user'] = 'ec2-user'
elif usr_input == "2":
config['Connection']['type'] = "bastion"
config['EC2INSTANCE'] = {}
config['EC2INSTANCE']['pem_path'] = input('Enter a keyPair EC2 file path (absolute path):\n-> ')
config['EC2INSTANCE']['user'] = input('Enter a EC2 user (default "ec2-user"):\n-> ')
config['EC2INSTANCE']['ec2_instance_id'] = input('Enter a EC2 Instance ID:\n-> ')
config['BASTIONHOST'] = {}
config['BASTIONHOST']['b_pem_path'] = input('Enter a Bastion pem file path (absolute path):\n-> ')
config['BASTIONHOST']['b_user'] = input('Enter a Bastion user:\n-> ')
config['BASTIONHOST']['b_ec2_instance_id'] = input('Enter a Bastion Instance ID:\n-> ')
if not config['EC2INSTANCE']['user']:
config['EC2INSTANCE']['user'] = 'ec2-user'
elif usr_input == "3":
config['Connection']['type'] = "asg"
config['ASG'] = {}
config['ASG']['pem_path'] = input('Enter a pem file path (absolute path):\n-> ')
config['ASG']['user'] = input('Enter a user (default "ec2-user"):\n-> ')
config['ASG']['name'] = input('Enter a ASG Name ID:\n-> ')
if not config['ASG']['user']:
config['ASG']['user'] = 'ec2-user'
questions = self.query_yes_no("ASG allow ssh only from Bastion Host?")
if questions == True:
config['BASTIONHOST'] = {}
config['BASTIONHOST']['b_pem_path'] = input('Enter a Bastion pem file path (absolute path):\n-> ')
config['BASTIONHOST']['b_user'] = input('Enter a Bastion user:\n-> ')
config['BASTIONHOST']['b_ec2_instance_id'] = input('Enter a Bastion Instance ID:\n-> ')
with open(self.directory_to_save+self.connection_name+'.ini', 'w') as configfile:
config.write(configfile)
print("File Config "+self.connection_name+" created")
def direct_connect(self,ec2_instance_config):
target = {'key': ec2_instance_config['pem_path'], 'user': ec2_instance_config['user'], 'host': ec2_instance_config['ec2_instance_id']}
target_ec2 = self.client
target_response = target_ec2.describe_instances(InstanceIds=[target['host']])
target_ip = target_response['Reservations'][0]['Instances'][0]['PublicIpAddress']
subprocess.call("ssh-add {}".format(target['key']), shell=True)
subprocess.call("ssh {}@{} -p {}".format(target['user'], target_ip, self.port), shell=True)
def bastion_connect(self,ec2_instance_config,bastion_config):
target = {'key': ec2_instance_config['pem_path'], 'user': ec2_instance_config['user'], 'host': ec2_instance_config['ec2_instance_id']}
target_ec2 = self.client
target_response = target_ec2.describe_instances(InstanceIds=[target['host']])
bastion = {'key': bastion_config['b_pem_path'], 'user': bastion_config['b_user'], 'host': bastion_config['b_ec2_instance_id']}
bastion_ec2 = self.client
bastion_response = bastion_ec2.describe_instances(InstanceIds=[bastion['host']])
bastion_ip = bastion_response['Reservations'][0]['Instances'][0]['PublicIpAddress']
target_ip = target_response['Reservations'][0]['Instances'][0]['NetworkInterfaces'][0]['PrivateIpAddress']
subprocess.call("ssh-add {} {}".format(bastion['key'], target['key']), shell=True)
subprocess.call("ssh -t -A {}@{} -p {} ssh {}@{}".format(bastion['user'], bastion_ip,self.port, target['user'], target_ip), shell=True)
def ec2ssh(self):
self.session = boto3.Session(profile_name=self.profile)
self.client = self.session.client('ec2',region_name=self.config['Connection']['region'])
config = self.read_config(self.connection_name)
if config['Connection']['type'] == "direct":
self.direct_connect(config['EC2INSTANCE'])
elif config['Connection']['type'] == "bastion":
self.bastion_connect(config['EC2INSTANCE'], config['BASTIONHOST'])
elif config['Connection']['type'] == "asg":
print ('Please select an option:')
print (" 0. All")
i=1
selects = {}
for instance in self.list_instance_in_asg(config['ASG']['name']):
print (" "+str(i)+". "+instance['InstanceId']+" - "+instance['LifecycleState'])
selects[i]=instance['InstanceId']
i+=1
config_asg = {}
            choice = input('Enter Value: ')
            if choice != "0":
                config_asg['pem_path']=config['ASG']['pem_path']
                config_asg['user']=config['ASG']['user']
                config_asg['ec2_instance_id']=selects[int(choice)]
if config.has_section('BASTIONHOST'):
config_asg_bastion = {}
config_asg_bastion['b_pem_path']=config['BASTIONHOST']['b_pem_path']
config_asg_bastion['b_user']=config['BASTIONHOST']['b_user']
config_asg_bastion['b_ec2_instance_id']=config['BASTIONHOST']['b_ec2_instance_id']
self.bastion_connect(config_asg, config_asg_bastion)
else:
self.direct_connect(config_asg)
else:
self.open_tmux(selects, self.connection_name, self.region_name, self.profile, self.port)
def list_avaible_connection(self):
print (30 * '-')
for file in os.listdir(self.directory_to_save):
if file.endswith(".ini"):
name_file = file.replace('.ini','')
print(" Connection Name: "+name_file)
config = self.read_config(name_file)
print(" Type: "+config['Connection']['type'])
print(" Region Name: "+config['Connection']['region'])
print(" Connection Port: "+config['Connection']['connection_port'])
if config['Connection']['type'] == "direct":
print(" Key Pair: "+config['EC2INSTANCE']['pem_path'])
print(" User Pair: "+config['EC2INSTANCE']['user'])
print(" Instance Id Pair: "+config['EC2INSTANCE']['ec2_instance_id'])
elif config['Connection']['type'] == "bastion":
print(" Key Pair: "+config['EC2INSTANCE']['pem_path'])
print(" User Pair: "+config['EC2INSTANCE']['user'])
print(" Instance Id Pair: "+config['EC2INSTANCE']['ec2_instance_id'])
print(" Bastion Id: "+config['BASTIONHOST']['b_ec2_instance_id'])
elif config['Connection']['type'] == "asg":
print(" Key Pair: "+config['ASG']['pem_path'])
print(" User Pair: "+config['ASG']['user'])
print(" ASG Name: "+config['ASG']['name'])
print(" Bastion Id: "+config['BASTIONHOST']['b_ec2_instance_id'])
print (30 * '-')
def list_instance_in_asg(self, asg_name):
if self.profile!=None:
asg_client = self.session.client('autoscaling',region_name=self.region_name)
else:
asg_client = boto3.client('autoscaling',region_name=self.region_name)
response = asg_client.describe_auto_scaling_groups(
AutoScalingGroupNames=[
asg_name,
]
)
return response['AutoScalingGroups'][0]['Instances']
def rm_connecition(self):
try:
os.remove(self.directory_to_save+self.connection_name+'.ini')
print(self.connection_name+" connection was removed!")
except OSError:
print(self.connection_name+" connection doesn't exist!")
pass
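# Below is a minimal sketch (not part of the original module) of the INI profile
# that addConfig() writes for a "direct" connection; all values are illustrative
# placeholders, not real identifiers.
#
#   [Connection]
#   region = eu-west-1
#   connection_port = 22
#   profile = default
#   type = direct
#
#   [EC2INSTANCE]
#   pem_path = /path/to/keypair.pem
#   user = ec2-user
#   ec2_instance_id = i-0123456789abcdef0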
|
mit
| -2,762,748,396,079,273,000
| 39.155235
| 139
| 0.605107
| false
| 3.521849
| true
| false
| false
|
mmahut/openshift-ansible
|
roles/openshift_health_checker/action_plugins/openshift_health_check.py
|
1
|
5501
|
"""
Ansible action plugin to execute health checks in OpenShift clusters.
"""
# pylint: disable=wrong-import-position,missing-docstring,invalid-name
import sys
import os
from collections import defaultdict
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
from ansible.plugins.action import ActionBase
from ansible.module_utils.six import string_types
# Augment sys.path so that we can import checks from a directory relative to
# this callback plugin.
sys.path.insert(1, os.path.dirname(os.path.dirname(__file__)))
from openshift_checks import OpenShiftCheck, OpenShiftCheckException, load_checks # noqa: E402
class ActionModule(ActionBase):
def run(self, tmp=None, task_vars=None):
result = super(ActionModule, self).run(tmp, task_vars)
task_vars = task_vars or {}
        # Ansible vars are not available to the callback plugin in a supported
        # way, so record anything it will need in the result.
result['playbook_context'] = task_vars.get('r_openshift_health_checker_playbook_context')
if "openshift" not in task_vars:
result["failed"] = True
result["msg"] = "'openshift' is undefined, did 'openshift_facts' run?"
return result
try:
known_checks = self.load_known_checks(tmp, task_vars)
args = self._task.args
requested_checks = normalize(args.get('checks', []))
resolved_checks = resolve_checks(requested_checks, known_checks.values())
except OpenShiftCheckException as e:
result["failed"] = True
result["msg"] = str(e)
return result
result["checks"] = check_results = {}
user_disabled_checks = normalize(task_vars.get('openshift_disable_check', []))
for check_name in resolved_checks:
display.banner("CHECK [{} : {}]".format(check_name, task_vars["ansible_host"]))
check = known_checks[check_name]
if not check.is_active():
r = dict(skipped=True, skipped_reason="Not active for this host")
elif check_name in user_disabled_checks:
r = dict(skipped=True, skipped_reason="Disabled by user request")
else:
try:
r = check.run()
except OpenShiftCheckException as e:
r = dict(
failed=True,
msg=str(e),
)
if check.changed:
r["changed"] = True
check_results[check_name] = r
result["changed"] = any(r.get("changed") for r in check_results.values())
if any(r.get("failed") for r in check_results.values()):
result["failed"] = True
result["msg"] = "One or more checks failed"
return result
def load_known_checks(self, tmp, task_vars):
load_checks()
known_checks = {}
for cls in OpenShiftCheck.subclasses():
check_name = cls.name
if check_name in known_checks:
other_cls = known_checks[check_name].__class__
raise OpenShiftCheckException(
"non-unique check name '{}' in: '{}.{}' and '{}.{}'".format(
check_name,
cls.__module__, cls.__name__,
other_cls.__module__, other_cls.__name__))
known_checks[check_name] = cls(execute_module=self._execute_module, tmp=tmp, task_vars=task_vars)
return known_checks
def resolve_checks(names, all_checks):
"""Returns a set of resolved check names.
Resolving a check name expands tag references (e.g., "@tag") to all the
checks that contain the given tag. OpenShiftCheckException is raised if
names contains an unknown check or tag name.
names should be a sequence of strings.
all_checks should be a sequence of check classes/instances.
"""
known_check_names = set(check.name for check in all_checks)
known_tag_names = set(name for check in all_checks for name in check.tags)
check_names = set(name for name in names if not name.startswith('@'))
tag_names = set(name[1:] for name in names if name.startswith('@'))
unknown_check_names = check_names - known_check_names
unknown_tag_names = tag_names - known_tag_names
if unknown_check_names or unknown_tag_names:
msg = []
if unknown_check_names:
msg.append('Unknown check names: {}.'.format(', '.join(sorted(unknown_check_names))))
if unknown_tag_names:
msg.append('Unknown tag names: {}.'.format(', '.join(sorted(unknown_tag_names))))
msg.append('Make sure there is no typo in the playbook and no files are missing.')
raise OpenShiftCheckException('\n'.join(msg))
tag_to_checks = defaultdict(set)
for check in all_checks:
for tag in check.tags:
tag_to_checks[tag].add(check.name)
resolved = check_names.copy()
for tag in tag_names:
resolved.update(tag_to_checks[tag])
return resolved
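# A small worked sketch of resolve_checks() (check/tag names are illustrative):
# given one check with name="docker_storage", tags=["health"], and another with
# name="memory_availability", tags=["preflight", "health"], the call
#   resolve_checks(["docker_storage", "@preflight"], all_checks)
# returns {"docker_storage", "memory_availability"}: explicit names are kept,
# and the "@preflight" tag expands to every check carrying that tag.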
def normalize(checks):
"""Return a clean list of check names.
The input may be a comma-separated string or a sequence. Leading and
trailing whitespace characters are removed. Empty items are discarded.
"""
if isinstance(checks, string_types):
checks = checks.split(',')
return [name.strip() for name in checks if name.strip()]
|
apache-2.0
| 1,259,086,927,939,239,200
| 36.168919
| 109
| 0.612979
| false
| 4.154834
| false
| false
| false
|
jakub-m/phantomcurl
|
phantomcurl/test/test_post_data.py
|
1
|
1059
|
from nose.tools import *
from phantomcurl.utils import split_post_items
def test_post_data_good():
expected_given = [
([('foo', 'bar')], ['foo=bar']),
([('foo', '')], ['foo=']),
([('foo', '=')], ['foo==']),
([('', '')], ['=']),
([('', '=')], ['==']),
([('', 'bar')], ['=bar'])
]
for expected, given in expected_given:
yield check_post_data_good, expected, given
def check_post_data_good(expected_dict, post_items):
post_dict = split_post_items(post_items)
assert_equals(expected_dict, post_dict)
def test_post_data_bad():
bad_input = ['foo', '']
for input_item in bad_input:
yield check_post_data_bad, input_item
def check_post_data_bad(post_item):
assert_raises(ValueError, split_post_items, [post_item])
#def test_dict_to_post_string():
# assert_in(
# dict_to_post_string({'foo', 'bar'}),
# ['foo=bar'])
# assert_in(
# dict_to_post_string({'foo': '', 'ham': 'spam '}),
# ['foo=&ham=spam+', 'ham=spam+&foo=']
# )
|
gpl-2.0
| -7,154,057,688,203,821,000
| 24.214286
| 60
| 0.525024
| false
| 3.105572
| false
| false
| false
|
richardkiss/pycoinnet
|
pycoinnet/peer/Fetcher.py
|
1
|
4318
|
import asyncio
import logging
import weakref
from pycoin.serialize import b2h_rev
from pycoinnet.InvItem import InvItem, ITEM_TYPE_TX, ITEM_TYPE_BLOCK, ITEM_TYPE_MERKLEBLOCK
class Fetcher:
"""
Fetching a merkleblock also fetches the transactions that follow, and
includes them in the message as the "tx" key.
"""
def __init__(self, peer):
self.peer = peer
self.request_q = asyncio.Queue()
self.futures = weakref.WeakValueDictionary()
getdata_loop_future = asyncio.Task(self._getdata_loop())
next_message = peer.new_get_next_message_f(
filter_f=lambda name, data: name in ["tx", "block", "merkleblock", "notfound"])
peer.add_task(self._fetch_loop(next_message, getdata_loop_future))
def fetch(self, inv_item, timeout=None):
"""
Return the fetched object or None if the remote says it doesn't have it, or
times out by exceeding `timeout` seconds.
"""
future = self.futures.get(inv_item)
if not future:
future = asyncio.Future()
self.futures[inv_item] = future
self.request_q.put_nowait(inv_item)
try:
return (yield from asyncio.wait_for(future, timeout=timeout))
except asyncio.TimeoutError:
return None
def queue_size(self):
pass
# ## TODO: finish
@asyncio.coroutine
def _getdata_loop(self):
while True:
so_far = []
inv_item = yield from self.request_q.get()
while True:
so_far.append(inv_item)
if self.request_q.qsize() == 0 or len(so_far) >= 50000:
break
inv_item = yield from self.request_q.get()
self.peer.send_msg("getdata", items=so_far)
@asyncio.coroutine
def _fetch_loop(self, next_message, getdata_loop_future):
try:
while True:
name, data = yield from next_message()
ITEM_LOOKUP = dict(tx="tx", block="block", merkleblock="header")
if name in ITEM_LOOKUP:
item = data[ITEM_LOOKUP[name]]
the_hash = item.hash()
TYPE_DB = {"tx": ITEM_TYPE_TX,
"block": ITEM_TYPE_BLOCK,
"merkleblock": ITEM_TYPE_MERKLEBLOCK}
the_type = TYPE_DB[name]
inv_item = InvItem(the_type, the_hash)
future = self.futures.get(inv_item)
if name == "merkleblock":
txs = []
for h in data["tx_hashes"]:
name, data = yield from next_message()
if name != "tx":
logging.error(
"insufficient tx messages after merkleblock message: missing %s",
b2h_rev(h))
del self.futures[inv_item]
future.set_result(None)
break
tx = data["tx"]
if tx.hash() != h:
logging.error(
"missing tx message after merkleblock message: missing %s", b2h_rev(h))
del self.futures[inv_item]
future.set_result(None)
break
txs.append(tx)
item.txs = txs
if future is not None:
del self.futures[inv_item]
if not future.done():
future.set_result(item)
else:
logging.info("got %s unsolicited", item.id())
if name == "notfound":
for inv_item in data["items"]:
the_hash = inv_item.data
future = self.futures.get(inv_item)
if future:
del self.futures[inv_item]
future.set_result(None)
except EOFError:
getdata_loop_future.cancel()
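# Rough usage sketch (assumes a connected pycoinnet `peer` object inside a
# running asyncio event loop; not part of the original module):
#
#   fetcher = Fetcher(peer)
#   inv_item = InvItem(ITEM_TYPE_BLOCK, block_hash)
#   block = yield from fetcher.fetch(inv_item, timeout=30)
#   # `block` is None if the peer answers "notfound" or the timeout expires.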
|
mit
| -5,082,640,224,147,455,000
| 40.12381
| 107
| 0.46943
| false
| 4.540484
| false
| false
| false
|
GNOME/orca
|
src/orca/scripts/apps/Instantbird/chat.py
|
1
|
6860
|
# Orca
#
# Copyright 2010 Joanmarie Diggs.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Custom chat module for Instantbird."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2010 Joanmarie Diggs."
__license__ = "LGPL"
import pyatspi
import orca.chat as chat
########################################################################
# #
# The Instantbird chat class. #
# #
########################################################################
class Chat(chat.Chat):
def __init__(self, script, buddyListAncestries):
chat.Chat.__init__(self, script, buddyListAncestries)
########################################################################
# #
# InputEvent handlers and supporting utilities #
# #
########################################################################
def getMessageFromEvent(self, event):
"""Get the actual displayed message. This will almost always be the
unaltered any_data from an event of type object:text-changed:insert.
Arguments:
- event: the Event from which to take the text.
Returns the string which should be presented as the newly-inserted
text. (Things like chatroom name prefacing get handled elsewhere.)
"""
string = ""
# IMs are written in areas that look like bubbles. When a new bubble
# is inserted, we see an embedded object character inserted into the
# document frame. The first paragraph is the bubble title; the
# rest (usually just one) are the message itself.
#
if self._script.utilities.isDocument(event.source):
bubble = event.source[event.detail1]
hasRole = lambda x: x and x.getRole() == pyatspi.ROLE_PARAGRAPH
paragraphs = pyatspi.findAllDescendants(bubble, hasRole)
# If the user opted the non-default, "simple" appearance, then this
# might not be a bubble at all, but a paragraph.
#
if not paragraphs and bubble.getRole() == pyatspi.ROLE_PARAGRAPH:
paragraphs.append(bubble)
for paragraph in paragraphs:
msg = self._script.utilities.substring(paragraph, 0, -1)
if msg == self._script.EMBEDDED_OBJECT_CHARACTER:
# This seems to occur for non-focused conversations.
#
msg = self._script.utilities.substring(paragraph[0], 0, -1)
string = self._script.utilities.appendString(string, msg)
return string
# If we instead have a section, we are writing another message into
# the existing bubble. In this case, we get three separate items
# inserted: a separator, a paragraph with the desired text, and an
# empty section.
#
if event.source.getRole() == pyatspi.ROLE_SECTION:
obj = event.source[event.detail1]
if obj and obj.getRole() == pyatspi.ROLE_PARAGRAPH:
try:
text = obj.queryText()
except:
pass
else:
string = text.getText(0, -1)
return string
########################################################################
# #
# Convenience methods for identifying, locating different accessibles #
# #
########################################################################
def isChatRoomMsg(self, obj):
"""Returns True if the given accessible is the text object for
associated with a chat room conversation.
Arguments:
- obj: the accessible object to examine.
"""
if not obj:
return False
if self._script.utilities.isDocument(obj):
return True
return obj.getRole() in [pyatspi.ROLE_SECTION, pyatspi.ROLE_PARAGRAPH]
def getChatRoomName(self, obj):
"""Attempts to find the name of the current chat room.
Arguments:
- obj: The accessible of interest
Returns a string containing what we think is the chat room name.
"""
name = ""
ancestor = self._script.utilities.ancestorWithRole(
obj,
[pyatspi.ROLE_SCROLL_PANE, pyatspi.ROLE_FRAME],
[pyatspi.ROLE_APPLICATION])
if ancestor and ancestor.getRole() == pyatspi.ROLE_SCROLL_PANE:
# The scroll pane has a proper labelled by relationship set.
#
name = self._script.utilities.displayedLabel(ancestor)
if not name:
try:
text = self._script.utilities.displayedText(ancestor)
if text.lower().strip() != self._script.name.lower().strip():
name = text
except:
pass
return name
def isFocusedChat(self, obj):
"""Returns True if we plan to treat this chat as focused for
the purpose of deciding whether or not a message should be
presented to the user.
Arguments:
- obj: the accessible object to examine.
"""
# Normally, we'd see if the top level window associated
# with this object had STATE_ACTIVE. That doesn't work
# here. So see if the script for the locusOfFocus is
# this script. If so, the only other possibility is that
# we're in the buddy list instead.
#
if obj and obj.getState().contains(pyatspi.STATE_SHOWING) \
and self._script.utilities.isInActiveApp(obj) \
and not self.isInBuddyList(obj):
return True
return False
|
lgpl-2.1
| -3,200,580,494,293,113,300
| 37.757062
| 79
| 0.530029
| false
| 4.803922
| false
| false
| false
|
wfhio/tramcar
|
job_board/models/site_config.py
|
1
|
2504
|
from __future__ import unicode_literals
from django.db import models
from django.contrib.sites.models import Site
class SiteConfig(models.Model):
expire_after = models.SmallIntegerField(default=30)
# NOTE: We set a default here, but we will override this with a more
# suitable default when we create the SiteConfig instance
admin_email = models.EmailField(default='admin@site')
site = models.OneToOneField(Site, on_delete=models.CASCADE)
remote = models.BooleanField(
default=False,
help_text="Select if this job board is for remote jobs only"
)
protocol = models.CharField(
default='http',
choices=(('http', 'http'), ('https', 'https')),
max_length=5,
help_text="The protocol to use when building links in "
"e-mail templates, etc."
)
google_analytics = models.CharField(
max_length=20,
blank=True,
help_text="Google Analytics Tracking ID"
)
twitter_user = models.CharField(
max_length=15,
blank=True,
help_text="Your site's Twitter username, fill in to "
"have a Follow icon appear on select pages"
)
twitter_consumer_key = models.CharField(max_length=100, blank=True)
twitter_consumer_secret = models.CharField(max_length=100, blank=True)
twitter_access_token = models.CharField(max_length=100, blank=True)
twitter_access_token_secret = models.CharField(max_length=100, blank=True)
stripe_secret_key = models.CharField(max_length=100, blank=True)
stripe_publishable_key = models.CharField(max_length=100, blank=True)
price = models.DecimalField(
max_digits=5,
decimal_places=2,
default=0,
help_text="Price to charge for posting a job, "
"set to 0 to disable charging"
)
mailchimp_username = models.CharField(max_length=20, blank=True)
mailchimp_api_key = models.CharField(max_length=50, blank=True)
mailchimp_list_id = models.CharField(max_length=20, blank=True)
def price_in_cents(self):
# Stripe expects an integer
return int(self.price * 100)
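    # For example (sketch): with price = Decimal('19.99'), price_in_cents()
    # returns 1999, the integer amount that Stripe expects.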
def __str__(self):
return self.site.name
|
mit
| -7,891,469,627,665,054,000
| 42.929825
| 78
| 0.580272
| false
| 4.324698
| false
| false
| false
|
furthz/colegio
|
src/discounts/forms.py
|
1
|
4824
|
from django import forms
from enrollment.models import Servicio
from enrollment.models import TipoServicio
from enrollment.models import Matricula
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
from django.forms import ModelForm, Form
from utils.models import TiposNivel
from django.utils.translation import ugettext_lazy as _
from discounts.models import Descuento
from discounts.models import TipoDescuento
from utils.middleware import get_current_colegio, get_current_userID
##############################################################
# Request Discounts (Solicitar Descuentos)
##############################################################
class SolicitarDescuentoForm(ModelForm):
"""
    Form for the Descuento model.
    Note:
        only the fields defined by the users are added as form fields
"""
class Meta:
model = Descuento
fields = [
'matricula',
'tipo_descuento',
'numero_expediente',
'comentario',
]
labels = {
'matricula':_('Solicitante'),
'tipo_descuento':_('Descuento'),
'numero_expediente':_('Nro. Expediente'),
'comentario':_('Comentario'),
}
def ChoiceNiveles(self):
MY_CHOICES = (
('1', 'Inicial'),
('2', 'Primaria'),
('3', 'Secundaria'),
)
return MY_CHOICES
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
#self.fields['nivel'] = forms.ChoiceField(choices=self.ChoiceNiveles())
#self.fields['grado'] = forms.ChoiceField(choices=self.ChoiceGrados())
self.fields['matricula'].widget.attrs.update({'class': 'form-control'})
self.fields['tipo_descuento'].widget.attrs.update({'class': 'form-control'})
self.fields['numero_expediente'].widget.attrs.update({'class': 'form-control'})
self.fields['comentario'].widget.attrs.update({'class': 'form-control'})
self.fields['matricula'].widget.attrs['editable'] = False
class TipoDescuentForm(ModelForm):
"""
    Form for the Descuento model.
    Note:
        only the fields defined by the users are added as form fields
"""
servicio = forms.ModelChoiceField(queryset=Servicio.objects.filter(activo=True))
class Meta:
model = TipoDescuento
fields = [
'servicio',
'descripcion',
'porcentaje',
]
labels = {
'servicio': _('Servicio'),
'descripcion': _('Descripción'),
'porcentaje': _('Porcentaje'),
}
def __init__(self, *args, **kwargs):
colegio = kwargs.pop('colegio', None)
super(TipoDescuentForm, self).__init__(*args, **kwargs)
# self.fields['nivel'] = forms.ChoiceField(choices=self.ChoiceNiveles())
# self.fields['grado'] = forms.ChoiceField(choices=self.ChoiceGrados())
self.fields['servicio'].widget.attrs.update({'class': 'form-control'})
self.fields['descripcion'].widget.attrs.update({'class': 'form-control'})
self.fields['porcentaje'].widget.attrs.update({'class': 'form-control'})
if colegio:
self.fields['servicio'].queryset = Servicio.objects.filter(activo=True,tipo_servicio__colegio__id_colegio=colegio)
##############################################################
# Approve Discounts (Aprobar Descuentos)
##############################################################
class DetalleDescuentosForm(forms.Form):
"""
    Form for filtering the details of the income control view.
    Note:
        only the fields defined by the users are added as form fields
"""
alumno = forms.CharField(required=False)
anio = forms.CharField()
numero_expediente = forms.CharField(required=False)
estado = forms.CharField()
def ChoiceAnio(self):
MY_CHOICES = (
('2017', '2017'),
('2016', '2016'),
)
return MY_CHOICES
def ChoiceEstado(self):
MY_CHOICES = (
('Todos', 'Todos'),
('Aprobado', 'Aprobado'),
('No_aprobado', 'No aprobado'),
('Pendiente', 'Pendiente'),
)
return MY_CHOICES
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['anio'] = forms.ChoiceField(choices=self.ChoiceAnio())
self.fields['estado'] = forms.ChoiceField(choices=self.ChoiceEstado())
self.fields['alumno'].widget.attrs.update({'class': 'form-control'})
self.fields['anio'].widget.attrs.update({'class': 'form-control'})
self.fields['numero_expediente'].widget.attrs.update({'class': 'form-control'})
self.fields['estado'].widget.attrs.update({'class': 'form-control'})
|
mit
| 4,555,708,221,566,041,000
| 35.793893
| 126
| 0.578423
| false
| 3.632253
| false
| false
| false
|
daviddeng/azrael
|
demos/ctrl_swarm.py
|
1
|
4132
|
# Copyright 2014, Oliver Nagy <olitheolix@gmail.com>
#
# This file is part of Azrael (https://github.com/olitheolix/azrael)
#
# Azrael is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Azrael is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Azrael. If not, see <http://www.gnu.org/licenses/>.
"""
Manoeuvre the swarm of cubes in an orchestrated fashion.
Due to the lack of any feedback control the cubes may not move too orderly but
it suffices to demonstrate the principle.
"""
import os
import sys
import time
import setproctitle
import multiprocessing
# Augment the Python path so that we can include the main project.
p = os.path.dirname(os.path.abspath(__file__))
p = os.path.join(p, '..')
sys.path.insert(0, p)
del p
import azrael.client
import azrael.config as config
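# NOTE: `types.CmdBooster` is used below, but no `types` module is imported in
# this snippet; presumably something like `import azrael.types as types` (or an
# equivalent) is required. The exact import path is an assumption, as it is not
# shown here.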
class ControllerCubeLeft(multiprocessing.Process):
def __init__(self, objID, ip=config.addr_clerk, port=config.port_clerk):
super().__init__()
self.left = 0
self.right = 1
self.ip = ip
self.port = port
self.objID = objID
def run(self):
client = azrael.client.Client(ip=self.ip, port_clerk=self.port)
# ---------------------------------------------------------------------
# Edit here to change the force of boosters.
# ---------------------------------------------------------------------
# Turn both boosters on after 2s.
left = types.CmdBooster(self.left, force=0.1)
right = types.CmdBooster(self.right, force=0.1)
client.controlParts(self.objID, [right, left], [])
print('{0:02d}: Manoeuvre 1'.format(self.objID))
time.sleep(2)
# Fire the booster asymmetrically to make the cube turn.
left = types.CmdBooster(self.left, force=0)
right = types.CmdBooster(self.right, force=1)
client.controlParts(self.objID, [right, left], [])
print('{0:02d}: Manoeuvre 2'.format(self.objID))
time.sleep(2)
# Reverse the force settings to stop the spinning.
left = types.CmdBooster(self.left, force=1)
right = types.CmdBooster(self.right, force=0)
client.controlParts(self.objID, [right, left], [])
print('{0:02d}: Manoeuvre 3'.format(self.objID))
time.sleep(2)
# Use the same force on both boosters to just move forward without
# inducing any more spinning.
left = types.CmdBooster(self.left, force=0.1)
right = types.CmdBooster(self.right, force=0.1)
client.controlParts(self.objID, [right, left], [])
time.sleep(4)
# Done.
print('{0:02d}: Manoeuvre 4'.format(self.objID))
class ControllerCubeRight(ControllerCubeLeft):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Swap the index for left/right compared to the base class.
self.left = 1
self.right = 0
def main():
addr = config.addr_clerk
# Controllers for columns 1, 2, 3, 4.
CCL, CCR = ControllerCubeLeft, ControllerCubeRight
group_1 = [CCL(4 * _ + 0, addr) for _ in range(1, 5)]
group_2 = [CCL(4 * _ + 1, addr) for _ in range(1, 5)]
group_3 = [CCR(4 * _ + 2, addr) for _ in range(1, 5)]
group_4 = [CCR(4 * _ + 3, addr) for _ in range(1, 5)]
# Start the cubes in the two outer columns.
time.sleep(0.5)
for p0, p1 in zip(group_1, group_4):
p0.start()
p1.start()
time.sleep(0.5)
# Start the cubes in the two inner columns.
time.sleep(1)
for p0, p1 in zip(group_2, group_3):
p0.start()
p1.start()
time.sleep(0.5)
print('done')
if __name__ == '__main__':
main()
|
agpl-3.0
| 8,619,977,157,674,257,000
| 32.056
| 79
| 0.616167
| false
| 3.364821
| false
| false
| false
|
scott-maddox/simplepl
|
src/simplepl/dialogs/lockin_config_dialog.py
|
1
|
4145
|
#
# Copyright (c) 2013-2014, Scott J Maddox
#
# This file is part of SimplePL.
#
# SimplePL is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# SimplePL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with SimplePL. If not, see
# <http://www.gnu.org/licenses/>.
#
#######################################################################
# third party imports
from PySide import QtGui, QtCore
class LockinConfigDialog(QtGui.QDialog):
def __init__(self, lockin, parent=None):
super(LockinConfigDialog, self).__init__(parent)
self.setModal(True)
settings = QtCore.QSettings()
timeConstantIndex = int(settings.value('lockin/time_constant_index',
9)) # 300 ms default
reserveModeIndex = int(settings.value('lockin/reserve_mode_index',
0)) # High reserve default
inputLineFilterIndex = int(settings.value('lockin/input_line_filter_index',
3)) # both filters default
self.timeConstantComboBox = QtGui.QComboBox()
for text in lockin.getTimeConstantLabelsList():
self.timeConstantComboBox.addItem(text)
self.timeConstantComboBox.setCurrentIndex(timeConstantIndex)
self.reserveModeComboBox = QtGui.QComboBox()
self.reserveModeComboBox.addItem('High Reserve')
self.reserveModeComboBox.addItem('Normal')
self.reserveModeComboBox.addItem('Low Noise (minimum)')
self.reserveModeComboBox.setCurrentIndex(reserveModeIndex)
self.inputLineFilterComboBox = QtGui.QComboBox()
self.inputLineFilterComboBox.addItem('no filters')
self.inputLineFilterComboBox.addItem('line notch filter')
self.inputLineFilterComboBox.addItem('2x line notch filter')
self.inputLineFilterComboBox.addItem('both notch filters')
self.inputLineFilterComboBox.setCurrentIndex(inputLineFilterIndex)
layout = QtGui.QVBoxLayout(self)
form = QtGui.QFormLayout()
form.addRow('Time Constant', self.timeConstantComboBox)
form.addRow('Reserve Mode', self.reserveModeComboBox)
form.addRow('Input Line Filter', self.inputLineFilterComboBox)
layout.addLayout(form)
# OK and Cancel buttons
self.buttons = QtGui.QDialogButtonBox(
QtGui.QDialogButtonBox.Ok | QtGui.QDialogButtonBox.Cancel,
QtCore.Qt.Horizontal, self)
layout.addWidget(self.buttons)
# Connect buttons
self.buttons.accepted.connect(self.accept)
self.buttons.rejected.connect(self.reject)
@staticmethod
def getLockinConfig(lockin, parent=None):
'''
Returns (timeConstantIndex, reserveModeIndex, inputLineFilterIndex,
accepted), and changes the corresponding values in the settings.
'''
dialog = LockinConfigDialog(lockin, parent)
result = dialog.exec_()
accepted = (result == QtGui.QDialog.Accepted)
timeConstantIndex = dialog.timeConstantComboBox.currentIndex()
reserveModeIndex = dialog.reserveModeComboBox.currentIndex()
inputLineFilterIndex = dialog.inputLineFilterComboBox.currentIndex()
settings = QtCore.QSettings()
settings.setValue('lockin/time_constant_index', timeConstantIndex)
settings.setValue('lockin/reserve_mode_index', reserveModeIndex)
settings.setValue('lockin/input_line_filter_index',
inputLineFilterIndex)
settings.sync()
return timeConstantIndex, reserveModeIndex, \
inputLineFilterIndex, accepted
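# Rough usage sketch (not part of the original module; `lockin` is assumed to be
# a driver object exposing getTimeConstantLabelsList(), as used above, and
# `main_window` an arbitrary parent widget):
#
#   tc_idx, reserve_idx, filter_idx, ok = \
#       LockinConfigDialog.getLockinConfig(lockin, parent=main_window)
#   # if ok is True, apply the three indices to the lock-in amplifier driver.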
|
agpl-3.0
| -6,380,274,891,226,863,000
| 42.177083
| 83
| 0.666104
| false
| 4.456989
| false
| false
| false
|
eawag-rdm/xlsxtocsv
|
xlsxtocsv/xlsxtocsv.py
|
1
|
3605
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import datetime as dt
from Tkinter import Tk
import tkFileDialog
import openpyxl as op
import argparse
import os.path
import sys
import re
import csv
__metaclass__ = type
class RFC4180(csv.Dialect):
    # Define the dialect as class attributes so the csv module actually picks
    # them up (assigning to local variables inside __init__ had no effect).
    delimiter = b','
    doublequote = True
    escapechar = None
    lineterminator = b'\r\n'
    quotechar = b'"'
    quoting = csv.QUOTE_MINIMAL
    skipinitialspace = False
    strict = True
def parseargs():
pa = argparse.ArgumentParser(description=
'Exports multiple CSV files from an Excel *.xlsx Workbook')
pa.add_argument('-f', metavar='EXCELFILE',
help='The Excel file to export. ' +
'If omitted, a graphical file chooser will be used.')
pa.add_argument('-o', metavar='OUTPUTDIRECTORY',
help='The output directory. Default is the current ' +
'directory if EXCELFILE was given, otherwise a ' +
'file chooser will be used as well.')
args = pa.parse_args(sys.argv[1:])
return vars(args)
def _stringify(dat):
if not isinstance(dat, basestring):
return str(dat).encode('utf-8')
else:
return dat.encode('utf-8')
def _transmap(dat):
transmap = {
# empty cells are going to be empty strings
None: '',
# workaround for bug in openpyxl
# https://bitbucket.org/openpyxl/openpyxl/issues/674/
dt.datetime(1899, 12, 30, 0, 0): dt.time(0, 0),
dt.datetime(1899, 12, 31, 0, 0): dt.datetime(1900, 1, 1, 0, 0),
}
return transmap[dat] if dat in transmap else dat
def _datefix(dat):
# if typ is datetime.datetime and time-part is 0:0:0,
# covert to datetime.date (assume xlsx cell-type is "Date").
if (type(dat) == dt.datetime and
(dat.hour, dat.minute, dat.second) == (0, 0, 0)):
dat = dat.date()
return dat
def transform(l):
l = [_transmap(f) for f in l]
l = [_datefix(f) for f in l]
l = [_stringify(f) for f in l]
return l
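# Illustrative behaviour of transform() on one worksheet row (values assumed):
#   transform([None, dt.datetime(2016, 5, 1, 0, 0), 3.5, u'text'])
#   -> ['', '2016-05-01', '3.5', 'text']
# i.e. empty cells become '', midnight datetimes collapse to dates, and every
# item ends up as a UTF-8 encoded string ready for the csv writer.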
def write_csv(data, outfile):
with open(outfile, 'wb') as fout:
writer = csv.writer(fout, dialect='RFC4180')
writer.writerows(data)
def main():
csv.register_dialect(u'RFC4180', RFC4180)
home = os.path.expanduser('~')
xlsxfile = parseargs()['f']
out_dir = parseargs()['o']
if xlsxfile is None:
root = Tk()
root.withdraw()
f = tkFileDialog.askopenfile(title='Choose file to convert',
filetypes=[('xlsx', '*.xlsx')],
initialdir=home)
if f:
xlsxfile = f.name
f.close()
else:
sys.exit()
if out_dir is None:
out_dir = tkFileDialog.askdirectory(title='Choose output directory',
initialdir=home)
if not out_dir:
sys.exit()
root.destroy()
if not out_dir:
out_dir = os.getcwd()
out_prefix = os.path.splitext(os.path.basename(xlsxfile))[0]
wb = op.load_workbook(xlsxfile, data_only=True)
for sn in wb.sheetnames:
outfile = os.path.join(out_dir, out_prefix + '_' +
re.sub(r'\s+', '_', sn) + '.csv')
data = []
sheet = wb.get_sheet_by_name(sn)
for l in sheet.values:
data.append(transform(l))
write_csv(data, outfile)
if __name__ == '__main__':
main()
|
agpl-3.0
| 8,526,891,086,674,498,000
| 30.347826
| 82
| 0.561165
| false
| 3.601399
| false
| false
| false
|
rboman/progs
|
bin/powergrep.py
|
1
|
3561
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# try to replace "old_div(a,b)"" by "a/b"
# with a and b being complex expressions involving brackets, etc.
# processes all the python files recursively from the current folder
#
# you must use the script several times
# (it processes 1 "old_div" per line at a time)
# Does not process old_divs spanning several lines such as
# old_div(a,
# b)
import sys, os
import fnmatch, re
import subprocess
def all_files(root,
patterns='*',
skips='*.svn*;*.git*;*build*',
single_level=False,
yield_folders=False):
#self.checkPath(root)
patterns = patterns.split(';')
skips = skips.split(';')
for path, subdirs, files in os.walk(root):
# print('processing folder', path)
if yield_folders:
files.extend(subdirs)
files.sort()
for name in files:
for pattern in patterns:
if fnmatch.fnmatch(name, pattern):
fullname = os.path.join(path, name)
ok = True
for skip in skips:
if fnmatch.fnmatch(fullname, skip):
ok = False
if ok:
yield fullname
break
if single_level:
break
def paren_matcher (n):
# poor man's matched paren scanning, gives up
# after n+1 levels. Matches any string with balanced
# parens inside; add the outer parens yourself if needed.
# Nongreedy.
# https://stackoverflow.com/questions/5454322/python-how-to-match-nested-parentheses-with-regex
return r"[^()]*?(?:\("*n+r"[^()]*?"+r"\)[^()]*?)*?"*n
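# Illustrative example of what the old_div regexp built below captures:
#   "old_div(a * (b + c), d[0])" -> group(1) = "a * (b + c)", group(2) = " d[0]"
# so after .strip() the call is rewritten to "a * (b + c)/d[0]".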
if __name__ == '__main__':
# the regexp
reg = re.compile("old_div\s*\(("+paren_matcher(5)+'),('+paren_matcher(5)+')\)')
# loop recursively on all files with a given extension
for f in all_files(os.getcwd(), patterns='*.py;*.pyw'):
#print('f=',f)
        # read the whole file
file = open(f, mode='r', encoding='utf-8')
try:
alllines = file.readlines()
except:
print(f'\nERROR: file {f} contains non-unicode characters!\n')
raise
file.close()
newlines = []
modified = False
for l in alllines:
m = reg.search(l)
if m:
print(f"match found in {f}")
g = m.groups()
if len(g)!=2:
                    raise Exception(f"=> ERROR: {len(g)} arguments found instead of 2!")
else:
#print(f'\t{m.group(0)} => {g[0].strip()}/{g[1].strip()}')
newl = l.replace(m.group(0), f'{g[0].strip()}/{g[1].strip()}')
print("\told string:", l.rstrip())
print("\tnew string:", newl.rstrip())
newlines.append(newl)
modified = True
else:
newlines.append(l)
if modified:
file = open(f, mode='w', encoding='utf-8')
for l in newlines:
file.write(l)
file.close()
"""
with open(f, "rb") as source:
m = reg.search(s1)
# print(m)
if m:
g = m.groups()
if len(g)!=2:
print ("error:")
print (g)
else:
print(f'{m.group(0)} => {g[0].strip()}/{g[1].strip()}')
print("old string:", s1)
print("new string:", s1.replace(m.group(0), f'{g[0].strip()}/{g[1].strip()}'))
"""
|
apache-2.0
| 5,097,906,595,243,001,000
| 30.236842
| 99
| 0.495366
| false
| 3.764271
| false
| false
| false
|
soupmonkey/pushcoin
|
PoS/payment-processor/settings.py
|
1
|
1339
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Slawomir Lisznianski <sl@minta.com>
#
# GNU General Public Licence (GPL)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59 Temple
# Place, Suite 330, Boston, MA 02111-1307 USA
#
# PushCoin Error Codes from
# https://pushcoin.com/Pub/SDK/ErrorCodes
#
ERR_ACCOUNT_NOT_FOUND=201
ERR_INVALID_CURRENCY=202
ERR_PAYMENT_SIGNATURE_CHECK_FAILED=203
ERR_CRYPTO_FAILURE=204
ERR_INVALID_GRATUITY_TYPE=205
ERR_VALUE_OUT_OF_RANGE=206
ERR_INVALID_RECIPIENT=207
ERR_EXPIRED_PTA=208
ERR_DUPLICATE_PTA=209
ERR_INSUFFICIENT_FUNDS=300
MAX_SCALE_VAL = 6
MERCHANT_MAT = '5bf54dd118bc866567061a2be41860f7b5389f7c'
CURRENCY_CODE = 'USD'
PUSHCOIN_SERVER_URL = 'https://api.pushcoin.com:20001/pcos/'
|
gpl-3.0
| -4,831,188,054,566,369,000
| 33.333333
| 79
| 0.765497
| false
| 3.099537
| false
| false
| false
|
PuzzleboxIO/brainstorms-python
|
setup.py2app.py
|
1
|
3818
|
"""
This is a setup.py script generated by py2applet
Usage:
python2.7 setup.py py2app
"""
from setuptools import setup
APP = ['brainstorms-local.py']
data_files=[ \
(".", \
#("Content/Resources", \
["puzzlebox_brainstorms_configuration.ini"]),
("images", \
["images/puzzlebox.ico", \
"images/puzzlebox.icns", \
"images/puzzlebox_logo.png", \
"images/1-upper_left-orange.png", \
"images/1-upper_left-white.png", \
"images/2-up-orange.png", \
"images/2-up-white.png", \
"images/3-upper_right-orange.png", \
"images/3-upper_right-white.png", \
"images/7-lower_left-orange.png", \
"images/7-lower_left-white.png", \
"images/8-down-orange.png", \
"images/8-down-white.png", \
"images/9-lower_right-orange.png", \
"images/9-lower_right-white.png", \
"images/brainstorms-aileron_left.svg", \
"images/brainstorms-aileron_right.svg", \
"images/brainstorms-elevator_forward.svg", \
"images/brainstorms-elevator_reverse.svg", \
"images/brainstorms-fly_forward.svg", \
"images/brainstorms-hover.svg", \
"images/brainstorms-land_arrow.svg", \
"images/brainstorms-rudder-left.svg", \
"images/brainstorms-rudder-right.svg", \
"images/brainstorms_stop.svg", \
"images/brainstorms_wheelchair_forward.svg", \
"images/brainstorms_wheelchair_left.svg", \
"images/brainstorms_wheelchair_reverse.svg", \
"images/brainstorms_wheelchair_right.svg", \
"images/braintorms-throttle_up.svg", \
"images/puzzlebox_helicopter.svg", \
]), \
#("qt_menu.nib", \
#["/opt/local/lib/Resources/qt_menu.nib/classes.nib", \
#"/opt/local/lib/Resources/qt_menu.nib/info.nib", \
#"/opt/local/lib/Resources/qt_menu.nib/keyedobjects.nib", \
#]), \
]
data_files=[]
OPTIONS = { \
#'argv_emulation': True, \
'argv_emulation': False, \
'iconfile': 'images/puzzlebox.icns', \
'strip': True, \
# Semi-standalone is an option you can enable with py2app that makes
# your code reliant on the version of Python that is installed with the OS.
# You also need to enable site-packages, as well (which apparently encourages
# py2app to create the links to Python necessary for getting the bundle up
# and running, although it's only supposed to tell it to include the
# system and user site-packages in the system path)
# http://beckism.com/2009/03/pyobjc_tips/
#'semi_standalone': True, \
#'site_packages': True, \
'includes': [ \
'PySide.QtSvg', \
], \
'excludes': ['PyQt4', 'sip'], \
'frameworks': [ \
"/opt/local/share/qt4/plugins/imageformats/libqjpeg.dylib", \
"/opt/local/share/qt4/plugins/imageformats/libqgif.dylib", \
"/opt/local/share/qt4/plugins/imageformats/libqico.dylib", \
"/opt/local/share/qt4/plugins/imageformats/libqmng.dylib", \
"/opt/local/share/qt4/plugins/imageformats/libqsvg.dylib", \
"/opt/local/share/qt4/plugins/imageformats/libqtiff.dylib", \
], \
"resources": [ \
"puzzlebox_brainstorms_configuration.ini", \
#"images/puzzlebox.ico", \
#"/opt/local/lib/Resources/qt_menu.nib/classes.nib", \
#"/opt/local/lib/Resources/qt_menu.nib/info.nib", \
#"/opt/local/lib/Resources/qt_menu.nib/keyedobjects.nib", \
], \
}
setup(
name='Puzzlebox Brainstorms',
version='0.8.0',
description='Puzzlebox Brainstorms provides Brain-Computer Interface (BCI) controls for robots and devices',
author='Steve Castellotti',
author_email='sc@puzzlebox.info',
url='http://brainstorms.puzzlebox.info',
classifiers=[ \
'Development Status :: 5 - Production/Stable',
'Intended Audience :: End Users/Desktop',
'Programming Language :: Python',
'Operating System :: OS Independent',
'License :: Commercial',
'Topic :: Scientific/Engineering :: Human Machine Interfaces',
],
app=APP,
data_files=data_files,
options={'py2app': OPTIONS},
setup_requires=['py2app'],
)
|
agpl-3.0
| -7,486,098,163,901,852,000
| 30.04065
| 109
| 0.680723
| false
| 2.666201
| false
| false
| false
|
nmih/ssbio
|
ssbio/databases/pdb.py
|
1
|
34959
|
"""
PDBProp
=======
"""
import gzip
import json
import logging
import os.path as op
import mmtf
import os
from cobra.core import DictList
import pandas as pd
import requests
import deprecation
from Bio.PDB import PDBList
from lxml import etree
from six.moves.urllib_error import URLError
from six.moves.urllib.request import urlopen, urlretrieve
import ssbio.databases.pisa as pisa
import ssbio.utils
from ssbio.protein.structure.structprop import StructProp
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
log = logging.getLogger(__name__)
class PDBProp(StructProp):
"""Store information about a protein structure from the Protein Data Bank.
Extends the :class:`~ssbio.protein.structure.structprop.StructProp` class to allow initialization of the structure
by its PDB ID, and then enabling downloads of the structure file as well as parsing its metadata.
Args:
ident (str):
description (str):
chains (str):
mapped_chains (str):
structure_path (str):
file_type (str): ``pdb``, ``mmCif``, ``xml``, ``mmtf`` - file type for files downloaded from the PDB
"""
def __init__(self, ident, description=None, chains=None, mapped_chains=None, structure_path=None, file_type=None):
StructProp.__init__(self, ident, description=description, chains=chains, mapped_chains=mapped_chains,
is_experimental=True, structure_path=structure_path, file_type=file_type)
self.experimental_method = None
self.resolution = None
self.date = None
self.taxonomy_name = None
self.biological_assemblies = DictList()
"""DictList: A list for storing Bioassembly objects related to this PDB ID"""
def download_structure_file(self, outdir, file_type=None, load_header_metadata=True, force_rerun=False):
"""Download a structure file from the PDB, specifying an output directory and a file type. Optionally download
the mmCIF header file and parse data from it to store within this object.
Args:
outdir (str): Path to output directory
file_type (str): ``pdb``, ``mmCif``, ``xml``, ``mmtf`` - file type for files downloaded from the PDB
load_header_metadata (bool): If header metadata should be loaded into this object, fastest with mmtf files
force_rerun (bool): If structure file should be downloaded even if it already exists
"""
ssbio.utils.double_check_attribute(object=self, setter=file_type, backup_attribute='file_type',
custom_error_text='Please set file type to be downloaded from the PDB: '
'pdb, mmCif, xml, or mmtf')
# XTODO: check if outfile exists using ssbio.utils.force_rerun, pdblist seems to take long if it exists
# I know why - it's because we're renaming the ent to pdb. need to have mapping from file type to final extension
# Then check if file exists, if not then download again
p = PDBList()
with ssbio.utils.suppress_stdout():
structure_file = p.retrieve_pdb_file(pdb_code=self.id, pdir=outdir, file_format=file_type, overwrite=force_rerun)
if not op.exists(structure_file):
log.debug('{}: {} file not available'.format(self.id, file_type))
raise URLError('{}.{}: file not available to download'.format(self.id, file_type))
else:
log.debug('{}: {} file saved'.format(self.id, file_type))
# Rename .ent files to .pdb
if file_type == 'pdb':
new_name = structure_file.replace('pdb', '').replace('ent', 'pdb')
os.rename(structure_file, new_name)
structure_file = new_name
self.load_structure_path(structure_file, file_type)
if load_header_metadata and file_type == 'mmtf':
self.update(parse_mmtf_header(structure_file))
if load_header_metadata and file_type != 'mmtf':
self.update(parse_mmcif_header(download_mmcif_header(pdb_id=self.id, outdir=outdir, force_rerun=force_rerun)))
def get_pisa_complex_predictions(self, outdir, existing_pisa_multimer_xml=None):
if not existing_pisa_multimer_xml:
pisa_xmls = pisa.download_pisa_multimers_xml(pdb_ids=self.id, outdir=outdir,
save_single_xml_files=True)
else:
pisa_xmls = {}
pisa_xmls[self.id] = existing_pisa_multimer_xml
pisa_dict = pisa.parse_pisa_multimers_xml(pisa_xmls[self.id], download_structures=True,
outdir=outdir)
    def __json_encode__(self):
        # TODO: investigate why saving with # does not work!
        to_return = {}
        for x in self.__dict__.keys():
            if x == 'pdb_title' or x == 'description':
                sanitized = ssbio.utils.force_string(getattr(self, x)).replace('#', '-')
                to_return.update({x: sanitized})
            else:
                to_return.update({x: getattr(self, x)})
        return to_return
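# Rough usage sketch for PDBProp (PDB ID and output directory are illustrative):
#
#   my_structure = PDBProp(ident='1kf6')
#   my_structure.download_structure_file(outdir='/tmp/structures', file_type='mmtf')
#   print(my_structure.resolution, my_structure.experimental_method)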
def parse_mmtf_header(infile):
"""Parse an MMTF file and return basic header-like information.
Args:
infile (str): Path to MMTF file
Returns:
dict: Dictionary of parsed header
Todo:
- Can this be sped up by not parsing the 3D coordinate info somehow?
- OR just store the sequences when this happens since it is already being parsed.
"""
infodict = {}
mmtf_decoder = mmtf.parse(infile)
infodict['date'] = mmtf_decoder.deposition_date
infodict['release_date'] = mmtf_decoder.release_date
try:
infodict['experimental_method'] = [x.decode() for x in mmtf_decoder.experimental_methods]
except AttributeError:
infodict['experimental_method'] = [x for x in mmtf_decoder.experimental_methods]
infodict['resolution'] = mmtf_decoder.resolution
infodict['description'] = mmtf_decoder.title
group_name_exclude = ['HOH']
chem_comp_type_exclude = ['l-peptide linking', 'peptide linking']
chemicals = list(set([mmtf_decoder.group_list[idx]['groupName'] for idx in mmtf_decoder.group_type_list if mmtf_decoder.group_list[idx]['chemCompType'].lower() not in chem_comp_type_exclude and mmtf_decoder.group_list[idx]['groupName'] not in group_name_exclude]))
infodict['chemicals'] = chemicals
return infodict
def download_mmcif_header(pdb_id, outdir='', force_rerun=False):
"""Download a mmCIF header file from the RCSB PDB by ID.
Args:
pdb_id: PDB ID
outdir: Optional output directory, default is current working directory
force_rerun: If the file should be downloaded again even if it exists
Returns:
str: Path to outfile
"""
# TODO: keep an eye on https://github.com/biopython/biopython/pull/943 Biopython PR#493 for functionality of this
# method in biopython. extra file types have not been added to biopython download yet
pdb_id = pdb_id.lower()
file_type = 'cif'
folder = 'header'
outfile = op.join(outdir, '{}.header.{}'.format(pdb_id, file_type))
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
download_link = 'http://files.rcsb.org/{}/{}.{}'.format(folder, pdb_id, file_type)
urlretrieve(download_link, outfile)
log.debug('{}: saved header file'.format(outfile))
else:
log.debug('{}: header file already saved'.format(outfile))
return outfile
def parse_mmcif_header(infile):
"""Parse a couple important fields from the mmCIF file format with some manual curation of ligands.
If you want full access to the mmCIF file just use the MMCIF2Dict class in Biopython.
Args:
infile: Path to mmCIF file
Returns:
dict: Dictionary of parsed header
"""
from Bio.PDB.MMCIF2Dict import MMCIF2Dict
newdict = {}
try:
mmdict = MMCIF2Dict(infile)
except ValueError as e:
log.exception(e)
return newdict
chemical_ids_exclude = ['HOH']
chemical_types_exclude = ['l-peptide linking','peptide linking']
if '_struct.title' in mmdict:
newdict['pdb_title'] = mmdict['_struct.title']
else:
log.debug('{}: No title field'.format(infile))
if '_struct.pdbx_descriptor' in mmdict:
newdict['description'] = mmdict['_struct.pdbx_descriptor']
else:
log.debug('{}: no description field'.format(infile))
if '_pdbx_database_status.recvd_initial_deposition_date' in mmdict:
newdict['date'] = mmdict['_pdbx_database_status.recvd_initial_deposition_date']
elif '_database_PDB_rev.date' in mmdict:
newdict['date'] = mmdict['_database_PDB_rev.date']
else:
log.debug('{}: no date field'.format(infile))
if '_exptl.method' in mmdict:
newdict['experimental_method'] = mmdict['_exptl.method']
else:
log.debug('{}: no experimental method field'.format(infile))
# TODO: refactor how to get resolutions based on experimental method
if '_refine.ls_d_res_high' in mmdict:
try:
if isinstance(mmdict['_refine.ls_d_res_high'], list):
newdict['resolution'] = [float(x) for x in mmdict['_refine.ls_d_res_high']]
else:
newdict['resolution'] = float(mmdict['_refine.ls_d_res_high'])
except:
try:
newdict['resolution'] = float(mmdict['_em_3d_reconstruction.resolution'])
except:
log.debug('{}: no resolution field'.format(infile))
else:
log.debug('{}: no resolution field'.format(infile))
if '_chem_comp.id' in mmdict:
chemicals_filtered = ssbio.utils.filter_list_by_indices(mmdict['_chem_comp.id'],
ssbio.utils.not_find(mmdict['_chem_comp.type'],
chemical_types_exclude,
case_sensitive=False))
        chemicals_filtered = ssbio.utils.filter_list(chemicals_filtered, chemical_ids_exclude, case_sensitive=True)
        newdict['chemicals'] = chemicals_filtered
else:
log.debug('{}: no chemical composition field'.format(infile))
if '_entity_src_gen.pdbx_gene_src_scientific_name' in mmdict:
newdict['taxonomy_name'] = mmdict['_entity_src_gen.pdbx_gene_src_scientific_name']
else:
log.debug('{}: no organism field'.format(infile))
return newdict
def download_sifts_xml(pdb_id, outdir='', force_rerun=False):
"""Download the SIFTS file for a PDB ID.
Args:
pdb_id (str): PDB ID
outdir (str): Output directory, current working directory if not specified.
force_rerun (bool): If the file should be downloaded again even if it exists
Returns:
str: Path to downloaded file
"""
baseURL = 'ftp://ftp.ebi.ac.uk/pub/databases/msd/sifts/xml/'
filename = '{}.xml.gz'.format(pdb_id.lower())
outfile = op.join(outdir, filename.split('.')[0] + '.sifts.xml')
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
response = urlopen(baseURL + filename)
with open(outfile, 'wb') as f:
f.write(gzip.decompress(response.read()))
return outfile
def map_uniprot_resnum_to_pdb(uniprot_resnum, chain_id, sifts_file):
"""Map a UniProt residue number to its corresponding PDB residue number.
This function requires that the SIFTS file be downloaded,
and also a chain ID (as different chains may have different mappings).
Args:
uniprot_resnum (int): integer of the residue number you'd like to map
chain_id (str): string of the PDB chain to map to
sifts_file (str): Path to the SIFTS XML file
Returns:
(tuple): tuple containing:
mapped_resnum (int): Mapped residue number
is_observed (bool): Indicates if the 3D structure actually shows the residue
"""
# Load the xml with lxml
parser = etree.XMLParser(ns_clean=True)
tree = etree.parse(sifts_file, parser)
root = tree.getroot()
my_pdb_resnum = None
# TODO: "Engineered_Mutation is also a possible annotation, need to figure out what to do with that
my_pdb_annotation = False
# Find the right chain (entities in the xml doc)
ent = './/{http://www.ebi.ac.uk/pdbe/docs/sifts/eFamily.xsd}entity'
for chain in root.findall(ent):
# TODO: IMPORTANT - entityId is not the chain ID!!! it is just in alphabetical order!
if chain.attrib['entityId'] == chain_id:
# Find the "crossRefDb" tag that has the attributes dbSource="UniProt" and dbResNum="your_resnum_here"
# Then match it to the crossRefDb dbResNum that has the attribute dbSource="PDBresnum"
# Check if uniprot + resnum even exists in the sifts file (it won't if the pdb doesn't contain the residue)
ures = './/{http://www.ebi.ac.uk/pdbe/docs/sifts/eFamily.xsd}crossRefDb[@dbSource="UniProt"][@dbResNum="%s"]' % uniprot_resnum
my_uniprot_residue = chain.findall(ures)
if len(my_uniprot_residue) == 1:
# Get crossRefDb dbSource="PDB"
parent = my_uniprot_residue[0].getparent()
pres = './/{http://www.ebi.ac.uk/pdbe/docs/sifts/eFamily.xsd}crossRefDb[@dbSource="PDB"]'
my_pdb_residue = parent.findall(pres)
my_pdb_resnum = int(my_pdb_residue[0].attrib['dbResNum'])
# Get <residueDetail dbSource="PDBe" property="Annotation">
# Will be Not_Observed if it is not seen in the PDB
anno = './/{http://www.ebi.ac.uk/pdbe/docs/sifts/eFamily.xsd}residueDetail[@dbSource="PDBe"][@property="Annotation"]'
my_pdb_annotation = parent.findall(anno)
if len(my_pdb_annotation) == 1:
my_pdb_annotation = my_pdb_annotation[0].text
if my_pdb_annotation == 'Not_Observed':
my_pdb_annotation = False
else:
my_pdb_annotation = True
else:
return None, False
return my_pdb_resnum, my_pdb_annotation
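# Rough usage sketch for the SIFTS helpers above (IDs and paths are illustrative):
#
#   sifts_xml = download_sifts_xml('1kf6', outdir='/tmp')
#   pdb_resnum, observed = map_uniprot_resnum_to_pdb(100, 'A', sifts_xml)
#   # pdb_resnum is None when the residue has no mapping in that chain;
#   # observed is False when the residue is annotated as Not_Observed.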
def best_structures(uniprot_id, outname=None, outdir=None, seq_ident_cutoff=0.0, force_rerun=False):
"""Use the PDBe REST service to query for the best PDB structures for a UniProt ID.
More information found here: https://www.ebi.ac.uk/pdbe/api/doc/sifts.html
Link used to retrieve results: https://www.ebi.ac.uk/pdbe/api/mappings/best_structures/:accession
The list of PDB structures mapping to a UniProt accession sorted by coverage of the protein and, if the same, resolution.
Here is the ranking algorithm described by the PDB paper:
https://nar.oxfordjournals.org/content/44/D1/D385.full
"Finally, a single quality indicator is also calculated for each entry by taking the harmonic average
of all the percentile scores representing model and model-data-fit quality measures and then subtracting
10 times the numerical value of the resolution (in Angstrom) of the entry to ensure that resolution plays
a role in characterising the quality of a structure. This single empirical 'quality measure' value is used
by the PDBe query system to sort results and identify the 'best' structure in a given context. At present,
entries determined by methods other than X-ray crystallography do not have similar data quality information
available and are not considered as 'best structures'."
Args:
uniprot_id (str): UniProt Accession ID
outname (str): Basename of the output file of JSON results
outdir (str): Path to output directory of JSON results
seq_ident_cutoff (float): Cutoff results based on percent coverage (in decimal form)
force_rerun (bool): Obtain best structures mapping ignoring previously downloaded results
Returns:
list: Rank-ordered list of dictionaries representing chain-specific PDB entries. Keys are:
* pdb_id: the PDB ID which maps to the UniProt ID
* chain_id: the specific chain of the PDB which maps to the UniProt ID
* coverage: the percent coverage of the entire UniProt sequence
* resolution: the resolution of the structure
* start: the structure residue number which maps to the start of the mapped sequence
* end: the structure residue number which maps to the end of the mapped sequence
* unp_start: the sequence residue number which maps to the structure start
* unp_end: the sequence residue number which maps to the structure end
* experimental_method: type of experiment used to determine structure
* tax_id: taxonomic ID of the protein's original organism
"""
outfile = ''
if not outdir:
outdir = ''
# If an output dir is specified but no outname, use the UniProt ID as the file basename
if not outname and outdir:
outname = uniprot_id
if outname:
outname = op.join(outdir, outname)
outfile = '{}.json'.format(outname)
# Load a possibly existing json file
if not ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
with open(outfile, 'r') as f:
raw_data = json.load(f)
log.debug('{}: loaded existing json file'.format(uniprot_id))
# Otherwise run the web request
else:
# TODO: add a checker for a cached file of uniprot -> PDBs - can be generated within gempro pipeline and stored
response = requests.get('https://www.ebi.ac.uk/pdbe/api/mappings/best_structures/{}'.format(uniprot_id),
data={'key': 'value'})
if response.status_code == 404:
log.debug('{}: 404 returned, probably no structures available.'.format(uniprot_id))
raw_data = {uniprot_id: {}}
else:
log.debug('{}: Obtained best structures'.format(uniprot_id))
raw_data = response.json()
# Write the json file if specified
if outfile:
with open(outfile, 'w') as f:
json.dump(raw_data, f)
log.debug('{}: Saved json file of best structures'.format(uniprot_id))
data = dict(raw_data)[uniprot_id]
# Filter for sequence identity percentage
if seq_ident_cutoff != 0:
    # Build a new list instead of removing items from `data` while iterating over it
    data = [result for result in data if result['coverage'] >= seq_ident_cutoff]
return data
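# A minimal usage sketch (not part of the original module), assuming network access
# to the PDBe REST service. The UniProt accession below is only an example value.
def _example_best_structures():
    """Illustrative only: consume the rank-ordered list from best_structures()."""
    ranked = best_structures('P0ABP8', seq_ident_cutoff=0.3)  # hypothetical accession
    for hit in ranked:
        print('{}_{}: coverage={:.0%}, resolution={}'.format(
            hit['pdb_id'], hit['chain_id'], hit['coverage'], hit['resolution']))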
def blast_pdb(seq, outfile='', outdir='', evalue=0.0001, seq_ident_cutoff=0.0, link=False, force_rerun=False):
"""Returns a list of BLAST hits of a sequence to available structures in the PDB.
Args:
seq (str): Your sequence, in string format
outfile (str): Name of output file
outdir (str, optional): Path to output directory. Default is the current directory.
evalue (float, optional): Cutoff for the E-value - filters for significant hits. 0.001 is liberal, 0.0001 is stringent (default).
seq_ident_cutoff (float, optional): Cutoff results based on percent coverage (in decimal form)
link (bool, optional): Set to True if a link to the HTML results should be displayed
force_rerun (bool, optional): If existing BLAST results should not be used, set to True. Default is False
Returns:
list: Rank ordered list of BLAST hits in dictionaries.
"""
if len(seq) < 12:
raise ValueError('Sequence must be at least 12 residues long.')
if link:
page = 'PDB results page: http://www.rcsb.org/pdb/rest/getBlastPDB1?sequence={}&eCutOff={}&maskLowComplexity=yes&matrix=BLOSUM62&outputFormat=HTML'.format(seq, evalue)
print(page)
parser = etree.XMLParser(ns_clean=True)
outfile = op.join(outdir, outfile)
if ssbio.utils.force_rerun(force_rerun, outfile):
# Run the BLAST web request if force_rerun is set or no existing results file is found
page = 'http://www.rcsb.org/pdb/rest/getBlastPDB1?sequence={}&eCutOff={}&maskLowComplexity=yes&matrix=BLOSUM62&outputFormat=XML'.format(
seq, evalue)
req = requests.get(page)
if req.status_code == 200:
response = req.text
# Save the XML file
if outfile:
with open(outfile, 'w') as f:
f.write(response)
# Parse the XML string
tree = etree.ElementTree(etree.fromstring(response, parser))
log.debug('Loaded BLAST results from REST server')
else:
log.error('BLAST request failed with status code {}'.format(req.status_code))
return []
else:
tree = etree.parse(outfile, parser)
log.debug('{}: Loaded existing BLAST XML results'.format(outfile))
# Get length of original sequence to calculate percentages
len_orig = float(len(seq))
root = tree.getroot()
hit_list = []
for hit in root.findall('BlastOutput_iterations/Iteration/Iteration_hits/Hit'):
info = {}
hitdef = hit.find('Hit_def')
if hitdef is not None:
info['hit_pdb'] = hitdef.text.split('|')[0].split(':')[0].lower()
info['hit_pdb_chains'] = hitdef.text.split('|')[0].split(':')[2].split(',')
# One PDB can align to different parts of the sequence
# Will just choose the top hit for this single PDB
hsp = hit.findall('Hit_hsps/Hsp')[0]
# Number of identical residues
hspi = hsp.find('Hsp_identity')
if hspi is not None:
info['hit_num_ident'] = int(hspi.text)
info['hit_percent_ident'] = int(hspi.text)/len_orig
if int(hspi.text)/len_orig < seq_ident_cutoff:
log.debug('{}: does not meet sequence identity cutoff'.format(hitdef.text.split('|')[0].split(':')[0]))
continue
# Number of similar residues (positive hits)
hspp = hsp.find('Hsp_positive')
if hspp is not None:
info['hit_num_similar'] = int(hspp.text)
info['hit_percent_similar'] = int(hspp.text) / len_orig
# Total number of gaps (unable to align in either query or subject)
hspg = hsp.find('Hsp_gaps')
if hspg is not None:
info['hit_num_gaps'] = int(hspg.text)
info['hit_percent_gaps'] = int(hspg.text) / len_orig
# E-value of BLAST
hspe = hsp.find('Hsp_evalue')
if hspe is not None:
info['hit_evalue'] = float(hspe.text)
# Score of BLAST
hsps = hsp.find('Hsp_score')
if hsps is not None:
info['hit_score'] = float(hsps.text)
hit_list.append(info)
log.debug("{}: Number of BLAST hits".format(len(hit_list)))
return hit_list
def blast_pdb_df(blast_results):
"""Make a dataframe of BLAST results"""
cols = ['hit_pdb', 'hit_pdb_chains', 'hit_evalue', 'hit_score', 'hit_num_ident', 'hit_percent_ident',
'hit_num_similar', 'hit_percent_similar', 'hit_num_gaps', 'hit_percent_gaps']
return pd.DataFrame.from_records(blast_results, columns=cols)
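# A minimal usage sketch (not part of the original module) chaining blast_pdb() and
# blast_pdb_df(). The sequence is an arbitrary placeholder longer than 12 residues;
# running this requires access to the RCSB BLAST REST endpoint.
def _example_blast_pdb():
    """Illustrative only: BLAST a sequence against the PDB and tabulate the hits."""
    seq = 'MSKGEELFTGVVPILVELDGDVNGHKFSVSG'  # placeholder sequence
    hits = blast_pdb(seq, evalue=0.001, seq_ident_cutoff=0.5)
    df = blast_pdb_df(hits)
    print(df[['hit_pdb', 'hit_score', 'hit_evalue']].head())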
def _property_table():
"""Download the PDB -> resolution table directly from the RCSB PDB REST service.
See the other fields that you can get here: http://www.rcsb.org/pdb/results/reportField.do
Returns:
Pandas DataFrame: table of structureId as the index, resolution and experimentalTechnique as the columns
"""
url = 'http://www.rcsb.org/pdb/rest/customReport.csv?pdbids=*&customReportColumns=structureId,resolution,experimentalTechnique,releaseDate&service=wsfile&format=csv'
r = requests.get(url)
p = pd.read_csv(StringIO(r.text)).set_index('structureId')
return p
def get_resolution(pdb_id):
"""Quick way to get the resolution of a PDB ID using the table of results from the REST service
Returns infinity if the resolution is not available.
Returns:
float: resolution of a PDB ID in Angstroms
TODO:
- Unit test
"""
pdb_id = pdb_id.upper()
if pdb_id not in _property_table().index:
raise ValueError('PDB ID not in property table')
else:
resolution = _property_table().loc[pdb_id, 'resolution']
if pd.isnull(resolution):
log.debug('{}: no resolution available, probably not an X-ray crystal structure'.format(pdb_id))
resolution = float('inf')
return resolution
def get_release_date(pdb_id):
"""Quick way to get the release date of a PDB ID using the table of results from the REST service
Returns None if the release date is not available.
Returns:
str: Release date of a PDB ID
"""
pdb_id = pdb_id.upper()
if pdb_id not in _property_table().index:
raise ValueError('PDB ID not in property table')
else:
release_date = _property_table().loc[pdb_id, 'releaseDate']
if pd.isnull(release_date):
log.debug('{}: no release date available'.format(pdb_id))
release_date = None
return release_date
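# A minimal usage sketch (not part of the original module). Note that each call to
# get_resolution()/get_release_date() re-downloads the full property table, so for
# many lookups it is cheaper to call _property_table() once and index it directly.
def _example_pdb_properties():
    """Illustrative only: look up resolution and release date for the 1HV4 example."""
    print('Resolution: {} A'.format(get_resolution('1hv4')))
    print('Released: {}'.format(get_release_date('1hv4')))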
def get_num_bioassemblies(pdb_id, cache=False, outdir=None, force_rerun=False):
"""Check if there are bioassemblies using the PDB REST API, and if there are, get the number of bioassemblies
available.
See: https://www.rcsb.org/pages/webservices/rest, section 'List biological assemblies'
Not all PDB entries have biological assemblies available and some have multiple. Details that are necessary to
recreate a biological assembly from the asymmetric unit can be accessed from the following requests.
- Number of biological assemblies associated with a PDB entry
- Access the transformation information needed to generate a biological assembly (nr=0 will return information
for the asymmetric unit, nr=1 will return information for the first assembly, etc.)
A query of https://www.rcsb.org/pdb/rest/bioassembly/nrbioassemblies?structureId=1hv4 returns this::
<nrBioAssemblies structureId="1HV4" hasAssemblies="true" count="2"/>
Args:
pdb_id (str): PDB ID
cache (bool): If the XML file should be downloaded
outdir (str): If cache, then specify the output directory
force_rerun (bool): If cache, and if file exists, specify if API should be queried again
"""
parser = etree.XMLParser(ns_clean=True)
if not outdir:
outdir = os.getcwd()
outfile = op.join(outdir, '{}_nrbiomols.xml'.format(pdb_id))
if ssbio.utils.force_rerun(force_rerun, outfile):
page = 'https://www.rcsb.org/pdb/rest/bioassembly/nrbioassemblies?structureId={}'.format(pdb_id)
req = requests.get(page)
if req.status_code == 200:
response = req.text
# Save the XML file
if cache:
with open(outfile, 'w') as f:
f.write(response)
# Parse the XML string
tree = etree.ElementTree(etree.fromstring(response, parser))
log.debug('Loaded bioassembly information from REST server')
else:
log.error('Request failed with status code {}'.format(req.status_code))
req.raise_for_status()
else:
tree = etree.parse(outfile, parser)
log.debug('{}: Loaded existing XML results'.format(outfile))
r = tree.getroot()
has_biomols = r.get('hasAssemblies')
if has_biomols == 'true':
has_biomols = True
else:
has_biomols = False
if has_biomols:
num_biomols = r.get('count')
else:
num_biomols = 0
num_biomols = int(num_biomols)
return num_biomols
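# A minimal usage sketch (not part of the original module), assuming the RCSB REST
# endpoint is reachable. Uses the 1HV4 example from the docstring above.
def _example_bioassembly_count():
    """Illustrative only: count the bioassemblies reported for a PDB entry."""
    n = get_num_bioassemblies('1hv4')
    print('1hv4 has {} biological assemblies'.format(n))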
def get_bioassembly_info(pdb_id, biomol_num, cache=False, outdir=None, force_rerun=False):
"""Get metadata about a bioassembly from the RCSB PDB's REST API.
See: https://www.rcsb.org/pdb/rest/bioassembly/bioassembly?structureId=1hv4&nr=1
The API returns an XML file containing the information on a biological assembly that looks like this::
<bioassembly structureId="1HV4" assemblyNr="1" method="PISA" desc="author_and_software_defined_assembly">
<transformations operator="1" chainIds="A,B,C,D">
<transformation index="1">
<matrix m11="1.00000000" m12="0.00000000" m13="0.00000000" m21="0.00000000" m22="1.00000000" m23="0.00000000" m31="0.00000000" m32="0.00000000" m33="1.00000000"/>
<shift v1="0.00000000" v2="0.00000000" v3="0.00000000"/>
</transformation>
</transformations>
</bioassembly>
Args:
pdb_id (str): PDB ID
biomol_num (int): Biological assembly number you are interested in
cache (bool): If the XML file should be downloaded
outdir (str): If cache, then specify the output directory
force_rerun (bool): If cache, and if file exists, specify if API should be queried again
"""
parser = etree.XMLParser(ns_clean=True)
#
# if not outdir:
# outdir = os.getcwd()
# outfile = op.join(outdir, '{}.xml'.format(self.id))
#
# if ssbio.utils.force_rerun(force_rerun, outfile):
# page = 'https://www.rcsb.org/pdb/rest/bioassembly/bioassembly?structureId={}&nr={}'.format(
# self.original_pdb_id, biomol_num)
# req = requests.get(page)
#
# if req.status_code == 200:
# response = req.text
#
# # Save the XML file
# if cache:
# with open(outfile, 'w') as f:
# f.write(response)
#
# # Parse the XML string
# r = xmltodict.parse(response)
# log.debug('Loaded bioassembly information from REST server')
# else:
# log.error('Request timed out')
# req.raise_for_status()
# else:
# with open(outfile, 'r') as f:
# r = xmltodict.parse(f.read())
# log.debug('{}: Loaded existing XML results'.format(outfile))
#
# self.biomol_to_chain_dict[biomol_num] = {'chains': r['bioassembly']['transformations']['@chainIds'],
# 'multiplier': len(r['bioassembly']['transformations']['transformation'])}
# # TODO: figure out how to store matrices etc.
#
# log.info('{}_{}: ')
def download_biomol(pdb_id, biomol_num, outdir, file_type='pdb', force_rerun=False):
import zlib
from six.moves.urllib_error import URLError
from six.moves.urllib.request import urlopen, urlretrieve
import contextlib
ssbio.utils.make_dir(outdir)
server_folder = pdb_id[1:3]
if file_type == 'pdb':
# server = 'ftp://ftp.wwpdb.org/pub/pdb/data/biounit/coordinates/divided/{}/'.format(server_folder)
server = 'https://files.rcsb.org/download/'
server_filename = pdb_id + '.pdb%i.gz' % biomol_num
local_filename = pdb_id + '_bio%i.pdb' % biomol_num
outfile = op.join(outdir, local_filename)
elif file_type.lower() == 'mmcif' or file_type.lower() == 'cif':
server = 'ftp://ftp.wwpdb.org/pub/pdb/data/biounit/mmCIF/divided/{}/'.format(server_folder)
server_filename = pdb_id + '-assembly%i.cif.gz' % biomol_num
local_filename = pdb_id + '_bio%i.cif' % biomol_num
outfile = op.join(outdir, local_filename)
else:
raise ValueError('Biological assembly only available in PDB or mmCIF file types.')
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
download_link = op.join(server, server_filename)
try:
with contextlib.closing(urlopen(download_link)) as f:
decompressed_data = zlib.decompress(f.read(), 16 + zlib.MAX_WBITS)
with open(op.join(outdir, local_filename), 'wb') as f:
f.write(decompressed_data)
except URLError as e:
print(e)
return None
return outfile
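# A minimal usage sketch (not part of the original module): fetch the first biological
# assembly of the 1HV4 example as a PDB file. './structures' is a hypothetical path.
def _example_download_biomol():
    """Illustrative only: download one biological assembly."""
    outfile = download_biomol('1hv4', biomol_num=1, outdir='./structures', file_type='pdb')
    print('Saved biological assembly to {}'.format(outfile))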
########################################################################################################################
########################################################################################################################
# DEPRECATED FUNCTIONS
########################################################################################################################
########################################################################################################################
@deprecation.deprecated(deprecated_in="1.0", removed_in="2.0",
details="Use Biopython's PDBList.retrieve_pdb_file function instead")
def download_structure(pdb_id, file_type, outdir='', only_header=False, force_rerun=False):
"""Download a structure from the RCSB PDB by ID. Specify the file type desired.
Args:
pdb_id: PDB ID
file_type: pdb, pdb.gz, mmcif, cif, cif.gz, xml.gz, mmtf, mmtf.gz
outdir: Optional output directory
only_header: If only the header file should be downloaded
force_rerun: If the file should be downloaded again even if it exists
Returns:
str: Path to outfile
"""
# method in biopython. extra file types have not been added to biopython download yet
pdb_id = pdb_id.lower()
file_type = file_type.lower()
file_types = ['pdb', 'pdb.gz', 'mmcif', 'cif', 'cif.gz', 'xml.gz', 'mmtf', 'mmtf.gz']
if file_type not in file_types:
raise ValueError('Invalid file type, must be one of: pdb, pdb.gz, mmcif, cif, cif.gz, xml.gz, mmtf, mmtf.gz')
if file_type == 'mmtf':
file_type = 'mmtf.gz'
if file_type.endswith('.gz'):
gzipped = True
else:
gzipped = False
if file_type == 'mmcif':
file_type = 'cif'
if only_header:
folder = 'header'
outfile = op.join(outdir, '{}.header.{}'.format(pdb_id, file_type))
else:
folder = 'download'
outfile = op.join(outdir, '{}.{}'.format(pdb_id, file_type))
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
if file_type == 'mmtf.gz' or file_type == 'mmtf':
mmtf_api = '1.0'
download_link = 'http://mmtf.rcsb.org/v{}/full/{}.mmtf.gz'.format(mmtf_api, pdb_id)
else:
download_link = 'http://files.rcsb.org/{}/{}.{}'.format(folder, pdb_id, file_type)
urlretrieve(download_link, outfile)
if gzipped:
outfile = ssbio.utils.gunzip_file(infile=outfile,
outfile=outfile.strip('.gz'),
outdir=outdir,
delete_original=False,
force_rerun_flag=force_rerun)
log.debug('{}: saved structure file'.format(outfile))
else:
if file_type == 'mmtf.gz':
outfile = op.join(outdir, '{}.{}'.format(pdb_id, 'mmtf'))
log.debug('{}: structure file already saved'.format(outfile))
return outfile
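# A minimal sketch (not part of the original module) of the Biopython replacement
# suggested by the deprecation note above. Assumes Biopython is installed; the
# output directory is a hypothetical path.
def _example_retrieve_pdb_file():
    """Illustrative only: download a structure with Biopython's PDBList."""
    from Bio.PDB import PDBList
    pdbl = PDBList()
    return pdbl.retrieve_pdb_file('1hv4', pdir='./structures', file_format='pdb')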
|
mit
| 1,119,046,539,905,341,600
| 40.225236
| 268
| 0.615778
| false
| 3.782623
| false
| false
| false
|
tensorflow/probability
|
tensorflow_probability/python/optimizer/linesearch/hager_zhang.py
|
1
|
30378
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Implements the Hager-Zhang inexact line search algorithm.
Line searches are a central component for many optimization algorithms (e.g.
BFGS, conjugate gradient etc). Most of the sophisticated line search methods
aim to find a step length in a given search direction so that the step length
satisfies the
[Wolfe conditions](https://en.wikipedia.org/wiki/Wolfe_conditions).
[Hager-Zhang 2006](https://epubs.siam.org/doi/abs/10.1137/030601880)
algorithm is a refinement of the commonly used
[More-Thuente](https://dl.acm.org/citation.cfm?id=192132) algorithm.
This module implements the Hager-Zhang algorithm.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import prefer_static
from tensorflow_probability.python.optimizer.linesearch.internal import hager_zhang_lib as hzl
__all__ = [
'hager_zhang',
]
def _machine_eps(dtype):
"""Returns the machine epsilon for the supplied dtype."""
dtype = dtype_util.as_numpy_dtype(tf.as_dtype(dtype))
return np.finfo(dtype).eps
HagerZhangLineSearchResult = collections.namedtuple(
'HagerZhangLineSearchResults', [
'converged', # Whether a point satisfying Wolfe/Approx wolfe was found.
'failed', # Whether the line search failed. It can fail if either the
# objective function or the gradient are not finite at
# an evaluation point.
'func_evals', # Number of function evaluations made.
'iterations', # Number of line search iterations made.
'left', # The left end point of the final bracketing interval.
# If converged is True, it is equal to `right`.
# Otherwise, it corresponds to the last interval computed.
'right' # The right end point of the final bracketing interval.
# If converged is True, it is equal to `left`.
# Otherwise, it corresponds to the last interval computed.
])
def hager_zhang(value_and_gradients_function,
initial_step_size=None,
value_at_initial_step=None,
value_at_zero=None,
converged=None,
threshold_use_approximate_wolfe_condition=1e-6,
shrinkage_param=0.66,
expansion_param=5.0,
sufficient_decrease_param=0.1,
curvature_param=0.9,
max_iterations=50,
name=None):
"""The Hager Zhang line search algorithm.
Performs an inexact line search based on the algorithm of
[Hager and Zhang (2006)][2].
The univariate objective function `value_and_gradients_function` is typically
generated by projecting a multivariate objective function along a search
direction. Suppose the multivariate function to be minimized is
`g(x1,x2, .. xn)`. Let (d1, d2, ..., dn) be the direction along which we wish
to perform a line search. Then the projected univariate function to be used
for line search is
```None
f(a) = g(x1 + d1 * a, x2 + d2 * a, ..., xn + dn * a)
```
The directional derivative along (d1, d2, ..., dn) is needed for this
procedure. This also corresponds to the derivative of the projected function
`f(a)` with respect to `a`. Note that this derivative must be negative for
`a = 0` if the direction is a descent direction.
The usual stopping criterion for the line search is the satisfaction of the
(weak) Wolfe conditions. For details of the Wolfe conditions, see
ref. [3]. On a finite precision machine, the exact Wolfe conditions can
be difficult to satisfy when one is very close to the minimum and as argued
by [Hager and Zhang (2005)][1], one can only expect the minimum to be
determined within square root of machine precision. To improve the situation,
they propose to replace the Wolfe conditions with an approximate version
depending on the derivative of the function which is applied only when one
is very close to the minimum. The following algorithm implements this
enhanced scheme.
### Usage:
Primary use of line search methods is as an internal component of a class of
optimization algorithms (called line search based methods as opposed to
trust region methods). Hence, the end user will typically not want to access
line search directly. In particular, inexact line search should not be
confused with a univariate minimization method. The stopping criterion of line
search is the satisfaction of Wolfe conditions and not the discovery of the
minimum of the function.
With this caveat in mind, the following example illustrates the standalone
usage of the line search.
```python
# Define value and gradient namedtuple
ValueAndGradient = namedtuple('ValueAndGradient', ['x', 'f', 'df'])
# Define a quadratic target with minimum at 1.3.
def value_and_gradients_function(x):
return ValueAndGradient(x=x, f=(x - 1.3) ** 2, df=2 * (x-1.3))
# Set initial step size.
step_size = tf.constant(0.1)
ls_result = tfp.optimizer.linesearch.hager_zhang(
value_and_gradients_function, initial_step_size=step_size)
# Evaluate the results.
with tf.Session() as session:
results = session.run(ls_result)
# Ensure convergence.
assert results.converged
# If the line search converged, the left and the right ends of the
# bracketing interval are identical.
assert results.left.x == results.right.x
# Print the number of evaluations and the final step size.
print ("Final Step Size: %f, Evaluations: %d" % (results.left.x,
results.func_evals))
```
### References:
[1]: William Hager, Hongchao Zhang. A new conjugate gradient method with
guaranteed descent and an efficient line search. SIAM J. Optim., Vol 16. 1,
pp. 170-172. 2005.
https://www.math.lsu.edu/~hozhang/papers/cg_descent.pdf
[2]: William Hager, Hongchao Zhang. Algorithm 851: CG_DESCENT, a conjugate
gradient method with guaranteed descent. ACM Transactions on Mathematical
Software, Vol 32., 1, pp. 113-137. 2006.
http://users.clas.ufl.edu/hager/papers/CG/cg_compare.pdf
[3]: Jorge Nocedal, Stephen Wright. Numerical Optimization. Springer Series in
Operations Research. pp 33-36. 2006
Args:
value_and_gradients_function: A Python callable that accepts a real scalar
tensor and returns a namedtuple with the fields 'x', 'f', and 'df' that
correspond to scalar tensors of real dtype containing the point at which
the function was evaluated, the value of the function, and its
derivative at that point. The other namedtuple fields, if present,
should be tensors or sequences (possibly nested) of tensors.
In usual optimization application, this function would be generated by
projecting the multivariate objective function along some specific
direction. The direction is determined by some other procedure but should
be a descent direction (i.e. the derivative of the projected univariate
function must be negative at 0.).
Alternatively, the function may represent the batching of `n` such line
functions (e.g. projecting a single multivariate objective function along
`n` distinct directions at once) accepting n points as input, i.e. a
tensor of shape [n], and the fields 'x', 'f' and 'df' in the returned
namedtuple should each be a tensor of shape [n], with the corresponding
input points, function values, and derivatives at those input points.
initial_step_size: (Optional) Scalar positive `Tensor` of real dtype, or
a tensor of shape [n] in batching mode. The initial value (or values) to
try to bracket the minimum. Default is `1.` as a float32.
Note that this point need not necessarily bracket the minimum for the line
search to work correctly but the supplied value must be greater than 0.
A good initial value will make the search converge faster.
value_at_initial_step: (Optional) The full return value of evaluating
value_and_gradients_function at initial_step_size, i.e. a namedtuple with
'x', 'f', 'df', if already known by the caller. If supplied the value of
`initial_step_size` will be ignored, otherwise the tuple will be computed
by evaluating value_and_gradients_function.
value_at_zero: (Optional) The full return value of
value_and_gradients_function at `0.`, i.e. a namedtuple with
'x', 'f', 'df', if already known by the caller. If not supplied the tuple
will be computed by evaluating value_and_gradients_function.
converged: (Optional) In batching mode a tensor of shape [n], indicating
batch members which have already converged and no further search should
be performed. These batch members are also reported as converged in the
output, and both their `left` and `right` are set to the
`value_at_initial_step`.
threshold_use_approximate_wolfe_condition: Scalar positive `Tensor`
of real dtype. Corresponds to the parameter 'epsilon' in
[Hager and Zhang (2006)][2]. Used to estimate the
threshold at which the line search switches to approximate Wolfe
conditions.
shrinkage_param: Scalar positive Tensor of real dtype. Must be less than
`1.`. Corresponds to the parameter `gamma` in
[Hager and Zhang (2006)][2].
If the secant**2 step does not shrink the bracketing interval by this
proportion, a bisection step is performed to reduce the interval width.
expansion_param: Scalar positive `Tensor` of real dtype. Must be greater
than `1.`. Used to expand the initial interval in case it does not bracket
a minimum. Corresponds to `rho` in [Hager and Zhang (2006)][2].
sufficient_decrease_param: Positive scalar `Tensor` of real dtype.
Bounded above by the curvature param. Corresponds to `delta` in the
terminology of [Hager and Zhang (2006)][2].
curvature_param: Positive scalar `Tensor` of real dtype. Bounded above
by `1.`. Corresponds to 'sigma' in the terminology of
[Hager and Zhang (2006)][2].
max_iterations: Positive scalar `Tensor` of integral dtype or None. The
maximum number of iterations to perform in the line search. The number of
iterations used to bracket the minimum are also counted against this
parameter.
name: (Optional) Python str. The name prefixed to the ops created by this
function. If not supplied, the default name 'hager_zhang' is used.
Returns:
results: A namedtuple containing the following attributes.
converged: Boolean `Tensor` of shape [n]. Whether a point satisfying
Wolfe/Approx wolfe was found.
failed: Boolean `Tensor` of shape [n]. Whether line search failed e.g.
if either the objective function or the gradient are not finite at
an evaluation point.
iterations: Scalar int32 `Tensor`. Number of line search iterations made.
func_evals: Scalar int32 `Tensor`. Number of function evaluations made.
left: A namedtuple, as returned by value_and_gradients_function,
of the left end point of the final bracketing interval. Values are
equal to those of `right` on batch members where converged is True.
Otherwise, it corresponds to the last interval computed.
right: A namedtuple, as returned by value_and_gradients_function,
of the right end point of the final bracketing interval. Values are
equal to those of `left` on batch members where converged is True.
Otherwise, it corresponds to the last interval computed.
"""
with tf.name_scope(name or 'hager_zhang'):
val_0, val_initial, f_lim, prepare_evals = _prepare_args(
value_and_gradients_function,
initial_step_size,
value_at_initial_step,
value_at_zero,
threshold_use_approximate_wolfe_condition)
valid_inputs = (hzl.is_finite(val_0) & (val_0.df < 0) &
tf.math.is_finite(val_initial.x) & (val_initial.x > 0))
if converged is None:
init_converged = tf.zeros_like(valid_inputs) # i.e. all false.
else:
init_converged = tf.convert_to_tensor(converged)
failed = ~init_converged & ~valid_inputs
init_interval = HagerZhangLineSearchResult(
converged=init_converged,
failed=failed,
func_evals=prepare_evals,
iterations=tf.convert_to_tensor(0),
left=val_0,
right=hzl.val_where(init_converged, val_0, val_initial))
def _apply_bracket_and_search():
"""Bracketing and searching to do for valid inputs."""
return _bracket_and_search(
value_and_gradients_function, init_interval, f_lim, max_iterations,
shrinkage_param, expansion_param, sufficient_decrease_param,
curvature_param)
init_active = ~init_interval.failed & ~init_interval.converged
return prefer_static.cond(
tf.reduce_any(init_active),
_apply_bracket_and_search,
lambda: init_interval)
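# A minimal sketch (not part of the original module) of the batched mode described in
# the docstring above: two independent quadratics searched in one call. The minima at
# 1.3 and 2.0 are arbitrary example values.
def _example_batched_hager_zhang():
  """Illustrative only: batched line search over two quadratic objectives."""
  ValueAndGradient = collections.namedtuple('ValueAndGradient', ['x', 'f', 'df'])
  minima = tf.constant([1.3, 2.0])
  def fn(x):
    return ValueAndGradient(x=x, f=(x - minima)**2, df=2. * (x - minima))
  # Batched initial step sizes, one per objective.
  return hager_zhang(fn, initial_step_size=tf.constant([0.1, 0.1]))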
_LineSearchInnerResult = collections.namedtuple('_LineSearchInnerResult', [
'iteration',
'found_wolfe',
'failed',
'num_evals',
'left',
'right'])
def _bracket_and_search(
value_and_gradients_function,
init_interval,
f_lim,
max_iterations,
shrinkage_param,
expansion_param,
sufficient_decrease_param,
curvature_param):
"""Brackets the minimum and performs a line search.
Args:
value_and_gradients_function: A Python callable that accepts a real scalar
tensor and returns a namedtuple with the fields 'x', 'f', and 'df' that
correspond to scalar tensors of real dtype containing the point at which
the function was evaluated, the value of the function, and its
derivative at that point. The other namedtuple fields, if present,
should be tensors or sequences (possibly nested) of tensors.
In usual optimization application, this function would be generated by
projecting the multivariate objective function along some specific
direction. The direction is determined by some other procedure but should
be a descent direction (i.e. the derivative of the projected univariate
function must be negative at 0.).
Alternatively, the function may represent the batching of `n` such line
functions (e.g. projecting a single multivariate objective function along
`n` distinct directions at once) accepting n points as input, i.e. a
tensor of shape [n], and the fields 'x', 'f' and 'df' in the returned
namedtuple should each be a tensor of shape [n], with the corresponding
input points, function values, and derivatives at those input points.
init_interval: Instance of `HagerZhangLineSearchResults` containing
the initial line search interval. The gradient of init_interval.left must
be negative (i.e. must be a descent direction), while init_interval.right
must be positive and finite.
f_lim: Scalar `Tensor` of float dtype.
max_iterations: Positive scalar `Tensor` of integral dtype. The maximum
number of iterations to perform in the line search. The number of
iterations used to bracket the minimum are also counted against this
parameter.
shrinkage_param: Scalar positive Tensor of real dtype. Must be less than
`1.`. Corresponds to the parameter `gamma` in [Hager and Zhang (2006)][2].
expansion_param: Scalar positive `Tensor` of real dtype. Must be greater
than `1.`. Used to expand the initial interval in case it does not bracket
a minimum. Corresponds to `rho` in [Hager and Zhang (2006)][2].
sufficient_decrease_param: Positive scalar `Tensor` of real dtype.
Bounded above by the curvature param. Corresponds to `delta` in the
terminology of [Hager and Zhang (2006)][2].
curvature_param: Positive scalar `Tensor` of real dtype. Bounded above
by `1.`. Corresponds to 'sigma' in the terminology of
[Hager and Zhang (2006)][2].
Returns:
A namedtuple containing the following fields.
converged: Boolean `Tensor` of shape [n]. Whether a point satisfying
Wolfe/Approx wolfe was found.
failed: Boolean `Tensor` of shape [n]. Whether line search failed e.g.
if either the objective function or the gradient are not finite at
an evaluation point.
iterations: Scalar int32 `Tensor`. Number of line search iterations made.
func_evals: Scalar int32 `Tensor`. Number of function evaluations made.
left: A namedtuple, as returned by value_and_gradients_function,
of the left end point of the updated bracketing interval.
right: A namedtuple, as returned by value_and_gradients_function,
of the right end point of the updated bracketing interval.
"""
bracket_result = hzl.bracket(value_and_gradients_function, init_interval,
f_lim, max_iterations, expansion_param)
converged = init_interval.converged | _very_close(
bracket_result.left.x, bracket_result.right.x)
# We fail if we have not yet converged but already exhausted all iterations.
exhausted_iterations = ~converged & (
bracket_result.iteration >= max_iterations)
line_search_args = HagerZhangLineSearchResult(
converged=converged,
failed=bracket_result.failed | exhausted_iterations,
iterations=bracket_result.iteration,
func_evals=bracket_result.num_evals,
left=bracket_result.left,
right=bracket_result.right)
return _line_search_after_bracketing(
value_and_gradients_function, line_search_args, init_interval.left,
f_lim, max_iterations, sufficient_decrease_param, curvature_param,
shrinkage_param)
def _line_search_after_bracketing(
value_and_gradients_function,
search_interval,
val_0,
f_lim,
max_iterations,
sufficient_decrease_param,
curvature_param,
shrinkage_param):
"""The main loop of line search after the minimum has been bracketed.
Args:
value_and_gradients_function: A Python callable that accepts a real scalar
tensor and returns a namedtuple with the fields 'x', 'f', and 'df' that
correspond to scalar tensors of real dtype containing the point at which
the function was evaluated, the value of the function, and its
derivative at that point. The other namedtuple fields, if present,
should be tensors or sequences (possibly nested) of tensors.
In usual optimization application, this function would be generated by
projecting the multivariate objective function along some specific
direction. The direction is determined by some other procedure but should
be a descent direction (i.e. the derivative of the projected univariate
function must be negative at 0.).
Alternatively, the function may represent the batching of `n` such line
functions (e.g. projecting a single multivariate objective function along
`n` distinct directions at once) accepting n points as input, i.e. a
tensor of shape [n], and the fields 'x', 'f' and 'df' in the returned
namedtuple should each be a tensor of shape [n], with the corresponding
input points, function values, and derivatives at those input points.
search_interval: Instance of `HagerZhangLineSearchResults` containing
the current line search interval.
val_0: A namedtuple as returned by value_and_gradients_function evaluated
at `0.`. The gradient must be negative (i.e. must be a descent direction).
f_lim: Scalar `Tensor` of float dtype.
max_iterations: Positive scalar `Tensor` of integral dtype. The maximum
number of iterations to perform in the line search. The number of
iterations used to bracket the minimum are also counted against this
parameter.
sufficient_decrease_param: Positive scalar `Tensor` of real dtype.
Bounded above by the curvature param. Corresponds to `delta` in the
terminology of [Hager and Zhang (2006)][2].
curvature_param: Positive scalar `Tensor` of real dtype. Bounded above
by `1.`. Corresponds to 'sigma' in the terminology of
[Hager and Zhang (2006)][2].
shrinkage_param: Scalar positive Tensor of real dtype. Must be less than
`1.`. Corresponds to the parameter `gamma` in [Hager and Zhang (2006)][2].
Returns:
A namedtuple containing the following fields.
converged: Boolean `Tensor` of shape [n]. Whether a point satisfying
Wolfe/Approx wolfe was found.
failed: Boolean `Tensor` of shape [n]. Whether line search failed e.g.
if either the objective function or the gradient are not finite at
an evaluation point.
iterations: Scalar int32 `Tensor`. Number of line search iterations made.
func_evals: Scalar int32 `Tensor`. Number of function evaluations made.
left: A namedtuple, as returned by value_and_gradients_function,
of the left end point of the updated bracketing interval.
right: A namedtuple, as returned by value_and_gradients_function,
of the right end point of the updated bracketing interval.
"""
def _loop_cond(curr_interval):
"""Loop condition."""
active = ~(curr_interval.converged | curr_interval.failed)
return (curr_interval.iterations <
max_iterations) & tf.reduce_any(active)
def _loop_body(curr_interval):
"""The loop body."""
secant2_raw_result = hzl.secant2(
value_and_gradients_function, val_0, curr_interval, f_lim,
sufficient_decrease_param, curvature_param)
secant2_result = HagerZhangLineSearchResult(
converged=secant2_raw_result.converged,
failed=secant2_raw_result.failed,
iterations=curr_interval.iterations + 1,
func_evals=secant2_raw_result.num_evals,
left=secant2_raw_result.left,
right=secant2_raw_result.right)
should_check_shrinkage = ~(secant2_result.converged | secant2_result.failed)
def _do_check_shrinkage():
"""Check if interval has shrinked enough."""
old_width = curr_interval.right.x - curr_interval.left.x
new_width = secant2_result.right.x - secant2_result.left.x
sufficient_shrinkage = new_width < old_width * shrinkage_param
func_is_flat = (
_very_close(curr_interval.left.f, curr_interval.right.f) &
_very_close(secant2_result.left.f, secant2_result.right.f))
new_converged = (
should_check_shrinkage & sufficient_shrinkage & func_is_flat)
needs_inner_bisect = should_check_shrinkage & ~sufficient_shrinkage
inner_bisect_args = secant2_result._replace(
converged=secant2_result.converged | new_converged)
def _apply_inner_bisect():
return _line_search_inner_bisection(
value_and_gradients_function, inner_bisect_args,
needs_inner_bisect, f_lim)
return prefer_static.cond(
tf.reduce_any(needs_inner_bisect),
_apply_inner_bisect,
lambda: inner_bisect_args)
next_args = prefer_static.cond(
tf.reduce_any(should_check_shrinkage),
_do_check_shrinkage,
lambda: secant2_result)
interval_shrunk = (
~next_args.failed & _very_close(next_args.left.x, next_args.right.x))
return [next_args._replace(converged=next_args.converged | interval_shrunk)]
return tf.while_loop(
cond=_loop_cond,
body=_loop_body,
loop_vars=[search_interval],
parallel_iterations=1)[0]
def _line_search_inner_bisection(
value_and_gradients_function,
search_interval,
active,
f_lim):
"""Performs bisection and updates the interval."""
midpoint = (search_interval.left.x + search_interval.right.x) / 2
val_mid = value_and_gradients_function(midpoint)
is_valid_mid = hzl.is_finite(val_mid)
still_active = active & is_valid_mid
new_failed = active & ~is_valid_mid
next_interval = search_interval._replace(
failed=search_interval.failed | new_failed,
func_evals=search_interval.func_evals + 1)
def _apply_update():
update_result = hzl.update(
value_and_gradients_function, next_interval.left, next_interval.right,
val_mid, f_lim, active=still_active)
return HagerZhangLineSearchResult(
converged=next_interval.converged,
failed=next_interval.failed | update_result.failed,
iterations=next_interval.iterations + update_result.iteration,
func_evals=next_interval.func_evals + update_result.num_evals,
left=update_result.left,
right=update_result.right)
return prefer_static.cond(
tf.reduce_any(still_active), _apply_update, lambda: next_interval)
def _prepare_args(value_and_gradients_function,
initial_step_size,
val_initial,
val_0,
approximate_wolfe_threshold):
"""Prepares the arguments for the line search initialization.
Args:
value_and_gradients_function: A Python callable that accepts a real scalar
tensor and returns a namedtuple with the fields 'x', 'f', and 'df' that
correspond to scalar tensors of real dtype containing the point at which
the function was evaluated, the value of the function, and its
derivative at that point. The other namedtuple fields, if present,
should be tensors or sequences (possibly nested) of tensors.
In usual optimization application, this function would be generated by
projecting the multivariate objective function along some specific
direction. The direction is determined by some other procedure but should
be a descent direction (i.e. the derivative of the projected univariate
function must be negative at 0.).
Alternatively, the function may represent the batching of `n` such line
functions (e.g. projecting a single multivariate objective function along
`n` distinct directions at once) accepting n points as input, i.e. a
tensor of shape [n], and the fields 'x', 'f' and 'df' in the returned
namedtuple should each be a tensor of shape [n], with the corresponding
input points, function values, and derivatives at those input points.
initial_step_size: Scalar positive `Tensor` of real dtype, or a tensor of
shape [n] in batching mode. The initial value (or values) to try to
bracket the minimum. Default is `1.` as a float32.
Note that this point need not necessarily bracket the minimum for the line
search to work correctly but the supplied value must be greater than 0.
A good initial value will make the search converge faster.
val_initial: The full return value of evaluating
value_and_gradients_function at initial_step_size, i.e. a namedtuple with
'x', 'f', 'df', if already known by the caller. If not None the value of
`initial_step_size` will be ignored, otherwise the tuple will be computed
by evaluating value_and_gradients_function.
val_0: The full return value of value_and_gradients_function at `0.`, i.e.
a namedtuple with 'x', 'f', 'df', if already known by the caller. If None
the tuple will be computed by evaluating value_and_gradients_function.
approximate_wolfe_threshold: Scalar positive `Tensor` of
real dtype. Corresponds to the parameter 'epsilon' in
[Hager and Zhang (2006)][2]. Used to estimate the
threshold at which the line search switches to approximate Wolfe
conditions.
Returns:
left: A namedtuple, as returned by value_and_gradients_function,
containing the value and derivative of the function at `0.`.
val_initial: A namedtuple, as returned by value_and_gradients_function,
containing the value and derivative of the function at
`initial_step_size`.
f_lim: Real `Tensor` of shape [n]. The function value threshold for
the approximate Wolfe conditions to be checked.
eval_count: Scalar int32 `Tensor`. The number of target function
evaluations made by this function.
"""
eval_count = 0
if val_initial is None:
if initial_step_size is not None:
initial_step_size = tf.convert_to_tensor(initial_step_size)
else:
initial_step_size = np.float32(1.)
val_initial = value_and_gradients_function(initial_step_size)
eval_count += 1
if val_0 is None:
x_0 = tf.zeros_like(val_initial.x)
val_0 = value_and_gradients_function(x_0)
eval_count += 1
f_lim = val_0.f + (approximate_wolfe_threshold * tf.math.abs(val_0.f))
return val_0, val_initial, f_lim, tf.convert_to_tensor(eval_count)
def _very_close(x, y):
return tf.math.nextafter(x, y) >= y
def _to_str(x):
"""Converts a bool tensor to a string with True/False values."""
x = tf.convert_to_tensor(x)
if x.dtype == tf.bool:
return tf.where(x, 'True', 'False')
return x
# A convenience function useful while debugging in the graph mode.
def _print(pass_through_tensor, values):
"""Wrapper for tf.Print which supports lists and namedtuples for printing."""
flat_values = []
for value in values:
# Checks if it is a namedtuple.
if hasattr(value, '_fields'):
for field in value._fields:
flat_values.extend([field, _to_str(getattr(value, field))])
continue
if isinstance(value, (list, tuple)):
for v in value:
flat_values.append(_to_str(v))
continue
flat_values.append(_to_str(value))
return tf.Print(pass_through_tensor, flat_values)
|
apache-2.0
| 6,853,210,817,681,248,000
| 46.539906
| 94
| 0.697511
| false
| 3.993427
| false
| false
| false
|
tonyduckles/svn2svn
|
svn2svn/run/parse.py
|
1
|
2731
|
""" optparser helper functions """
import optparse
import textwrap
class HelpFormatter(optparse.IndentedHelpFormatter):
"""
Modified version of certain optparse.IndentedHelpFormatter methods:
* Respect line-breaks in parser.description and option.help_text
* Vertically-align long_opts
Inspired by: http://groups.google.com/group/comp.lang.python/browse_thread/thread/6df6e6b541a15bc2/09f28e26af0699b1?pli=1
"""
def format_description(self, description):
if not description: return ""
desc_width = self.width - self.current_indent
indent = " "*self.current_indent
bits = description.split('\n')
formatted_bits = [
textwrap.fill(bit,
desc_width,
initial_indent=indent,
subsequent_indent=indent)
for bit in bits]
result = "\n".join(formatted_bits) + "\n"
return result
def format_option_strings(self, option):
"""Return a comma-separated list of option strings & metavariables."""
if option.takes_value():
metavar = option.metavar or option.dest.upper()
short_opts = [("%s" % (sopt)) if option._long_opts else \
(self._short_opt_fmt % (sopt, metavar))
for sopt in option._short_opts]
long_opts = [self._long_opt_fmt % (lopt, metavar)
for lopt in option._long_opts]
else:
short_opts = option._short_opts
long_opts = option._long_opts
return (" " if not short_opts else "")+(", ".join(short_opts + long_opts))
def format_option(self, option):
result = []
opts = self.option_strings[option]
opt_width = self.help_position - self.current_indent - 2
if len(opts) > opt_width:
opts = "%*s%s\n" % (self.current_indent, "", opts)
indent_first = self.help_position
else: # start help on same line as opts
opts = "%*s%-*s " % (self.current_indent, "", opt_width, opts)
indent_first = 0
result.append(opts)
if option.help:
help_text = self.expand_default(option)
help_lines = []
for para in help_text.split("\n"):
help_lines.extend(textwrap.wrap(para, self.help_width))
result.append("%*s%s\n" % (indent_first, "", help_lines[0]))
result.extend(["%*s%s\n" % (self.help_position, "", line)
for line in help_lines[1:]])
elif opts[-1] != "\n":
result.append("\n")
return "".join(result)
def format_usage(self, usage):
return usage
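# A minimal usage sketch (not part of the original module): wire the custom
# HelpFormatter into an OptionParser so that line breaks in descriptions and help
# text survive in --help output. The option shown is hypothetical.
def _example_parser():
    """Illustrative only: build a parser that uses HelpFormatter."""
    parser = optparse.OptionParser(
        description="Replay Subversion history.\nLine breaks in this description are preserved.",
        formatter=HelpFormatter())
    parser.add_option("-n", "--dry-run", action="store_true",
                      help="show what would be done\nwithout making any changes")
    parser.print_help()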
|
gpl-3.0
| -6,258,201,666,401,967,000
| 40.378788
| 125
| 0.556573
| false
| 3.912607
| false
| false
| false
|