| text (string, 6-947k chars) | repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) |
|---|---|---|---|---|---|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Software License Agreement (BSD License)
#
# Copyright (c) 2010-2011, Antons Rebguns.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of University of Arizona nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__author__ = 'Antons Rebguns'
__copyright__ = 'Copyright (c) 2010-2011 Antons Rebguns'
__license__ = 'BSD'
__maintainer__ = 'Antons Rebguns'
__email__ = 'anton@email.arizona.edu'
import sys
from optparse import OptionParser
import roslib
roslib.load_manifest('dynamixel_driver')
from dynamixel_driver import dynamixel_io
if __name__ == '__main__':
usage_msg = 'Usage: %prog [options] MOTOR_IDs'
desc_msg = 'Sets various configuration options of specified Dynamixel servo motor.'
epi_msg = 'Example: %s --port=/dev/ttyUSB1 --baud=57600 --baud-rate=1 --return-delay=1 5 9 23' % sys.argv[0]
parser = OptionParser(usage=usage_msg, description=desc_msg, epilog=epi_msg)
parser.add_option('-p', '--port', metavar='PORT', default='/dev/ttyUSB0',
help='motors of specified controllers are connected to PORT [default: %default]')
parser.add_option('-b', '--baud', metavar='BAUD', type='int', default=1000000,
help='connection to serial port will be established at BAUD bps [default: %default]')
parser.add_option('-r', '--baud-rate', type='int', metavar='RATE', dest='baud_rate',
help='set servo motor communication speed')
parser.add_option('-d', '--return-delay', type='int', metavar='DELAY', dest='return_delay',
help='set servo motor return packet delay time')
parser.add_option('--cw-angle-limit', type='int', metavar='CW_ANGLE', dest='cw_angle_limit',
help='set servo motor CW angle limit')
parser.add_option('--ccw-angle-limit', type='int', metavar='CCW_ANGLE', dest='ccw_angle_limit',
help='set servo motor CCW angle limit')
parser.add_option('--min-voltage-limit', type='int', metavar='MIN_VOLTAGE', dest='min_voltage_limit',
help='set servo motor minimum voltage limit')
parser.add_option('--max-voltage-limit', type='int', metavar='MAX_VOLTAGE', dest='max_voltage_limit',
help='set servo motor maximum voltage limit')
(options, args) = parser.parse_args(sys.argv)
print options
if len(args) < 2:
parser.print_help()
exit(1)
port = options.port
baudrate = options.baud
motor_ids = args[1:]
print 'motor ids', motor_ids
try:
dxl_io = dynamixel_io.DynamixelIO(port, baudrate)
except dynamixel_io.SerialOpenError, soe:
print 'ERROR:', soe
else:
for motor_id in motor_ids:
print motor_id, type(motor_id)
motor_id = int(motor_id)
ping_res = dxl_io.ping(motor_id)
if ping_res:
# check if baud rate needs to be changed
if options.baud_rate:
valid_rates = (1,3,4,7,9,16,34,103,207,250,251,252)
if options.baud_rate not in valid_rates:
print 'Requested baud rate is invalid, please use one of the following: %s' % str(valid_rates)
if options.baud_rate <= 207:
print 'Setting baud rate to %d bps' % int(2000000.0/(options.baud_rate + 1))
elif options.baud_rate == 250:
print 'Setting baud rate to %d bps' % 2250000
elif options.baud_rate == 251:
print 'Setting baud rate to %d bps' % 2500000
elif options.baud_rate == 252:
print 'Setting baud rate to %d bps' % 3000000
dxl_io.set_baud_rate(motor_id, options.baud_rate)
# check if return delay time needs to be changed
if options.return_delay is not None:
if options.return_delay < 0 or options.return_delay > 254:
print 'Requested return delay time is out of valid range (0 - 254)'
print 'Setting return delay time to %d us' % (options.return_delay * 2)
dxl_io.set_return_delay_time(motor_id, options.return_delay)
# check if CW angle limit needs to be changed
if options.cw_angle_limit is not None:
print 'Setting CW angle limit to %d' % options.cw_angle_limit
dxl_io.set_angle_limit_cw(motor_id, options.cw_angle_limit)
# check if CCW angle limit needs to be changed
if options.ccw_angle_limit is not None:
print 'Setting CCW angle limit to %d' % options.ccw_angle_limit
dxl_io.set_angle_limit_ccw(motor_id, options.ccw_angle_limit)
else:
print "NOT SETTING CCW ANGLE LIMIT"
# check if minimum voltage limit needs to be changed
if options.min_voltage_limit:
print 'Setting minimum voltage limit to %d' % options.min_voltage_limit
dxl_io.set_voltage_limit_min(motor_id, options.min_voltage_limit)
# check if maximum voltage limit needs to be changed
if options.max_voltage_limit:
print 'Setting maximum voltage limit to %d' % options.max_voltage_limit
dxl_io.set_voltage_limit_max(motor_id, options.max_voltage_limit)
print 'done'
else:
print 'Unable to connect to Dynamixel motor with ID %d' % motor_id
| ufieeehw/IEEE2015 | ros/dynamixel_motor/dynamixel_driver/scripts/set_servo_config.py | Python | gpl-2.0 | 7,282 | 0.006866 |
from direct.distributed import DistributedObject
class DistributedTestObject(DistributedObject.DistributedObject):
def setRequiredField(self, r):
self.requiredField = r
def setB(self, B):
self.B = B
def setBA(self, BA):
self.BA = BA
def setBO(self, BO):
self.BO = BO
def setBR(self, BR):
self.BR = BR
def setBRA(self, BRA):
self.BRA = BRA
def setBRO(self, BRO):
self.BRO = BRO
def setBROA(self, BROA):
self.BROA = BROA
def gotNonReqThatWasntSet(self):
for field in ('B', 'BA', 'BO', 'BR', 'BRA', 'BRO', 'BROA'):
if hasattr(self, field):
return True
return False
| ToontownUprising/src | otp/distributed/DistributedTestObject.py | Python | mit | 717 | 0.001395 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "csrf_example.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| consideratecode/csrf_example | manage.py | Python | mit | 810 | 0 |
from django.http import HttpResponseRedirect
from django.utils.encoding import smart_str
def serve_file(request, file, **kwargs):
"""Serves files by redirecting to file.url (e.g., useful for Amazon S3)"""
return HttpResponseRedirect(smart_str(file.url))
def public_download_url(file, **kwargs):
"""Directs downloads to file.url (useful for normal file system storage)"""
return file.url
| GDGLima/contentbox | third_party/filetransfers/backends/url.py | Python | apache-2.0 | 405 | 0.004938 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from OvmCommonModule import *
import traceback
import time
import re
logger = OvmLogger("OvmNetwork")
class Filter:
class Network:
IFNAME_LO = r'(lo)'
IFNAME_BRIDGE = r'(xenbr\d+|vlan\d+)'
IFNAME_PIF = r'(eth\d+$|bond\d+$)'
IFNAME_VLAN = r'(eth\d+.\d+$|bond\d+.\d+$)'
class Parser(object):
'''
classdocs
'''
def findall(self, pattern, samples):
"""
@param pattern: search pattern
@param result: Parser line execution result
@return : list of search
find result of Parser which has same pattern
findall Parser find all pattern in a string
"""
result = []
for line in samples:
items = re.findall(pattern, line)
for item in items:
result.append(item)
return result
def checkPattern(self, pattern, cmd_result):
"""
@param pattern: search pattern
@param cmd_result: Parser line execution result
@return : True (if pattern is occurred)
"""
for line in cmd_result:
items = re.findall(pattern, line)
if len(items) > 0:
return True
return False
def search(self, cmd_result, pattern):
return None
class OvmVlanDecoder(json.JSONDecoder):
def decode(self, jStr):
deDict = asciiLoads(jStr)
vlan = OvmVlan()
setAttrFromDict(vlan, 'vid', deDict, int)
setAttrFromDict(vlan, 'pif', deDict)
return vlan
class OvmVlanEncoder(json.JSONEncoder):
def default(self, obj):
if not isinstance(obj, OvmVlan): raise Exception("%s is not instance of OvmVlan"%type(obj))
dct = {}
safeDictSet(obj, dct, 'name')
safeDictSet(obj, dct, 'vid')
safeDictSet(obj, dct, 'pif')
return dct
def toOvmVlan(jStr):
return json.loads(jStr, cls=OvmVlanDecoder)
def fromOvmVlan(vlan):
return normalizeToGson(json.dumps(vlan, cls=OvmVlanEncoder))
class OvmBridgeDecoder(json.JSONDecoder):
def decode(self, jStr):
deDic = asciiLoads(jStr)
bridge = OvmBridge()
setAttrFromDict(bridge, 'name', deDic)
setAttrFromDict(bridge, 'attach', deDic)
return bridge
class OvmBridgeEncoder(json.JSONEncoder):
def default(self, obj):
if not isinstance(obj, OvmBridge): raise Exception("%s is not instance of OvmBridge"%type(obj))
dct = {}
safeDictSet(obj, dct, 'name')
safeDictSet(obj, dct, 'attach')
safeDictSet(obj, dct, 'interfaces')
return dct
def toOvmBridge(jStr):
return json.loads(jStr, cls=OvmBridgeDecoder)
def fromOvmBridge(bridge):
return normalizeToGson(json.dumps(bridge, cls=OvmBridgeEncoder))
class OvmInterface(OvmObject):
name = ''
class OvmVlan(OvmInterface):
vid = 0
pif = ''
class OvmBridge(OvmInterface):
attach = ''
interfaces = []
class OvmNetwork(OvmObject):
'''
Network
'''
@property
def pifs(self):
return self._getInterfaces("pif")
@property
def vlans(self):
return self._getInterfaces("vlan")
@property
def bridges(self):
return self._getInterfaces("bridge")
def __init__(self):
self.Parser = Parser()
def _createVlan(self, vlan):
"""
@param jsonString : parameter from client side
@return : succ xxxxx
ex. jsonString => {vid:100, pif:eth0}
ex. return =>
"""
#Pre-condition
#check Physical Interface Name
if vlan.pif not in self.pifs.keys():
msg = "Physical Interface(%s) does not exist" % vlan.pif
logger.debug(self._createVlan, msg)
raise Exception(msg)
#Pre-condition
#check Vlan Interface Name
ifName = "%s.%s" % (vlan.pif, vlan.vid)
if ifName in self.vlans.keys():
msg = "Vlan Interface(%s) already exist, return it" % ifName
logger.debug(self._createVlan, msg)
return self.vlans[ifName]
doCmd(['vconfig', 'add', vlan.pif, vlan.vid])
self.bringUP(ifName)
logger.debug(self._createVlan, "Create vlan %s successfully"%ifName)
return self.vlans[ifName]
def _deleteVlan(self, name):
if name not in self.vlans.keys():
raise Exception("No vlan device %s found"%name)
vlan = self.vlans[name]
self.bringDown(vlan.name)
doCmd(['vconfig', 'rem', vlan.name])
logger.debug(self._deleteVlan, "Delete vlan %s successfully"%vlan.name)
def _createBridge(self, bridge):
"""
@return : success
ex. {bridge:xapi100, attach:eth0.100}
create bridge interface, and attached it
cmd 1: ip link add bridge
cmd 2: ip link set dev
"""
if "xenbr" not in bridge.name and "vlan" not in bridge.name:
raise Exception("Invalid bridge name %s. Bridge name must be in partten xenbr/vlan, e.g. xenbr0"%bridge.name)
#pre-condition
#check Bridge Interface Name
if bridge.name in self.bridges.keys():
msg = "Bridge(%s) already exist, return it" % bridge.name
logger.debug(self._createBridge, msg)
return self.bridges[bridge.name]
#pre-condition
#check attach must exist
#possible to attach in PIF or VLAN
if bridge.attach not in self.vlans.keys() and bridge.attach not in self.pifs.keys():
msg = "%s is not either pif or vlan" % bridge.attach
logger.error(self._createBridge, msg)
raise Exception(msg)
doCmd(['ip', 'link', 'add', 'name', bridge.name, 'type', 'bridge'])
doCmd(['ip', 'link', 'set', 'dev', bridge.attach, 'master', bridge.name])
self.bringUP(bridge.name)
logger.debug(self._createBridge, "Create bridge %s on %s successfully"%(bridge.name, bridge.attach))
return self.bridges[bridge.name]
def _getBridges(self):
return self.bridges.keys()
def _getVlans(self):
return self.vlans.keys()
def _deleteBridge(self, name):
if name not in self.bridges.keys():
raise Exception("Can not find bridge %s"%name)
bridge = self.bridges[name]
if bridge.attach in bridge.interfaces: bridge.interfaces.remove(bridge.attach)
if len(bridge.interfaces) != 0:
logger.debug(self._deleteBridge, "There are still some interfaces(%s) on bridge %s"%(bridge.interfaces, bridge.name))
return False
self.bringDown(bridge.name)
doCmd(['ip', 'link', 'del', bridge.name])
logger.debug(self._deleteBridge, "Delete bridge %s successfully"%bridge.name)
return True
def _getInterfaces(self, type):
"""
@param type : ["pif", "bridge", "tap"]
@return : dictionary of Interface Objects
get All Interfaces based on type
"""
devices = os.listdir('/sys/class/net')
ifs = {}
if type == "pif":
devs = self.Parser.findall(Filter.Network.IFNAME_PIF, devices)
for dev in set(devs):
ifInst = OvmInterface()
ifInst.name = dev
ifs[dev] = ifInst
elif type == "vlan":
devs = self.Parser.findall(Filter.Network.IFNAME_VLAN, devices)
for dev in set(devs):
ifInst = OvmVlan()
ifInst.name = dev
(pif, vid) = dev.split('.')
ifInst.pif = pif
ifInst.vid = vid
ifs[dev] = ifInst
elif type == "bridge":
devs = self.Parser.findall(Filter.Network.IFNAME_BRIDGE, devices)
for dev in set(devs):
ifInst = OvmBridge()
ifInst.name = dev
devs = os.listdir(join('/sys/class/net', dev, 'brif'))
ifInst.interfaces = devs
attches = self.Parser.findall(Filter.Network.IFNAME_PIF, devs) + self.Parser.findall(Filter.Network.IFNAME_VLAN, devs)
if len(attches) > 1: raise Exception("Multiple PIF on bridge %s (%s)"%(dev, attches))
elif len(attches) == 0: ifInst.attach = "null"
elif len(attches) == 1: ifInst.attach = attches[0]
ifs[dev] = ifInst
return ifs
def bringUP(self, ifName):
doCmd(['ifconfig', ifName, 'up'])
def bringDown(self, ifName):
doCmd(['ifconfig', ifName, 'down'])
@staticmethod
def createBridge(jStr):
try:
network = OvmNetwork()
network._createBridge(toOvmBridge(jStr))
return SUCC()
except Exception, e:
errmsg = fmt_err_msg(e)
logger.error(OvmNetwork.createBridge, errmsg)
raise XmlRpcFault(toErrCode(OvmNetwork, OvmNetwork.createBridge), errmsg)
@staticmethod
def deleteBridge(name):
try:
network = OvmNetwork()
network._deleteBridge(name)
return SUCC()
except Exception, e:
errmsg = fmt_err_msg(e)
logger.error(OvmNetwork.deleteBridge, errmsg)
raise XmlRpcFault(toErrCode(OvmNetwork, OvmNetwork.deleteBridge), errmsg)
@staticmethod
def getAllBridges():
try:
network = OvmNetwork()
rs = toGson(network._getBridges())
logger.debug(OvmNetwork.getAllBridges, rs)
return rs
except Exception, e:
errmsg = fmt_err_msg(e)
logger.error(OvmNetwork.getAllBridges, errmsg)
raise XmlRpcFault(toErrCode(OvmNetwork, OvmNetwork.getAllBridges), errmsg)
@staticmethod
def getBridgeByIp(ip):
try:
routes = doCmd(['ip', 'route']).split('\n')
brName = None
for r in routes:
if ip in r and "xenbr" in r or "vlan" in r:
brName = r.split(' ')[2]
break
if not brName: raise Exception("Cannot find bridge with IP %s"%ip)
logger.debug(OvmNetwork.getBridgeByIp, "bridge:%s, ip:%s"%(brName, ip))
return toGson({"bridge":brName})
except Exception, e:
errmsg = fmt_err_msg(e)
logger.error(OvmNetwork.getBridgeByIp, errmsg)
raise XmlRpcFault(toErrCode(OvmNetwork, OvmNetwork.getBridgeByIp), errmsg)
@staticmethod
def getVlans():
try:
network = OvmNetwork()
rs = toGson(network._getVlans())
logger.debug(OvmNetwork.getVlans, rs)
return rs
except Exception, e:
errmsg = fmt_err_msg(e)
logger.error(OvmNetwork.getVlans, errmsg)
raise XmlRpcFault(toErrCode(OvmNetwork, OvmNetwork.getVlans), errmsg)
@staticmethod
def createVlan(jStr):
try:
network = OvmNetwork()
vlan = network._createVlan(toOvmVlan(jStr))
rs = fromOvmVlan(vlan)
logger.debug(OvmNetwork.createVlan, rs)
return rs
except Exception, e:
errmsg = fmt_err_msg(e)
logger.error(OvmNetwork.createVlan, errmsg)
raise XmlRpcFault(toErrCode(OvmNetwork, OvmNetwork.createVlan), errmsg)
@staticmethod
def createVlanBridge(bridgeDetails, vlanDetails):
try:
network = OvmNetwork()
v = toOvmVlan(vlanDetails)
b = toOvmBridge(bridgeDetails)
vlan = network._createVlan(v)
b.attach = vlan.name
network._createBridge(b)
return SUCC()
except Exception, e:
errmsg = fmt_err_msg(e)
logger.error(OvmNetwork.createVlanBridge, errmsg)
raise XmlRpcFault(toErrCode(OvmNetwork, OvmNetwork.createVlanBridge), errmsg)
@staticmethod
def deleteVlanBridge(name):
try:
network = OvmNetwork()
if name not in network.bridges.keys():
logger.debug(OvmNetwork.deleteVlanBridge, "No bridge %s found"%name)
return SUCC()
bridge = network.bridges[name]
vlanName = bridge.attach
if network._deleteBridge(name):
if vlanName != "null":
network._deleteVlan(vlanName)
else:
logger.warning(OvmNetwork.deleteVlanBridge, "Bridge %s has no vlan device"%name)
return SUCC()
except Exception, e:
errmsg = fmt_err_msg(e)
logger.error(OvmNetwork.deleteVlanBridge, errmsg)
raise XmlRpcFault(toErrCode(OvmNetwork, OvmNetwork.deleteVlanBridge), errmsg)
@staticmethod
def getBridgeDetails(name):
try:
network = OvmNetwork()
if name not in network.bridges.keys():
raise Exception("No bridge %s found"%name)
bridge = network.bridges[name]
rs = fromOvmBridge(bridge)
logger.debug(OvmNetwork.getBridgeDetails, rs)
return rs
except Exception, e:
errmsg = fmt_err_msg(e)
logger.error(OvmNetwork.getBridgeDetails, errmsg)
raise XmlRpcFault(toErrCode(OvmNetwork, OvmNetwork.getBridgeDetails), errmsg)
@staticmethod
def deleteVlan(name):
try:
network = OvmNetwork()
network._deleteVlan(name)
return SUCC()
except Exception, e:
errmsg = fmt_err_msg(e)
logger.error(OvmNetwork.deleteVlan, errmsg)
raise XmlRpcFault(toErrCode(OvmNetwork, OvmNetwork.deleteVlan), errmsg)
if __name__ == "__main__":
try:
OvmNetwork.getBridgeDetails(sys.argv[1])
#=======================================================================
# txt = json.dumps({"vid":104, "pif":"eth0"})
# txt2 = json.dumps({"name":"xapi3", "attach":"eth0.104"})
# print nw.createVlan(txt)
# print nw.createBridge(txt2)
#
# nw.deleteBridge("xapi3")
# nw.deleteVlan("eth0.104")
#=======================================================================
except Exception, e:
print e
| GabrielBrascher/cloudstack | plugins/hypervisors/ovm/src/main/scripts/vm/hypervisor/ovm/OvmNetworkModule.py | Python | apache-2.0 | 15,306 | 0.008363 |
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the migratoryBirds function below.
# {1:2, 2:4, 3:3, 4:4}
def migratoryBirds(arr):
frequentBird, frequency = 1, 0
birdsDict = {}
for i in arr:
if i not in birdsDict.keys():
birdsDict[i] = 1
else:
birdsDict[i] = birdsDict[i] + 1
for bird in birdsDict.keys():
if birdsDict[bird] > frequency:
frequency = birdsDict[bird]
frequentBird = bird
if birdsDict[bird] == frequency:
if bird < frequentBird:
frequentBird = bird
return frequentBird
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
arr_count = int(input().strip())
arr = list(map(int, input().rstrip().split()))
result = migratoryBirds(arr)
fptr.write(str(result) + '\n')
fptr.close()
| MithileshCParab/HackerRank-10DaysOfStatistics | Problem Solving/Algorithms/Implementation/migratory_birds.py | Python | apache-2.0 | 925 | 0.007568 |
# Copyright (c) 2015, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals, absolute_import, print_function
import sys
import click
import cProfile
import pstats
import frappe
import frappe.utils
from functools import wraps
from six import StringIO
click.disable_unicode_literals_warning = True
def pass_context(f):
@wraps(f)
def _func(ctx, *args, **kwargs):
profile = ctx.obj['profile']
if profile:
pr = cProfile.Profile()
pr.enable()
ret = f(frappe._dict(ctx.obj), *args, **kwargs)
if profile:
pr.disable()
s = StringIO()
ps = pstats.Stats(pr, stream=s)\
.sort_stats('cumtime', 'tottime', 'ncalls')
ps.print_stats()
# print the top-100
for line in s.getvalue().splitlines()[:100]:
print(line)
return ret
return click.pass_context(_func)
def get_site(context):
try:
site = context.sites[0]
return site
except (IndexError, TypeError):
print('Please specify --site sitename')
sys.exit(1)
def call_command(cmd, context):
return click.Context(cmd, obj=context).forward(cmd)
def get_commands():
# prevent circular imports
from .docs import commands as doc_commands
from .scheduler import commands as scheduler_commands
from .site import commands as site_commands
from .translate import commands as translate_commands
from .utils import commands as utils_commands
return list(set(doc_commands + scheduler_commands + site_commands + translate_commands + utils_commands))
commands = get_commands()
| bohlian/frappe | frappe/commands/__init__.py | Python | mit | 1,535 | 0.024756 |
import csv
import numpy
import pandas
import pymongo
import requests
from datetime import datetime
from io import StringIO
mongo_client = pymongo.MongoClient("localhost", 27017)
financial_db = mongo_client.financial_data
financial_collection = financial_db.data
class Pull:
def __call__(self, source, tickers, start_date, end_date):
if source=='Google':
results = self.google_call(tickers, start_date, end_date)
return results
elif source=='Database':
results = self.database_call(tickers, start_date, end_date)
return results
def google_call(self, tickers, start_date, end_date):
"""
google_call makes a call to the google finance api for historical data
Args:
tickers, start_date, end_date: ticker symbols and date range to fetch
Returns:
results: dict keyed by ticker with price and return data
"""
results = {}
for ticker in tickers:
data_string = "https://www.google.com/finance/historical?q={ticker_symbol}&startdate={start_date}&enddate={end_date}&output=csv".format(
ticker_symbol = ticker,
start_date = start_date,
end_date = end_date
)
df = pandas.read_csv(StringIO(requests.get(data_string).text))
df['Return'] = df.Close - df.Close.shift(-1)
df['DailyPeriodicReturn'] = (df['Return'] / df.Close.shift(-1))
df['ContinuouslyCompoundingDailyPeriodicReturn'] = numpy.log(df.Close / df.Close.shift(-1))
df = df.fillna(0.0)
results[ticker] = {
"symbol": ticker,
"date_added": datetime.utcnow(),
"data": df.to_dict(orient="records"),
"close_prices": list(df.Close.values),
"returns": list(df.Return.values),
"daily_periodic_return": list(df.DailyPeriodicReturn.values),
"continuous_daily_periodic_return": list(df.ContinuouslyCompoundingDailyPeriodicReturn.values),
"start_date": start_date,
"end_date": end_date,
"url": data_string
}
return results
def database_call(self, tickers, start_date, end_date):
"""
database_call makes a call to mongodb for the latest data
Args:
tickers, start_date, end_date: ticker symbols and date range to look up
"""
results = {}
for ticker in tickers:
results[ticker] = financial_collection.find({
"ticker": ticker})[:][0]
return results
| tchitchikov/goulash | python/pullData/src/pull.py | Python | apache-2.0 | 2,524 | 0.004358 |
#
# MLDB-2126-export-structured.py
# Mathieu Marquis Bolduc, 2017-01-25
# This file is part of MLDB. Copyright 2017 mldb.ai inc. All rights reserved.
#
import tempfile
import codecs
import os
from mldb import mldb, MldbUnitTest, ResponseException
tmp_dir = os.getenv('TMP')
class MLDB2126exportstructuredTest(MldbUnitTest): # noqa
def assert_file_content(self, filename, lines_expect):
f = codecs.open(filename, 'rb', 'utf8')
for index, expect in enumerate(lines_expect):
line = f.readline()[:-1]
self.assertEqual(line, expect)
def test_row(self):
# create the dataset
mldb.put('/v1/datasets/patate', {
'type': 'tabular'
})
mldb.post('/v1/datasets/patate/rows', {
'rowName': 0,
'columns': [
['x.a', 1, 0],
['x.b', 2, 0]
]}
)
mldb.post('/v1/datasets/patate/commit')
tmp_file = tempfile.NamedTemporaryFile(dir=tmp_dir)
res = mldb.post('/v1/procedures', {
'type': 'export.csv',
'params': {
'exportData': 'select x as x from patate',
'dataFileUrl': 'file://' + tmp_file.name,
}
})
mldb.log(res)
lines_expect = ['x.a,x.b',
'1,2'
]
self.assert_file_content(tmp_file.name, lines_expect)
if __name__ == '__main__':
mldb.run_tests()
| mldbai/mldb | testing/MLDB-2126-export-structured.py | Python | apache-2.0 | 1,480 | 0.001351 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.28 on 2020-03-17 20:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('astrobin_apps_platesolving', '0011_update_platesolvingadvanced_settings_sample_raw_frame_file_verbose_name'),
]
operations = [
migrations.CreateModel(
name='PlateSolvingAdvancedTask',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('serial_number', models.CharField(max_length=32)),
('created', models.DateTimeField(auto_now_add=True)),
('active', models.BooleanField(default=True)),
('task_params', models.TextField()),
],
),
]
| astrobin/astrobin | astrobin_apps_platesolving/migrations/0012_platesolvingadvancedtask.py | Python | agpl-3.0 | 824 | 0.002427 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from .modes import Modes
from . import models
from . import bridges
from . import layers
from . import processing
from .libs import * # noqa
from . import activations
from . import initializations
from . import losses
from . import metrics
from . import optimizers
from . import regularizations
from .rl import explorations, environments as envs, memories, stats, utils as rl_utils
from . import variables
from . import datasets
from . import estimators
from . import experiments
| polyaxon/polyaxon-api | polyaxon_lib/__init__.py | Python | mit | 572 | 0.001748 |
import itertools
import os
import urlparse
from collections import namedtuple, defaultdict
from wptmanifest.node import (DataNode, ConditionalNode, BinaryExpressionNode,
BinaryOperatorNode, VariableNode, StringNode, NumberNode,
UnaryExpressionNode, UnaryOperatorNode, KeyValueNode)
from wptmanifest.backends import conditional
from wptmanifest.backends.conditional import ManifestItem
import expected
"""Manifest structure used to update the expected results of a test
Each manifest file is represented by an ExpectedManifest that has one
or more TestNode children, one per test in the manifest. Each
TestNode has zero or more SubtestNode children, one for each known
subtest of the test.
In these representations, conditional expressions in the manifest are
not evaluated upfront but stored as python functions to be evaluated
at runtime.
When a result for a test is to be updated set_result on the
[Sub]TestNode is called to store the new result, alongside the
existing conditional that result's run info matched, if any. Once all
new results are known, coalesce_expected is called to compute the new
set of results and conditionals. The AST of the underlying parsed manifest
is updated with the changes, and the result is serialised to a file.
"""
class ConditionError(Exception):
def __init__(self, cond=None):
self.cond = cond
class UpdateError(Exception):
pass
Value = namedtuple("Value", ["run_info", "value"])
def data_cls_getter(output_node, visited_node):
# visited_node is intentionally unused
if output_node is None:
return ExpectedManifest
elif isinstance(output_node, ExpectedManifest):
return TestNode
elif isinstance(output_node, TestNode):
return SubtestNode
else:
raise ValueError
class ExpectedManifest(ManifestItem):
def __init__(self, node, test_path=None, url_base=None, property_order=None,
boolean_properties=None):
"""Object representing all the tests in a particular manifest
:param node: AST Node associated with this object. If this is None,
a new AST is created to associate with this manifest.
:param test_path: Path of the test file associated with this manifest.
:param url_base: Base url for serving the tests in this manifest.
:param property_order: List of properties to use in expectation metadata
from most to least significant.
:param boolean_properties: Set of properties in property_order that should
be treated as boolean.
"""
if node is None:
node = DataNode(None)
ManifestItem.__init__(self, node)
self.child_map = {}
self.test_path = test_path
self.url_base = url_base
assert self.url_base is not None
self.modified = False
self.boolean_properties = boolean_properties
self.property_order = property_order
self.update_properties = {
"lsan": LsanUpdate(self),
}
def append(self, child):
ManifestItem.append(self, child)
if child.id in self.child_map:
print "Warning: Duplicate heading %s" % child.id
self.child_map[child.id] = child
def _remove_child(self, child):
del self.child_map[child.id]
ManifestItem._remove_child(self, child)
def get_test(self, test_id):
"""Return a TestNode by test id, or None if no test matches
:param test_id: The id of the test to look up"""
return self.child_map.get(test_id)
def has_test(self, test_id):
"""Boolean indicating whether the current test has a known child test
with id test id
:param test_id: The id of the test to look up"""
return test_id in self.child_map
@property
def url(self):
return urlparse.urljoin(self.url_base,
"/".join(self.test_path.split(os.path.sep)))
def set_lsan(self, run_info, result):
"""Set the result of the test in a particular run
:param run_info: Dictionary of run_info parameters corresponding
to this run
:param result: Lsan violations detected"""
self.update_properties["lsan"].set(run_info, result)
def coalesce_properties(self, stability):
for prop_update in self.update_properties.itervalues():
prop_update.coalesce(stability)
class TestNode(ManifestItem):
def __init__(self, node):
"""Tree node associated with a particular test in a manifest
:param node: AST node associated with the test"""
ManifestItem.__init__(self, node)
self.subtests = {}
self._from_file = True
self.new_disabled = False
self.update_properties = {
"expected": ExpectedUpdate(self),
"max-asserts": MaxAssertsUpdate(self),
"min-asserts": MinAssertsUpdate(self)
}
@classmethod
def create(cls, test_id):
"""Create a TestNode corresponding to a given test
:param test_type: The type of the test
:param test_id: The id of the test"""
url = test_id
name = url.rsplit("/", 1)[1]
node = DataNode(name)
self = cls(node)
self._from_file = False
return self
@property
def is_empty(self):
ignore_keys = set(["type"])
if set(self._data.keys()) - ignore_keys:
return False
return all(child.is_empty for child in self.children)
@property
def test_type(self):
"""The type of the test represented by this TestNode"""
return self.get("type", None)
@property
def id(self):
"""The id of the test represented by this TestNode"""
return urlparse.urljoin(self.parent.url, self.name)
def disabled(self, run_info):
"""Boolean indicating whether this test is disabled when run in an
environment with the given run_info
:param run_info: Dictionary of run_info parameters"""
return self.get("disabled", run_info) is not None
def set_result(self, run_info, result):
"""Set the result of the test in a particular run
:param run_info: Dictionary of run_info parameters corresponding
to this run
:param result: Status of the test in this run"""
self.update_properties["expected"].set(run_info, result)
def set_asserts(self, run_info, count):
"""Set the assert count of a test
"""
self.update_properties["min-asserts"].set(run_info, count)
self.update_properties["max-asserts"].set(run_info, count)
def _add_key_value(self, node, values):
ManifestItem._add_key_value(self, node, values)
if node.data in self.update_properties:
new_updated = []
self.update_properties[node.data].updated = new_updated
for value in values:
new_updated.append((value, []))
def clear(self, key):
"""Clear all the expected data for this test and all of its subtests"""
self.updated = []
if key in self._data:
for child in self.node.children:
if (isinstance(child, KeyValueNode) and
child.data == key):
child.remove()
del self._data[key]
break
for subtest in self.subtests.itervalues():
subtest.clear(key)
def append(self, node):
child = ManifestItem.append(self, node)
self.subtests[child.name] = child
def get_subtest(self, name):
"""Return a SubtestNode corresponding to a particular subtest of
the current test, creating a new one if no subtest with that name
already exists.
:param name: Name of the subtest"""
if name in self.subtests:
return self.subtests[name]
else:
subtest = SubtestNode.create(name)
self.append(subtest)
return subtest
def coalesce_properties(self, stability):
for prop_update in self.update_properties.itervalues():
prop_update.coalesce(stability)
class SubtestNode(TestNode):
def __init__(self, node):
assert isinstance(node, DataNode)
TestNode.__init__(self, node)
@classmethod
def create(cls, name):
node = DataNode(name)
self = cls(node)
return self
@property
def is_empty(self):
if self._data:
return False
return True
class PropertyUpdate(object):
property_name = None
cls_default_value = None
value_type = None
def __init__(self, node):
self.node = node
self.updated = []
self.new = []
self.default_value = self.cls_default_value
def set(self, run_info, in_value):
self.check_default(in_value)
value = self.get_value(in_value)
# Add this result to the list of results satisfying
# any condition in the list of updated results it matches
for (cond, values) in self.updated:
if cond(run_info):
values.append(Value(run_info, value))
if value != cond.value_as(self.value_type):
self.node.root.modified = True
break
else:
# We didn't find a previous value for this
self.new.append(Value(run_info, value))
self.node.root.modified = True
def check_default(self, result):
return
def get_value(self, in_value):
return in_value
def coalesce(self, stability=None):
"""Update the underlying manifest AST for this test based on all the
added results.
This will update existing conditionals if they got the same result in
all matching runs in the updated results, will delete existing conditionals
that get more than one different result in the updated run, and add new
conditionals for anything that doesn't match an existing conditional.
Conditionals not matched by any added result are not changed.
When `stability` is not None, disable any test that shows multiple
unexpected results for the same set of parameters.
"""
try:
unconditional_value = self.node.get(self.property_name)
if self.value_type:
unconditional_value = self.value_type(unconditional_value)
except KeyError:
unconditional_value = self.default_value
for conditional_value, results in self.updated:
if not results:
# The conditional didn't match anything in these runs so leave it alone
pass
elif all(results[0].value == result.value for result in results):
# All the new values for this conditional matched, so update the node
result = results[0]
if (result.value == unconditional_value and
conditional_value.condition_node is not None):
if self.property_name in self.node:
self.node.remove_value(self.property_name, conditional_value)
else:
conditional_value.value = self.update_value(conditional_value.value_as(self.value_type),
result.value)
elif conditional_value.condition_node is not None:
# Blow away the existing condition and rebuild from scratch
# This isn't sure to work if we have a conditional later that matches
# these values too, but we can hope, verify that we get the results
# we expect, and if not let a human sort it out
self.node.remove_value(self.property_name, conditional_value)
self.new.extend(results)
elif conditional_value.condition_node is None:
self.new.extend(result for result in results
if result.value != unconditional_value)
# It is an invariant that nothing in new matches an existing
# condition except for the default condition
if self.new:
update_default, new_default_value = self.update_default()
if update_default:
if new_default_value != self.default_value:
self.node.set(self.property_name,
self.update_value(unconditional_value, new_default_value),
condition=None)
else:
try:
self.add_new(unconditional_value, stability)
except UpdateError as e:
print("%s for %s, cannot update %s" % (e, self.node.root.test_path,
self.property_name))
# Remove cases where the value matches the default
if (self.property_name in self.node._data and
len(self.node._data[self.property_name]) > 0 and
self.node._data[self.property_name][-1].condition_node is None and
self.node._data[self.property_name][-1].value_as(self.value_type) == self.default_value):
self.node.remove_value(self.property_name, self.node._data[self.property_name][-1])
# Remove empty properties
if (self.property_name in self.node._data and len(self.node._data[self.property_name]) == 0):
for child in self.node.children:
if (isinstance(child, KeyValueNode) and child.data == self.property_name):
child.remove()
break
def update_default(self):
"""Get the updated default value for the property (i.e. the one chosen when no conditions match).
:returns: (update, new_default_value) where updated is a bool indicating whether the property
should be updated, and new_default_value is the value to set if it should."""
raise NotImplementedError
def add_new(self, unconditional_value, stability):
"""Add new conditional values for the property.
Subclasses need not implement this if they only ever update the default value."""
raise NotImplementedError
def update_value(self, old_value, new_value):
"""Get a value to set on the property, given its previous value and the new value from logs.
By default this just returns the new value, but overriding is useful in cases
where we want the new value to be some function of both old and new e.g. max(old_value, new_value)"""
return new_value
class ExpectedUpdate(PropertyUpdate):
property_name = "expected"
def check_default(self, result):
if self.default_value is not None:
assert self.default_value == result.default_expected
else:
self.default_value = result.default_expected
def get_value(self, in_value):
return in_value.status
def update_default(self):
update_default = all(self.new[0].value == result.value
for result in self.new) and not self.updated
new_value = self.new[0].value
return update_default, new_value
def add_new(self, unconditional_value, stability):
try:
conditionals = group_conditionals(
self.new,
property_order=self.node.root.property_order,
boolean_properties=self.node.root.boolean_properties)
except ConditionError as e:
if stability is not None:
self.node.set("disabled", stability or "unstable", e.cond.children[0])
self.node.new_disabled = True
else:
raise UpdateError("Conflicting metadata values")
for conditional_node, value in conditionals:
if value != unconditional_value:
self.node.set(self.property_name, value, condition=conditional_node.children[0])
class MaxAssertsUpdate(PropertyUpdate):
property_name = "max-asserts"
cls_default_value = 0
value_type = int
def update_value(self, old_value, new_value):
new_value = self.value_type(new_value)
if old_value is not None:
old_value = self.value_type(old_value)
if old_value is not None and old_value < new_value:
return new_value + 1
if old_value is None:
return new_value + 1
return old_value
def update_default(self):
"""For asserts we always update the default value and never add new conditionals.
The value we set as the default is the maximum the current default or one more than the
number of asserts we saw in any configuration."""
# Current values
values = []
current_default = None
if self.property_name in self.node._data:
current_default = [item for item in
self.node._data[self.property_name]
if item.condition_node is None]
if current_default:
values.append(int(current_default[0].value))
values.extend(item.value for item in self.new)
values.extend(item.value for item in
itertools.chain.from_iterable(results for _, results in self.updated))
new_value = max(values)
return True, new_value
class MinAssertsUpdate(PropertyUpdate):
property_name = "min-asserts"
cls_default_value = 0
value_type = int
def update_value(self, old_value, new_value):
new_value = self.value_type(new_value)
if old_value is not None:
old_value = self.value_type(old_value)
if old_value is not None and new_value < old_value:
return 0
if old_value is None:
# If we are getting some asserts for the first time, set the minimum to 0
return new_value
return old_value
def update_default(self):
"""For asserts we always update the default value and never add new conditionals.
This is either set to the current value or one less than the number of asserts
we saw, whichever is lower."""
values = []
current_default = None
if self.property_name in self.node._data:
current_default = [item for item in
self.node._data[self.property_name]
if item.condition_node is None]
if current_default:
values.append(current_default[0].value_as(self.value_type))
values.extend(max(0, item.value) for item in self.new)
values.extend(max(0, item.value) for item in
itertools.chain.from_iterable(results for _, results in self.updated))
new_value = min(values)
return True, new_value
class LsanUpdate(PropertyUpdate):
property_name = "lsan-allowed"
cls_default_value = None
def get_value(self, result):
# If we have an allowed_match that matched, return None
# This value is ignored later (because it matches the default)
# We do that because then if we allow a failure in foo/__dir__.ini
# we don't want to update foo/bar/__dir__.ini with the same rule
if result[1]:
return None
# Otherwise return the topmost stack frame
# TODO: there is probably some improvement to be made by looking for a "better" stack frame
return result[0][0]
def update_value(self, old_value, new_value):
if isinstance(new_value, (str, unicode)):
new_value = {new_value}
else:
new_value = set(new_value)
if old_value is None:
old_value = set()
old_value = set(old_value)
return sorted((old_value | new_value) - {None})
def update_default(self):
current_default = None
if self.property_name in self.node._data:
current_default = [item for item in
self.node._data[self.property_name]
if item.condition_node is None]
if current_default:
current_default = current_default[0].value
new_values = [item.value for item in self.new]
new_value = self.update_value(current_default, new_values)
return True, new_value if new_value else None
def group_conditionals(values, property_order=None, boolean_properties=None):
"""Given a list of Value objects, return a list of
(conditional_node, status) pairs representing the conditional
expressions that are required to match each status
:param values: List of Values
:param property_order: List of properties to use in expectation metadata
from most to least significant.
:param boolean_properties: Set of properties in property_order that should
be treated as boolean."""
by_property = defaultdict(set)
for run_info, value in values:
for prop_name, prop_value in run_info.iteritems():
by_property[(prop_name, prop_value)].add(value)
if property_order is None:
property_order = ["debug", "os", "version", "processor", "bits"]
if boolean_properties is None:
boolean_properties = set(["debug"])
else:
boolean_properties = set(boolean_properties)
# If we have more than one value, remove any properties that are common
# for all the values
if len(values) > 1:
for key, statuses in by_property.copy().iteritems():
if len(statuses) == len(values):
del by_property[key]
if not by_property:
raise ConditionError
properties = set(item[0] for item in by_property.iterkeys())
include_props = []
for prop in property_order:
if prop in properties:
include_props.append(prop)
conditions = {}
for run_info, value in values:
prop_set = tuple((prop, run_info[prop]) for prop in include_props)
if prop_set in conditions:
if conditions[prop_set][1] != value:
# A prop_set contains contradictory results
raise ConditionError(make_expr(prop_set, value, boolean_properties))
continue
expr = make_expr(prop_set, value, boolean_properties=boolean_properties)
conditions[prop_set] = (expr, value)
return conditions.values()
def make_expr(prop_set, rhs, boolean_properties=None):
"""Create an AST that returns the value ``status`` given all the
properties in prop_set match.
:param prop_set: tuple of (property name, value) pairs for each
property in this expression and the value it must match
:param status: Status on RHS when all the given properties match
:param boolean_properties: Set of properties in property_order that should
be treated as boolean.
"""
root = ConditionalNode()
assert len(prop_set) > 0
expressions = []
for prop, value in prop_set:
number_types = (int, float, long)
value_cls = (NumberNode
if type(value) in number_types
else StringNode)
if prop not in boolean_properties:
expressions.append(
BinaryExpressionNode(
BinaryOperatorNode("=="),
VariableNode(prop),
value_cls(unicode(value))
))
else:
if value:
expressions.append(VariableNode(prop))
else:
expressions.append(
UnaryExpressionNode(
UnaryOperatorNode("not"),
VariableNode(prop)
))
if len(expressions) > 1:
prev = expressions[-1]
for curr in reversed(expressions[:-1]):
node = BinaryExpressionNode(
BinaryOperatorNode("and"),
curr,
prev)
prev = node
else:
node = expressions[0]
root.append(node)
if type(rhs) in number_types:
rhs_node = NumberNode(rhs)
else:
rhs_node = StringNode(rhs)
root.append(rhs_node)
return root
def get_manifest(metadata_root, test_path, url_base, property_order=None,
boolean_properties=None):
"""Get the ExpectedManifest for a particular test path, or None if there is no
metadata stored for that test path.
:param metadata_root: Absolute path to the root of the metadata directory
:param test_path: Path to the test(s) relative to the test root
:param url_base: Base url for serving the tests in this manifest
:param property_order: List of properties to use in expectation metadata
from most to least significant.
:param boolean_properties: Set of properties in property_order that should
be treated as boolean."""
manifest_path = expected.expected_path(metadata_root, test_path)
try:
with open(manifest_path) as f:
return compile(f, test_path, url_base, property_order=property_order,
boolean_properties=boolean_properties)
except IOError:
return None
def compile(manifest_file, test_path, url_base, property_order=None,
boolean_properties=None):
return conditional.compile(manifest_file,
data_cls_getter=data_cls_getter,
test_path=test_path,
url_base=url_base,
property_order=property_order,
boolean_properties=boolean_properties)
| danlrobertson/servo | tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/manifestupdate.py | Python | mpl-2.0 | 25,973 | 0.001617 |
import os
class Config(object):
DEBUG = False
TESTING = False
SECRET_KEY = 'A0Zr18h/3yX R~XHH!jmN]LWX/,?RT'
DATABASE = {
'engine': 'playhouse.pool.PooledPostgresqlExtDatabase',
'name': 'middleware',
'user': 'comunitea',
'port': '5434',
'host': 'localhost',
'max_connections': None,
'autocommit': True,
'autorollback': True,
'stale_timeout': 600}
NOTIFY_URL = "https://www.visiotechsecurity.com/?option=com_sync&task=sync.syncOdoo"
NOTIFY_USER = os.environ.get('NOTIFY_USER')
NOTIFY_PASSWORD = os.environ.get('NOTIFY_PASSWORD')
| jgmanzanas/CMNT_004_15 | project-addons/vt_flask_middleware/config.py | Python | agpl-3.0 | 705 | 0.004255 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os.path
import boto3.session
import botocore.exceptions
import freezegun
import pretend
import pytest
import redis
from zope.interface.verify import verifyClass
from warehouse.packaging.interfaces import IDownloadStatService, IFileStorage
from warehouse.packaging.services import (
RedisDownloadStatService, LocalFileStorage, S3FileStorage,
)
@freezegun.freeze_time("2012-01-14")
class TestRedisDownloadStatService:
def test_verify_service(self):
assert verifyClass(IDownloadStatService, RedisDownloadStatService)
def test_creates_redis(self, monkeypatch):
redis_obj = pretend.stub()
redis_cls = pretend.stub(
from_url=pretend.call_recorder(lambda u: redis_obj),
)
monkeypatch.setattr(redis, "StrictRedis", redis_cls)
url = pretend.stub()
svc = RedisDownloadStatService(url)
assert svc.redis is redis_obj
assert redis_cls.from_url.calls == [pretend.call(url)]
@pytest.mark.parametrize(
("keys", "result"),
[
([], 0),
([5, 7, 8], 20),
]
)
def test_get_daily_stats(self, keys, result):
svc = RedisDownloadStatService("")
svc.redis = pretend.stub(mget=pretend.call_recorder(lambda *a: keys))
call_keys = (
["downloads:hour:12-01-14-00:foo"] +
[
"downloads:hour:12-01-13-{:02d}:foo".format(i)
for i in reversed(range(24))
] +
["downloads:hour:12-01-12-23:foo"]
)
assert svc.get_daily_stats("foo") == result
assert svc.redis.mget.calls == [pretend.call(*call_keys)]
@pytest.mark.parametrize(
("keys", "result"),
[
([], 0),
([5, 7, 8], 20),
]
)
def test_get_weekly_stats(self, keys, result):
svc = RedisDownloadStatService("")
svc.redis = pretend.stub(mget=pretend.call_recorder(lambda *a: keys))
call_keys = [
"downloads:daily:12-01-{:02d}:foo".format(i + 7)
for i in reversed(range(8))
]
assert svc.get_weekly_stats("foo") == result
assert svc.redis.mget.calls == [pretend.call(*call_keys)]
@pytest.mark.parametrize(
("keys", "result"),
[
([], 0),
([5, 7, 8], 20),
]
)
def test_get_monthly_stats(self, keys, result):
svc = RedisDownloadStatService("")
svc.redis = pretend.stub(mget=pretend.call_recorder(lambda *a: keys))
call_keys = [
"downloads:daily:12-01-{:02d}:foo".format(i)
for i in reversed(range(1, 15))
] + [
"downloads:daily:11-12-{:02d}:foo".format(i + 15)
for i in reversed(range(17))
]
assert svc.get_monthly_stats("foo") == result
assert svc.redis.mget.calls == [pretend.call(*call_keys)]
class TestLocalFileStorage:
def test_verify_service(self):
assert verifyClass(IFileStorage, LocalFileStorage)
def test_basic_init(self):
storage = LocalFileStorage("/foo/bar/")
assert storage.base == "/foo/bar/"
def test_create_service(self):
request = pretend.stub(
registry=pretend.stub(
settings={"files.path": "/the/one/two/"},
),
)
storage = LocalFileStorage.create_service(None, request)
assert storage.base == "/the/one/two/"
def test_gets_file(self, tmpdir):
with open(str(tmpdir.join("file.txt")), "wb") as fp:
fp.write(b"my test file contents")
storage = LocalFileStorage(str(tmpdir))
file_object = storage.get("file.txt")
assert file_object.read() == b"my test file contents"
def test_raises_when_file_non_existant(self, tmpdir):
storage = LocalFileStorage(str(tmpdir))
with pytest.raises(FileNotFoundError):
storage.get("file.txt")
def test_stores_file(self, tmpdir):
filename = str(tmpdir.join("testfile.txt"))
with open(filename, "wb") as fp:
fp.write(b"Test File!")
storage_dir = str(tmpdir.join("storage"))
storage = LocalFileStorage(storage_dir)
storage.store("foo/bar.txt", filename)
with open(os.path.join(storage_dir, "foo/bar.txt"), "rb") as fp:
assert fp.read() == b"Test File!"
def test_stores_two_files(self, tmpdir):
filename1 = str(tmpdir.join("testfile1.txt"))
with open(filename1, "wb") as fp:
fp.write(b"First Test File!")
filename2 = str(tmpdir.join("testfile2.txt"))
with open(filename2, "wb") as fp:
fp.write(b"Second Test File!")
storage_dir = str(tmpdir.join("storage"))
storage = LocalFileStorage(storage_dir)
storage.store("foo/first.txt", filename1)
storage.store("foo/second.txt", filename2)
with open(os.path.join(storage_dir, "foo/first.txt"), "rb") as fp:
assert fp.read() == b"First Test File!"
with open(os.path.join(storage_dir, "foo/second.txt"), "rb") as fp:
assert fp.read() == b"Second Test File!"
class TestS3FileStorage:
def test_verify_service(self):
assert verifyClass(IFileStorage, S3FileStorage)
def test_basic_init(self):
bucket = pretend.stub()
storage = S3FileStorage(bucket)
assert storage.bucket is bucket
def test_create_service(self):
session = boto3.session.Session()
request = pretend.stub(
find_service=pretend.call_recorder(lambda name: session),
registry=pretend.stub(settings={"files.bucket": "froblob"}),
)
storage = S3FileStorage.create_service(None, request)
assert request.find_service.calls == [pretend.call(name="aws.session")]
assert storage.bucket.name == "froblob"
def test_gets_file(self):
s3key = pretend.stub(get=lambda: {"Body": io.BytesIO(b"my contents")})
bucket = pretend.stub(Object=pretend.call_recorder(lambda path: s3key))
storage = S3FileStorage(bucket)
file_object = storage.get("file.txt")
assert file_object.read() == b"my contents"
assert bucket.Object.calls == [pretend.call("file.txt")]
def test_raises_when_key_non_existant(self):
def raiser():
raise botocore.exceptions.ClientError(
{"Error": {"Code": "NoSuchKey", "Message": "No Key!"}},
"some operation",
)
s3key = pretend.stub(get=raiser)
bucket = pretend.stub(Object=pretend.call_recorder(lambda path: s3key))
storage = S3FileStorage(bucket)
with pytest.raises(FileNotFoundError):
storage.get("file.txt")
assert bucket.Object.calls == [pretend.call("file.txt")]
def test_passes_up_error_when_not_no_such_key(self):
def raiser():
raise botocore.exceptions.ClientError(
{"Error": {"Code": "SomeOtherError", "Message": "Who Knows!"}},
"some operation",
)
s3key = pretend.stub(get=raiser)
bucket = pretend.stub(Object=lambda path: s3key)
storage = S3FileStorage(bucket)
with pytest.raises(botocore.exceptions.ClientError):
storage.get("file.txt")
def test_stores_file(self, tmpdir):
filename = str(tmpdir.join("testfile.txt"))
with open(filename, "wb") as fp:
fp.write(b"Test File!")
bucket = pretend.stub(
upload_file=pretend.call_recorder(lambda filename, key: None),
)
storage = S3FileStorage(bucket)
storage.store("foo/bar.txt", filename)
assert bucket.upload_file.calls == [
pretend.call(filename, "foo/bar.txt"),
]
def test_stores_two_files(self, tmpdir):
filename1 = str(tmpdir.join("testfile1.txt"))
with open(filename1, "wb") as fp:
fp.write(b"First Test File!")
filename2 = str(tmpdir.join("testfile2.txt"))
with open(filename2, "wb") as fp:
fp.write(b"Second Test File!")
bucket = pretend.stub(
upload_file=pretend.call_recorder(lambda filename, key: None),
)
storage = S3FileStorage(bucket)
storage.store("foo/first.txt", filename1)
storage.store("foo/second.txt", filename2)
assert bucket.upload_file.calls == [
pretend.call(filename1, "foo/first.txt"),
pretend.call(filename2, "foo/second.txt"),
]
| ismail-s/warehouse | tests/unit/packaging/test_services.py | Python | apache-2.0 | 9,105 | 0 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010 Savoir-faire Linux (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Hazard Risk",
"version": "8.0.1.1.0",
"author": "Savoir-faire Linux, Odoo Community Association (OCA)",
"website": "http://www.savoirfairelinux.com",
"license": "AGPL-3",
"category": "Management System",
"depends": [
'mgmtsystem_hazard',
'hr'
],
"data": [
'security/ir.model.access.csv',
'data/mgmtsystem_hazard_risk_computation.xml',
'data/mgmtsystem_hazard_risk_type.xml',
'views/res_company.xml',
'views/mgmtsystem_hazard.xml',
'views/mgmtsystem_hazard_risk_type.xml',
'views/mgmtsystem_hazard_risk_computation.xml',
'views/mgmtsystem_hazard_residual_risk.xml',
],
"installable": True,
}
| jobiols/management-system | mgmtsystem_hazard_risk/__openerp__.py | Python | agpl-3.0 | 1,727 | 0 |
# -*- coding: utf-8 -*-
"""
celery.events.dumper
~~~~~~~~~~~~~~~~~~~~
This is a simple program that dumps events to the console
as they happen. Think of it like a `tcpdump` for Celery events.
:copyright: (c) 2009 - 2011 by Ask Solem.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import sys
from datetime import datetime
from ..app import app_or_default
from ..datastructures import LRUCache
TASK_NAMES = LRUCache(limit=0xFFF)
HUMAN_TYPES = {"worker-offline": "shutdown",
"worker-online": "started",
"worker-heartbeat": "heartbeat"}
def humanize_type(type):
try:
return HUMAN_TYPES[type.lower()]
except KeyError:
return type.lower().replace("-", " ")
class Dumper(object):
def on_event(self, event):
timestamp = datetime.fromtimestamp(event.pop("timestamp"))
type = event.pop("type").lower()
hostname = event.pop("hostname")
if type.startswith("task-"):
uuid = event.pop("uuid")
if type in ("task-received", "task-sent"):
task = TASK_NAMES[uuid] = "%s(%s) args=%s kwargs=%s" % (
event.pop("name"), uuid,
event.pop("args"),
event.pop("kwargs"))
else:
task = TASK_NAMES.get(uuid, "")
return self.format_task_event(hostname, timestamp,
type, task, event)
fields = ", ".join("%s=%s" % (key, event[key])
for key in sorted(event.keys()))
sep = fields and ":" or ""
print("%s [%s] %s%s %s" % (hostname, timestamp,
humanize_type(type), sep, fields))
def format_task_event(self, hostname, timestamp, type, task, event):
fields = ", ".join("%s=%s" % (key, event[key])
for key in sorted(event.keys()))
sep = fields and ":" or ""
print("%s [%s] %s%s %s %s" % (hostname, timestamp,
humanize_type(type), sep, task, fields))
def evdump(app=None):
sys.stderr.write("-> evdump: starting capture...\n")
app = app_or_default(app)
dumper = Dumper()
conn = app.broker_connection()
recv = app.events.Receiver(conn, handlers={"*": dumper.on_event})
try:
recv.capture()
except (KeyboardInterrupt, SystemExit):
conn and conn.close()
if __name__ == "__main__":
evdump()
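# Hedged usage note (not part of the original module): evdump() can also be
# called from your own code; with no argument it falls back to the default app
# via app_or_default(), then blocks and prints events until interrupted. It
# assumes a reachable broker and workers configured to send events.
#
#   from celery.events.dumper import evdump
#   evdump()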
| mzdaniel/oh-mainline | vendor/packages/celery/celery/events/dumper.py | Python | agpl-3.0 | 2,533 | 0.001974 |
"""Config flow to configure the OVO Energy integration."""
import aiohttp
from ovoenergy.ovoenergy import OVOEnergy
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.config_entries import ConfigFlow
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from .const import DOMAIN # pylint: disable=unused-import
REAUTH_SCHEMA = vol.Schema({vol.Required(CONF_PASSWORD): str})
USER_SCHEMA = vol.Schema(
{vol.Required(CONF_USERNAME): str, vol.Required(CONF_PASSWORD): str}
)
class OVOEnergyFlowHandler(ConfigFlow, domain=DOMAIN):
"""Handle a OVO Energy config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
def __init__(self):
"""Initialize the flow."""
self.username = None
async def async_step_user(self, user_input=None):
"""Handle a flow initiated by the user."""
errors = {}
if user_input is not None:
client = OVOEnergy()
try:
authenticated = await client.authenticate(
user_input[CONF_USERNAME], user_input[CONF_PASSWORD]
)
except aiohttp.ClientError:
errors["base"] = "cannot_connect"
else:
if authenticated:
await self.async_set_unique_id(user_input[CONF_USERNAME])
self._abort_if_unique_id_configured()
return self.async_create_entry(
title=client.username,
data={
CONF_USERNAME: user_input[CONF_USERNAME],
CONF_PASSWORD: user_input[CONF_PASSWORD],
},
)
errors["base"] = "invalid_auth"
return self.async_show_form(
step_id="user", data_schema=USER_SCHEMA, errors=errors
)
async def async_step_reauth(self, user_input):
"""Handle configuration by re-auth."""
errors = {}
if user_input and user_input.get(CONF_USERNAME):
self.username = user_input[CONF_USERNAME]
self.context["title_placeholders"] = {CONF_USERNAME: self.username}
if user_input is not None and user_input.get(CONF_PASSWORD) is not None:
client = OVOEnergy()
try:
authenticated = await client.authenticate(
self.username, user_input[CONF_PASSWORD]
)
except aiohttp.ClientError:
errors["base"] = "connection_error"
else:
if authenticated:
await self.async_set_unique_id(self.username)
for entry in self._async_current_entries():
if entry.unique_id == self.unique_id:
self.hass.config_entries.async_update_entry(
entry,
data={
CONF_USERNAME: self.username,
CONF_PASSWORD: user_input[CONF_PASSWORD],
},
)
return self.async_abort(reason="reauth_successful")
errors["base"] = "authorization_error"
return self.async_show_form(
step_id="reauth", data_schema=REAUTH_SCHEMA, errors=errors
)
| partofthething/home-assistant | homeassistant/components/ovo_energy/config_flow.py | Python | apache-2.0 | 3,440 | 0.000291 |
#!/usr/bin/env python
"""
Copyright (c) 2006-2015 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
try:
import _mssql
import pymssql
except ImportError:
pass
import logging
from lib.core.convert import utf8encode
from lib.core.data import conf
from lib.core.data import logger
from lib.core.exception import SqlmapConnectionException
from plugins.generic.connector import Connector as GenericConnector
class Connector(GenericConnector):
"""
Homepage: http://pymssql.sourceforge.net/
User guide: http://pymssql.sourceforge.net/examples_pymssql.php
API: http://pymssql.sourceforge.net/ref_pymssql.php
Debian package: python-pymssql
License: LGPL
Possible connectors: http://wiki.python.org/moin/SQL%20Server
Important note: pymssql library on your system MUST be version 1.0.2
to work, get it from http://sourceforge.net/projects/pymssql/files/pymssql/1.0.2/
"""
def __init__(self):
GenericConnector.__init__(self)
def connect(self):
self.initConnection()
try:
self.connector = pymssql.connect(host="%s:%d" % (self.hostname, self.port), user=self.user, password=self.password, database=self.db, login_timeout=conf.timeout, timeout=conf.timeout)
except pymssql.OperationalError, msg:
raise SqlmapConnectionException(msg)
self.initCursor()
self.printConnected()
def fetchall(self):
try:
return self.cursor.fetchall()
except (pymssql.ProgrammingError, pymssql.OperationalError, _mssql.MssqlDatabaseException), msg:
logger.log(logging.WARN if conf.dbmsHandler else logging.DEBUG, "(remote) %s" % str(msg).replace("\n", " "))
return None
def execute(self, query):
retVal = False
try:
self.cursor.execute(utf8encode(query))
retVal = True
except (pymssql.OperationalError, pymssql.ProgrammingError), msg:
logger.log(logging.WARN if conf.dbmsHandler else logging.DEBUG, "(remote) %s" % str(msg).replace("\n", " "))
except pymssql.InternalError, msg:
raise SqlmapConnectionException(msg)
return retVal
def select(self, query):
retVal = None
if self.execute(query):
retVal = self.fetchall()
try:
self.connector.commit()
except pymssql.OperationalError:
pass
return retVal
| V11/volcano | server/sqlmap/plugins/dbms/sybase/connector.py | Python | mit | 2,499 | 0.002401 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
diff.py
---------------------
Date : November 2013
Copyright : (C) 2013-2016 Boundless, http://boundlessgeo.com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'November 2013'
__copyright__ = '(C) 2013-2016 Boundless, http://boundlessgeo.com'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from feature import Feature
from geogig import NULL_ID
TYPE_MODIFIED = "Modified"
TYPE_ADDED = "Added"
TYPE_REMOVED = "Removed"
ATTRIBUTE_DIFF_MODIFIED, ATTRIBUTE_DIFF_ADDED, ATTRIBUTE_DIFF_REMOVED, ATTRIBUTE_DIFF_UNCHANGED = ["M", "A", "R", "U"]
class Diffentry(object):
'''A difference between two references for a given path'''
def __init__(self, repo, oldcommitref, newcommitref, oldref, newref, path):
self.repo = repo
self.path = path
self.oldref = oldref
self.newref = newref
self.oldcommitref = oldcommitref
self.newcommitref = newcommitref
def oldobject(self):
if self.oldref == NULL_ID:
return None
else:
return Feature(self.repo, self.oldcommitref, self.path)
def newobject(self):
if self.newref == NULL_ID:
return None
else:
return Feature(self.repo, self.newcommitref, self.path)
def featurediff(self):
return self.repo.featurediff(self.oldcommitref, self.newcommitref, self.path)
def type(self):
if self.oldref == NULL_ID:
return TYPE_ADDED
elif self.newref == NULL_ID:
return TYPE_REMOVED
else:
return TYPE_MODIFIED
def __str__(self):
if self.oldref == NULL_ID:
return "%s %s (%s)" % (TYPE_ADDED, self.path, self.newref)
elif self.newref == NULL_ID:
return TYPE_REMOVED + " " + self.path
else:
return "%s %s (%s --> %s)" % (TYPE_MODIFIED, self.path, self.oldref, self.newref)
| boundlessgeo/geogig-py | src/geogigpy/diff.py | Python | bsd-3-clause | 2,681 | 0.001119 |
"""
This file is part of py-sonic.
py-sonic is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
py-sonic is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with py-sonic. If not, see <http://www.gnu.org/licenses/>
"""
from base64 import b64encode
from urllib import urlencode
from .errors import *
from pprint import pprint
from cStringIO import StringIO
from netrc import netrc
import json, urllib2, httplib, logging, socket, ssl, sys
API_VERSION = '1.13.0'
logger = logging.getLogger(__name__)
class HTTPSConnectionChain(httplib.HTTPSConnection):
_preferred_ssl_protos = sorted([ p for p in dir(ssl)
if p.startswith('PROTOCOL_') ], reverse=True)
_ssl_working_proto = None
def _create_sock(self):
sock = socket.create_connection((self.host, self.port), self.timeout)
if self._tunnel_host:
self.sock = sock
self._tunnel()
return sock
def connect(self):
if self._ssl_working_proto is not None:
# If we have a working proto, let's use that straight away
logger.debug("Using known working proto: '%s'",
self._ssl_working_proto)
sock = self._create_sock()
self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
ssl_version=self._ssl_working_proto)
return
# Try connecting via the different SSL protos in preference order
for proto_name in self._preferred_ssl_protos:
sock = self._create_sock()
proto = getattr(ssl, proto_name, None)
try:
self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
ssl_version=proto)
except:
sock.close()
else:
# Cache the working ssl version
HTTPSConnectionChain._ssl_working_proto = proto
break
class HTTPSHandlerChain(urllib2.HTTPSHandler):
def https_open(self, req):
return self.do_open(HTTPSConnectionChain, req)
# install opener
urllib2.install_opener(urllib2.build_opener(HTTPSHandlerChain()))
class PysHTTPRedirectHandler(urllib2.HTTPRedirectHandler):
"""
This class is used to override the default behavior of the
HTTPRedirectHandler, which does *not* redirect POST data
"""
def redirect_request(self, req, fp, code, msg, headers, newurl):
m = req.get_method()
if (code in (301, 302, 303, 307) and m in ("GET", "HEAD")
or code in (301, 302, 303) and m == "POST"):
newurl = newurl.replace(' ', '%20')
newheaders = dict((k, v) for k, v in req.headers.items()
if k.lower() not in ("content-length", "content-type")
)
data = None
if req.has_data():
data = req.get_data()
return urllib2.Request(newurl,
data=data,
headers=newheaders,
origin_req_host=req.get_origin_req_host(),
unverifiable=True)
else:
raise urllib2.HTTPError(req.get_full_url(), code, msg, headers, fp)
class Connection(object):
def __init__(self, baseUrl, username=None, password=None, port=4040,
serverPath='/rest', appName='py-sonic', apiVersion=API_VERSION,
insecure=False, useNetrc=None):
"""
This will create a connection to your subsonic server
baseUrl:str The base url for your server. Be sure to use
"https" for SSL connections. If you are using
a port other than the default 4040, be sure to
specify that with the port argument. Do *not*
append it here.
ex: http://subsonic.example.com
If you are running subsonic under a different
path, specify that with the "serverPath" arg,
*not* here. For example, if your subsonic
lives at:
https://mydomain.com:8080/path/to/subsonic/rest
You would set the following:
baseUrl = "https://mydomain.com"
port = 8080
serverPath = "/path/to/subsonic/rest"
username:str The username to use for the connection. This
can be None if `useNetrc' is True (and you
have a valid entry in your netrc file)
password:str The password to use for the connection. This
can be None if `useNetrc' is True (and you
have a valid entry in your netrc file)
port:int The port number to connect on. The default for
unencrypted subsonic connections is 4040
serverPath:str The base resource path for the subsonic views.
This is useful if you have your subsonic server
behind a proxy and the path that you are proxying
is different from the default of '/rest'.
Ex:
serverPath='/path/to/subs'
The full url that would be built then would be
(assuming defaults and using "example.com" and
you are using the "ping" view):
http://example.com:4040/path/to/subs/ping.view
appName:str The name of your application.
apiVersion:str The API version you wish to use for your
application. Subsonic will throw an error if you
try to use/send an api version higher than what
the server supports. See the Subsonic API docs
to find the Subsonic version -> API version table.
This is useful if you are connecting to an older
version of Subsonic.
insecure:bool This will allow you to use self signed
certificates when connecting if set to True.
useNetrc:str|bool You can either specify a specific netrc
formatted file or True to use your default
netrc file ($HOME/.netrc).
"""
self._baseUrl = baseUrl
self._hostname = baseUrl.split('://')[1].strip()
self._username = username
self._rawPass = password
self._netrc = None
if useNetrc is not None:
self._process_netrc(useNetrc)
elif username is None or password is None:
raise CredentialError('You must specify either a username/password '
'combination or "useNetrc" must be either True or a string '
'representing a path to a netrc file')
self._port = int(port)
self._apiVersion = apiVersion
self._appName = appName
self._serverPath = serverPath.strip('/')
self._insecure = insecure
self._opener = self._getOpener(self._username, self._rawPass)
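# Hedged usage sketch (not part of the original library), reusing the example
# values from the docstring above; the host, credentials and paths are
# placeholders:
#
#   conn = Connection('https://mydomain.com', 'myuser', 'secret',
#                     port=8080, serverPath='/path/to/subsonic/rest')
#   if conn.ping():
#       print('server is reachable')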
# Properties
def setBaseUrl(self, url):
self._baseUrl = url
self._opener = self._getOpener(self._username, self._rawPass)
baseUrl = property(lambda s: s._baseUrl, setBaseUrl)
def setPort(self, port):
self._port = int(port)
port = property(lambda s: s._port, setPort)
def setUsername(self, username):
self._username = username
self._opener = self._getOpener(self._username, self._rawPass)
username = property(lambda s: s._username, setUsername)
def setPassword(self, password):
self._rawPass = password
# Redo the opener with the new creds
self._opener = self._getOpener(self._username, self._rawPass)
password = property(lambda s: s._rawPass, setPassword)
apiVersion = property(lambda s: s._apiVersion)
def setAppName(self, appName):
self._appName = appName
appName = property(lambda s: s._appName, setAppName)
def setServerPath(self, path):
self._serverPath = path.strip('/')
serverPath = property(lambda s: s._serverPath, setServerPath)
def setInsecure(self, insecure):
self._insecure = insecure
insecure = property(lambda s: s._insecure, setInsecure)
# API methods
def ping(self):
"""
since: 1.0.0
Returns a boolean True if the server is alive, False otherwise
"""
methodName = 'ping'
viewName = '%s.view' % methodName
req = self._getRequest(viewName)
try:
res = self._doInfoReq(req)
except:
return False
if res['status'] == 'ok':
return True
elif res['status'] == 'failed':
exc = getExcByCode(res['error']['code'])
raise exc(res['error']['message'])
return False
def getLicense(self):
"""
since: 1.0.0
Gets details related to the software license
Returns a dict like the following:
{u'license': {u'date': u'2010-05-21T11:14:39',
u'email': u'email@example.com',
u'key': u'12345678901234567890123456789012',
u'valid': True},
u'status': u'ok',
u'version': u'1.5.0',
u'xmlns': u'http://subsonic.org/restapi'}
"""
methodName = 'getLicense'
viewName = '%s.view' % methodName
req = self._getRequest(viewName)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def getMusicFolders(self):
"""
since: 1.0.0
Returns all configured music folders
Returns a dict like the following:
{u'musicFolders': {u'musicFolder': [{u'id': 0, u'name': u'folder1'},
{u'id': 1, u'name': u'folder2'},
{u'id': 2, u'name': u'folder3'}]},
u'status': u'ok',
u'version': u'1.5.0',
u'xmlns': u'http://subsonic.org/restapi'}
"""
methodName = 'getMusicFolders'
viewName = '%s.view' % methodName
req = self._getRequest(viewName)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def getNowPlaying(self):
"""
since: 1.0.0
Returns what is currently being played by all users
Returns a dict like the following:
{u'nowPlaying': {u'entry': {u'album': u"Jazz 'Round Midnight 12",
u'artist': u'Astrud Gilberto',
u'bitRate': 172,
u'contentType': u'audio/mpeg',
u'coverArt': u'98349284',
u'duration': 325,
u'genre': u'Jazz',
u'id': u'2424324',
u'isDir': False,
u'isVideo': False,
u'minutesAgo': 0,
u'parent': u'542352',
u'path': u"Astrud Gilberto/Jazz 'Round Midnight 12/01 - The Girl From Ipanema.mp3",
u'playerId': 1,
u'size': 7004089,
u'suffix': u'mp3',
u'title': u'The Girl From Ipanema',
u'track': 1,
u'username': u'user1',
u'year': 1996}},
u'status': u'ok',
u'version': u'1.5.0',
u'xmlns': u'http://subsonic.org/restapi'}
"""
methodName = 'getNowPlaying'
viewName = '%s.view' % methodName
req = self._getRequest(viewName)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def getIndexes(self, musicFolderId=None, ifModifiedSince=0):
"""
since: 1.0.0
Returns an indexed structure of all artists
musicFolderId:int If this is specified, it will only return
artists for the given folder ID from
the getMusicFolders call
ifModifiedSince:int If specified, return a result if the artist
collection has changed since the given
unix timestamp
Returns a dict like the following:
{u'indexes': {u'index': [{u'artist': [{u'id': u'29834728934',
u'name': u'A Perfect Circle'},
{u'id': u'238472893',
u'name': u'A Small Good Thing'},
{u'id': u'9327842983',
u'name': u'A Tribe Called Quest'},
{u'id': u'29348729874',
u'name': u'A-Teens, The'},
{u'id': u'298472938',
u'name': u'ABA STRUCTURE'}],
u'lastModified': 1303318347000L},
u'status': u'ok',
u'version': u'1.5.0',
u'xmlns': u'http://subsonic.org/restapi'}
"""
methodName = 'getIndexes'
viewName = '%s.view' % methodName
q = self._getQueryDict({'musicFolderId': musicFolderId,
'ifModifiedSince': self._ts2milli(ifModifiedSince)})
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
self._fixLastModified(res)
return res
def getMusicDirectory(self, mid):
"""
since: 1.0.0
Returns a listing of all files in a music directory. Typically used
to get a list of albums for an artist or list of songs for an album.
mid:str The string ID value which uniquely identifies the
folder. Obtained via calls to getIndexes or
getMusicDirectory. REQUIRED
Returns a dict like the following:
{u'directory': {u'child': [{u'artist': u'A Tribe Called Quest',
u'coverArt': u'223484',
u'id': u'329084',
u'isDir': True,
u'parent': u'234823940',
u'title': u'Beats, Rhymes And Life'},
{u'artist': u'A Tribe Called Quest',
u'coverArt': u'234823794',
u'id': u'238472893',
u'isDir': True,
u'parent': u'2308472938',
u'title': u'Midnight Marauders'},
{u'artist': u'A Tribe Called Quest',
u'coverArt': u'39284792374',
u'id': u'983274892',
u'isDir': True,
u'parent': u'9823749',
u'title': u"People's Instinctive Travels And The Paths Of Rhythm"},
{u'artist': u'A Tribe Called Quest',
u'coverArt': u'289347293',
u'id': u'3894723934',
u'isDir': True,
u'parent': u'9832942',
u'title': u'The Anthology'},
{u'artist': u'A Tribe Called Quest',
u'coverArt': u'923847923',
u'id': u'29834729',
u'isDir': True,
u'parent': u'2934872893',
u'title': u'The Love Movement'},
{u'artist': u'A Tribe Called Quest',
u'coverArt': u'9238742893',
u'id': u'238947293',
u'isDir': True,
u'parent': u'9432878492',
u'title': u'The Low End Theory'}],
u'id': u'329847293',
u'name': u'A Tribe Called Quest'},
u'status': u'ok',
u'version': u'1.5.0',
u'xmlns': u'http://subsonic.org/restapi'}
"""
methodName = 'getMusicDirectory'
viewName = '%s.view' % methodName
req = self._getRequest(viewName, {'id': mid})
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def search(self, artist=None, album=None, title=None, any=None,
count=20, offset=0, newerThan=None):
"""
since: 1.0.0
DEPRECATED SINCE API 1.4.0! USE search2() INSTEAD!
Returns a listing of files matching the given search criteria.
Supports paging with offset
artist:str Search for artist
album:str Search for album
title:str Search for title of song
any:str Search all fields
count:int Max number of results to return [default: 20]
offset:int Search result offset. For paging [default: 0]
newerThan:int Return matches newer than this timestamp
"""
if artist == album == title == any == None:
raise ArgumentError('Invalid search. You must supply search '
'criteria')
methodName = 'search'
viewName = '%s.view' % methodName
q = self._getQueryDict({'artist': artist, 'album': album,
'title': title, 'any': any, 'count': count, 'offset': offset,
'newerThan': self._ts2milli(newerThan)})
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def search2(self, query, artistCount=20, artistOffset=0, albumCount=20,
albumOffset=0, songCount=20, songOffset=0, musicFolderId=None):
"""
since: 1.4.0
Returns albums, artists and songs matching the given search criteria.
Supports paging through the result.
query:str The search query
artistCount:int Max number of artists to return [default: 20]
artistOffset:int Search offset for artists (for paging) [default: 0]
albumCount:int Max number of albums to return [default: 20]
albumOffset:int Search offset for albums (for paging) [default: 0]
songCount:int Max number of songs to return [default: 20]
songOffset:int Search offset for songs (for paging) [default: 0]
musicFolderId:int Only return results from the music folder
with the given ID. See getMusicFolders
Returns a dict like the following:
{u'searchResult2': {u'album': [{u'artist': u'A Tribe Called Quest',
u'coverArt': u'289347',
u'id': u'32487298',
u'isDir': True,
u'parent': u'98374289',
u'title': u'The Love Movement'}],
u'artist': [{u'id': u'2947839',
u'name': u'A Tribe Called Quest'},
{u'id': u'239847239',
u'name': u'Tribe'}],
u'song': [{u'album': u'Beats, Rhymes And Life',
u'artist': u'A Tribe Called Quest',
u'bitRate': 224,
u'contentType': u'audio/mpeg',
u'coverArt': u'329847',
u'duration': 148,
u'genre': u'default',
u'id': u'3928472893',
u'isDir': False,
u'isVideo': False,
u'parent': u'23984728394',
u'path': u'A Tribe Called Quest/Beats, Rhymes And Life/A Tribe Called Quest - Beats, Rhymes And Life - 03 - Motivators.mp3',
u'size': 4171913,
u'suffix': u'mp3',
u'title': u'Motivators',
u'track': 3}]},
u'status': u'ok',
u'version': u'1.5.0',
u'xmlns': u'http://subsonic.org/restapi'}
"""
methodName = 'search2'
viewName = '%s.view' % methodName
q = self._getQueryDict({'query': query, 'artistCount': artistCount,
'artistOffset': artistOffset, 'albumCount': albumCount,
'albumOffset': albumOffset, 'songCount': songCount,
'songOffset': songOffset, 'musicFolderId': musicFolderId})
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
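# Hedged example (not part of the original library): assuming "conn" is a
# working Connection, walking the result shape documented above might look
# like this (a single hit may come back as a dict rather than a list):
#
#   hits = conn.search2('tribe', songCount=5)
#   for song in hits['searchResult2'].get('song', []):
#       print(song['title'])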
def search3(self, query, artistCount=20, artistOffset=0, albumCount=20,
albumOffset=0, songCount=20, songOffset=0, musicFolderId=None):
"""
since: 1.8.0
Works the same way as search2, but uses ID3 tags for
organization
query:str The search query
artistCount:int Max number of artists to return [default: 20]
artistOffset:int Search offset for artists (for paging) [default: 0]
albumCount:int Max number of albums to return [default: 20]
albumOffset:int Search offset for albums (for paging) [default: 0]
songCount:int Max number of songs to return [default: 20]
songOffset:int Search offset for songs (for paging) [default: 0]
musicFolderId:int Only return results from the music folder
with the given ID. See getMusicFolders
Returns a dict like the following (search for "Tune Yards"):
{u'searchResult3': {u'album': [{u'artist': u'Tune-Yards',
u'artistId': 1,
u'coverArt': u'al-7',
u'created': u'2012-01-30T12:35:33',
u'duration': 3229,
u'id': 7,
u'name': u'Bird-Brains',
u'songCount': 13},
{u'artist': u'Tune-Yards',
u'artistId': 1,
u'coverArt': u'al-8',
u'created': u'2011-03-22T15:08:00',
u'duration': 2531,
u'id': 8,
u'name': u'W H O K I L L',
u'songCount': 10}],
u'artist': {u'albumCount': 2,
u'coverArt': u'ar-1',
u'id': 1,
u'name': u'Tune-Yards'},
u'song': [{u'album': u'Bird-Brains',
u'albumId': 7,
u'artist': u'Tune-Yards',
u'artistId': 1,
u'bitRate': 160,
u'contentType': u'audio/mpeg',
u'coverArt': 105,
u'created': u'2012-01-30T12:35:33',
u'duration': 328,
u'genre': u'Lo-Fi',
u'id': 107,
u'isDir': False,
u'isVideo': False,
u'parent': 105,
u'path': u'Tune Yards/Bird-Brains/10-tune-yards-fiya.mp3',
u'size': 6588498,
u'suffix': u'mp3',
u'title': u'Fiya',
u'track': 10,
u'type': u'music',
u'year': 2009}]},
u'status': u'ok',
u'version': u'1.5.0',
u'xmlns': u'http://subsonic.org/restapi'}
"""
methodName = 'search3'
viewName = '%s.view' % methodName
q = self._getQueryDict({'query': query, 'artistCount': artistCount,
'artistOffset': artistOffset, 'albumCount': albumCount,
'albumOffset': albumOffset, 'songCount': songCount,
'songOffset': songOffset, 'musicFolderId': musicFolderId})
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def getPlaylists(self, username=None):
"""
since: 1.0.0
Returns the ID and name of all saved playlists
The "username" option was added in 1.8.0.
username:str If specified, return playlists for this user
rather than for the authenticated user. The
authenticated user must have admin role
if this parameter is used
Returns a dict like the following:
{u'playlists': {u'playlist': [{u'id': u'62656174732e6d3375',
u'name': u'beats'},
{u'id': u'766172696574792e6d3375',
u'name': u'variety'}]},
u'status': u'ok',
u'version': u'1.5.0',
u'xmlns': u'http://subsonic.org/restapi'}
"""
methodName = 'getPlaylists'
viewName = '%s.view' % methodName
q = self._getQueryDict({'username': username})
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def getPlaylist(self, pid):
"""
since: 1.0.0
Returns a listing of files in a saved playlist
id:str The ID of the playlist as returned in getPlaylists()
Returns a dict like the following:
{u'playlist': {u'entry': {u'album': u'The Essential Bob Dylan',
u'artist': u'Bob Dylan',
u'bitRate': 32,
u'contentType': u'audio/mpeg',
u'coverArt': u'2983478293',
u'duration': 984,
u'genre': u'Classic Rock',
u'id': u'982739428',
u'isDir': False,
u'isVideo': False,
u'parent': u'98327428974',
u'path': u"Bob Dylan/Essential Bob Dylan Disc 1/Bob Dylan - The Essential Bob Dylan - 03 - The Times They Are A-Changin'.mp3",
u'size': 3921899,
u'suffix': u'mp3',
u'title': u"The Times They Are A-Changin'",
u'track': 3},
u'id': u'44796c616e2e6d3375',
u'name': u'Dylan'},
u'status': u'ok',
u'version': u'1.5.0',
u'xmlns': u'http://subsonic.org/restapi'}
"""
methodName = 'getPlaylist'
viewName = '%s.view' % methodName
req = self._getRequest(viewName, {'id': pid})
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def createPlaylist(self, playlistId=None, name=None, songIds=[]):
"""
since: 1.2.0
Creates OR updates a playlist. If updating the list, the
playlistId is required. If creating a list, the name is required.
playlistId:str The ID of the playlist to UPDATE
name:str The name of the playlist to CREATE
songIds:list The list of songIds to populate the list with in
either create or update mode. Note that this
list will replace the existing list if updating
Returns a dict like the following:
{u'status': u'ok',
u'version': u'1.5.0',
u'xmlns': u'http://subsonic.org/restapi'}
"""
methodName = 'createPlaylist'
viewName = '%s.view' % methodName
if playlistId == name == None:
raise ArgumentError('You must supply either a playlistId or a name')
if playlistId is not None and name is not None:
raise ArgumentError('You can only supply either a playlistId '
'OR a name, not both')
q = self._getQueryDict({'playlistId': playlistId, 'name': name})
req = self._getRequestWithList(viewName, 'songId', songIds, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
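# Hedged example (not part of the original library): creating a new playlist
# versus replacing the songs of an existing one; the IDs below are the made-up
# values used elsewhere in these docstrings:
#
#   conn.createPlaylist(name='road trip', songIds=['982739428', '9284728934'])
#   conn.createPlaylist(playlistId='44796c616e2e6d3375', songIds=['982739428'])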
def deletePlaylist(self, pid):
"""
since: 1.2.0
Deletes a saved playlist
pid:str ID of the playlist to delete, as obtained by getPlaylists
Returns a standard response dict
"""
methodName = 'deletePlaylist'
viewName = '%s.view' % methodName
req = self._getRequest(viewName, {'id': pid})
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def download(self, sid):
"""
since: 1.0.0
Downloads a given music file.
sid:str The ID of the music file to download.
Returns the file-like object for reading or raises an exception
on error
"""
methodName = 'download'
viewName = '%s.view' % methodName
req = self._getRequest(viewName, {'id': sid})
res = self._doBinReq(req)
if isinstance(res, dict):
self._checkStatus(res)
return res
def stream(self, sid, maxBitRate=0, tformat=None, timeOffset=None,
size=None, estimateContentLength=False):
"""
since: 1.0.0
Downloads a given music file.
sid:str The ID of the music file to download.
maxBitRate:int (since: 1.2.0) If specified, the server will
attempt to limit the bitrate to this value, in
kilobits per second. If set to zero (default), no limit
is imposed. Legal values are: 0, 32, 40, 48, 56, 64,
80, 96, 112, 128, 160, 192, 224, 256 and 320.
tformat:str (since: 1.6.0) Specifies the target format
(e.g. "mp3" or "flv") in case there are multiple
applicable transcodings (since: 1.9.0) You can use
the special value "raw" to disable transcoding
timeOffset:int (since: 1.6.0) Only applicable to video
streaming. Start the stream at the given
offset (in seconds) into the video
size:str (since: 1.6.0) The requested video size in
WxH, for instance 640x480
estimateContentLength:bool (since: 1.8.0) If set to True,
the HTTP Content-Length header
will be set to an estimated
value for transcoded media
Returns the file-like object for reading or raises an exception
on error
"""
methodName = 'stream'
viewName = '%s.view' % methodName
q = self._getQueryDict({'id': sid, 'maxBitRate': maxBitRate,
'format': tformat, 'timeOffset': timeOffset, 'size': size,
'estimateContentLength': estimateContentLength})
req = self._getRequest(viewName, q)
res = self._doBinReq(req)
if isinstance(res, dict):
self._checkStatus(res)
return res
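# Hedged example (not part of the original library): stream() returns a
# file-like object, so saving a transcoded copy to disk (with a made-up song
# ID) could look like:
#
#   fobj = conn.stream('3928472893', maxBitRate=128, tformat='mp3')
#   with open('song.mp3', 'wb') as out:
#       out.write(fobj.read())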
def getCoverArt(self, aid, size=None):
"""
since: 1.0.0
Returns a cover art image
aid:str ID string for the cover art image to download
size:int If specified, scale image to this size
Returns the file-like object for reading or raises an exception
on error
"""
methodName = 'getCoverArt'
viewName = '%s.view' % methodName
q = self._getQueryDict({'id': aid, 'size': size})
req = self._getRequest(viewName, q)
res = self._doBinReq(req)
if isinstance(res, dict):
self._checkStatus(res)
return res
def scrobble(self, sid, submission=True, listenTime=None):
"""
since: 1.5.0
"Scrobbles" a given music file on last.fm. Requires that the user
has set this up.
Since 1.8.0 you may specify multiple id (and optionally time)
parameters to scrobble multiple files.
Since 1.11.0 this method will also update the play count and
last played timestamp for the song and album. It will also make
the song appear in the "Now playing" page in the web app, and
appear in the list of songs returned by getNowPlaying
sid:str The ID of the file to scrobble
submission:bool Whether this is a "submission" or a "now playing"
notification
listenTime:int (Since 1.8.0) The time (unix timestamp) at
which the song was listened to.
Returns a dict like the following:
{u'status': u'ok',
u'version': u'1.5.0',
u'xmlns': u'http://subsonic.org/restapi'}
"""
methodName = 'scrobble'
viewName = '%s.view' % methodName
q = self._getQueryDict({'id': sid, 'submission': submission,
'time': self._ts2milli(listenTime)})
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def changePassword(self, username, password):
"""
since: 1.1.0
Changes the password of an existing Subsonic user. Note that the
user performing this must have admin privileges
username:str The username whose password is being changed
password:str The new password of the user
Returns a dict like the following:
{u'status': u'ok',
u'version': u'1.5.0',
u'xmlns': u'http://subsonic.org/restapi'}
"""
methodName = 'changePassword'
viewName = '%s.view' % methodName
hexPass = 'enc:%s' % self._hexEnc(password)
# There seems to be an issue with some subsonic implementations
# not recognizing the "enc:" precursor to the encoded password and
# encodes the whole "enc:<hex>" as the password. Weird.
#q = {'username': username, 'password': hexPass.lower()}
q = {'username': username, 'password': password}
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def getUser(self, username):
"""
since: 1.3.0
Get details about a given user, including which auth roles it has.
Can be used to enable/disable certain features in the client, such
as jukebox control
username:str The username to retrieve. You can only retrieve
your own user unless you have admin privs.
Returns a dict like the following:
{u'status': u'ok',
u'user': {u'adminRole': False,
u'commentRole': False,
u'coverArtRole': False,
u'downloadRole': True,
u'jukeboxRole': False,
u'playlistRole': True,
u'podcastRole': False,
u'settingsRole': True,
u'streamRole': True,
u'uploadRole': True,
u'username': u'test'},
u'version': u'1.5.0',
u'xmlns': u'http://subsonic.org/restapi'}
"""
methodName = 'getUser'
viewName = '%s.view' % methodName
q = {'username': username}
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def getUsers(self):
"""
since 1.8.0
Gets a list of users
returns a dict like the following
{u'status': u'ok',
u'users': {u'user': [{u'adminRole': True,
u'commentRole': True,
u'coverArtRole': True,
u'downloadRole': True,
u'jukeboxRole': True,
u'playlistRole': True,
u'podcastRole': True,
u'scrobblingEnabled': True,
u'settingsRole': True,
u'shareRole': True,
u'streamRole': True,
u'uploadRole': True,
u'username': u'user1'},
...
...
]},
u'version': u'1.10.2',
u'xmlns': u'http://subsonic.org/restapi'}
"""
methodName = 'getUsers'
viewName = '%s.view' % methodName
req = self._getRequest(viewName)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def createUser(self, username, password, email,
ldapAuthenticated=False, adminRole=False, settingsRole=True,
streamRole=True, jukeboxRole=False, downloadRole=False,
uploadRole=False, playlistRole=False, coverArtRole=False,
commentRole=False, podcastRole=False, shareRole=False,
musicFolderId=None):
"""
since: 1.1.0
Creates a new subsonic user, using the parameters defined. See the
documentation at http://subsonic.org for more info on all the roles.
username:str The username of the new user
password:str The password for the new user
email:str The email of the new user
<For info on the boolean role arguments, see http://subsonic.org>
musicFolderId:int These are the only folders the user has access to
Returns a dict like the following:
{u'status': u'ok',
u'version': u'1.5.0',
u'xmlns': u'http://subsonic.org/restapi'}
"""
methodName = 'createUser'
viewName = '%s.view' % methodName
hexPass = 'enc:%s' % self._hexEnc(password)
q = self._getQueryDict({
'username': username, 'password': hexPass, 'email': email,
'ldapAuthenticated': ldapAuthenticated, 'adminRole': adminRole,
'settingsRole': settingsRole, 'streamRole': streamRole,
'jukeboxRole': jukeboxRole, 'downloadRole': downloadRole,
'uploadRole': uploadRole, 'playlistRole': playlistRole,
'coverArtRole': coverArtRole, 'commentRole': commentRole,
'podcastRole': podcastRole, 'shareRole': shareRole,
'musicFolderId': musicFolderId
})
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def updateUser(self, username, password=None, email=None,
ldapAuthenticated=False, adminRole=False, settingsRole=True,
streamRole=True, jukeboxRole=False, downloadRole=False,
uploadRole=False, playlistRole=False, coverArtRole=False,
commentRole=False, podcastRole=False, shareRole=False,
musicFolderId=None, maxBitRate=0):
"""
since 1.10.1
Modifies an existing Subsonic user.
username:str The username of the user to update.
musicFolderId:int Only return results from the music folder
with the given ID. See getMusicFolders
maxBitRate:int The max bitrate for the user. 0 is unlimited
All other args are the same as create user and you can update
whatever item you wish to update for the given username.
Returns a dict like the following:
{u'status': u'ok',
u'version': u'1.5.0',
u'xmlns': u'http://subsonic.org/restapi'}
"""
methodName = 'updateUser'
viewName = '%s.view' % methodName
if password is not None:
password = 'enc:%s' % self._hexEnc(password)
q = self._getQueryDict({'username': username, 'password': password,
'email': email, 'ldapAuthenticated': ldapAuthenticated,
'adminRole': adminRole,
'settingsRole': settingsRole, 'streamRole': streamRole,
'jukeboxRole': jukeboxRole, 'downloadRole': downloadRole,
'uploadRole': uploadRole, 'playlistRole': playlistRole,
'coverArtRole': coverArtRole, 'commentRole': commentRole,
'podcastRole': podcastRole, 'shareRole': shareRole,
'musicFolderId': musicFolderId, 'maxBitRate': maxBitRate
})
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def deleteUser(self, username):
"""
since: 1.3.0
Deletes an existing Subsonic user. Of course, you must have admin
rights for this.
username:str The username of the user to delete
Returns a dict like the following:
{u'status': u'ok',
u'version': u'1.5.0',
u'xmlns': u'http://subsonic.org/restapi'}
"""
methodName = 'deleteUser'
viewName = '%s.view' % methodName
q = {'username': username}
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def getChatMessages(self, since=1):
"""
since: 1.2.0
Returns the current visible (non-expired) chat messages.
since:int Only return messages newer than this timestamp
NOTE: All times returned are in MILLISECONDS since the Epoch, not
seconds!
Returns a dict like the following:
{u'chatMessages': {u'chatMessage': {u'message': u'testing 123',
u'time': 1303411919872L,
u'username': u'admin'}},
u'status': u'ok',
u'version': u'1.5.0',
u'xmlns': u'http://subsonic.org/restapi'}
"""
methodName = 'getChatMessages'
viewName = '%s.view' % methodName
q = {'since': self._ts2milli(since)}
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def addChatMessage(self, message):
"""
since: 1.2.0
Adds a message to the chat log
message:str The message to add
Returns a dict like the following:
{u'status': u'ok',
u'version': u'1.5.0',
u'xmlns': u'http://subsonic.org/restapi'}
"""
methodName = 'addChatMessage'
viewName = '%s.view' % methodName
q = {'message': message}
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def getAlbumList(self, ltype, size=10, offset=0, fromYear=None,
toYear=None, genre=None, musicFolderId=None):
"""
since: 1.2.0
Returns a list of random, newest, highest rated etc. albums.
Similar to the album lists on the home page of the Subsonic
web interface
ltype:str The list type. Must be one of the following: random,
newest, highest, frequent, recent,
(since 1.8.0 -> )starred, alphabeticalByName,
alphabeticalByArtist
Since 1.10.1 you can use byYear and byGenre to
list albums in a given year range or genre.
size:int The number of albums to return. Max 500
offset:int The list offset. Use for paging. Max 5000
fromYear:int If you specify the ltype as "byYear", you *must*
specify fromYear
toYear:int If you specify the ltype as "byYear", you *must*
specify toYear
genre:str The name of the genre e.g. "Rock". You must specify
genre if you set the ltype to "byGenre"
musicFolderId:str Only return albums in the music folder with
the given ID. See getMusicFolders()
Returns a dict like the following:
{u'albumList': {u'album': [{u'artist': u'Hank Williams',
u'id': u'3264928374',
u'isDir': True,
u'parent': u'9238479283',
u'title': u'The Original Singles Collection...Plus'},
{u'artist': u'Freundeskreis',
u'coverArt': u'9823749823',
u'id': u'23492834',
u'isDir': True,
u'parent': u'9827492374',
u'title': u'Quadratur des Kreises'}]},
u'status': u'ok',
u'version': u'1.5.0',
u'xmlns': u'http://subsonic.org/restapi'}
"""
methodName = 'getAlbumList'
viewName = '%s.view' % methodName
q = self._getQueryDict({'type': ltype, 'size': size,
'offset': offset, 'fromYear': fromYear, 'toYear': toYear,
'genre': genre, 'musicFolderId': musicFolderId})
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def getAlbumList2(self, ltype, size=10, offset=0, fromYear=None,
toYear=None, genre=None):
"""
since 1.8.0
Returns a list of random, newest, highest rated etc. albums.
This is similar to getAlbumList, but uses ID3 tags for
organization
ltype:str The list type. Must be one of the following: random,
newest, highest, frequent, recent,
(since 1.8.0 -> )starred, alphabeticalByName,
alphabeticalByArtist
Since 1.10.1 you can use byYear and byGenre to
list albums in a given year range or genre.
size:int The number of albums to return. Max 500
offset:int The list offset. Use for paging. Max 5000
fromYear:int If you specify the ltype as "byYear", you *must*
specify fromYear
toYear:int If you specify the ltype as "byYear", you *must*
specify toYear
genre:str The name of the genre e.g. "Rock". You must specify
genre if you set the ltype to "byGenre"
Returns a dict like the following:
{u'albumList2': {u'album': [{u'artist': u'Massive Attack',
u'artistId': 0,
u'coverArt': u'al-0',
u'created': u'2009-08-28T10:00:44',
u'duration': 3762,
u'id': 0,
u'name': u'100th Window',
u'songCount': 9},
{u'artist': u'Massive Attack',
u'artistId': 0,
u'coverArt': u'al-5',
u'created': u'2003-11-03T22:00:00',
u'duration': 2715,
u'id': 5,
u'name': u'Blue Lines',
u'songCount': 9}]},
u'status': u'ok',
u'version': u'1.8.0',
u'xmlns': u'http://subsonic.org/restapi'}
"""
methodName = 'getAlbumList2'
viewName = '%s.view' % methodName
q = self._getQueryDict({'type': ltype, 'size': size,
'offset': offset, 'fromYear': fromYear, 'toYear': toYear,
'genre': genre})
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def getRandomSongs(self, size=10, genre=None, fromYear=None,
toYear=None, musicFolderId=None):
"""
since 1.2.0
Returns random songs matching the given criteria
size:int The max number of songs to return. Max 500
genre:str Only return songs from this genre
fromYear:int Only return songs after or in this year
toYear:int Only return songs before or in this year
musicFolderId:str Only return songs in the music folder with the
given ID. See getMusicFolders
Returns a dict like the following:
{u'randomSongs': {u'song': [{u'album': u'1998 EP - Airbag (How Am I Driving)',
u'artist': u'Radiohead',
u'bitRate': 320,
u'contentType': u'audio/mpeg',
u'duration': 129,
u'id': u'9284728934',
u'isDir': False,
u'isVideo': False,
u'parent': u'983249823',
u'path': u'Radiohead/1998 EP - Airbag (How Am I Driving)/06 - Melatonin.mp3',
u'size': 5177469,
u'suffix': u'mp3',
u'title': u'Melatonin'},
{u'album': u'Mezmerize',
u'artist': u'System Of A Down',
u'bitRate': 214,
u'contentType': u'audio/mpeg',
u'coverArt': u'23849372894',
u'duration': 176,
u'id': u'28937492834',
u'isDir': False,
u'isVideo': False,
u'parent': u'92837492837',
u'path': u'System Of A Down/Mesmerize/10 - System Of A Down - Old School Hollywood.mp3',
u'size': 4751360,
u'suffix': u'mp3',
u'title': u'Old School Hollywood',
u'track': 10}]},
u'status': u'ok',
u'version': u'1.5.0',
u'xmlns': u'http://subsonic.org/restapi'}
"""
methodName = 'getRandomSongs'
viewName = '%s.view' % methodName
q = self._getQueryDict({'size': size, 'genre': genre,
'fromYear': fromYear, 'toYear': toYear,
'musicFolderId': musicFolderId})
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def getLyrics(self, artist=None, title=None):
"""
since: 1.2.0
Searches for and returns lyrics for a given song
artist:str The artist name
title:str The song title
Returns a dict like the following for
getLyrics('Bob Dylan', 'Blowin in the wind'):
{u'lyrics': {u'artist': u'Bob Dylan',
u'content': u"How many roads must a man walk down<snip>",
u'title': u"Blowin' in the Wind"},
u'status': u'ok',
u'version': u'1.5.0',
u'xmlns': u'http://subsonic.org/restapi'}
"""
methodName = 'getLyrics'
viewName = '%s.view' % methodName
q = self._getQueryDict({'artist': artist, 'title': title})
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def jukeboxControl(self, action, index=None, sids=[], gain=None,
offset=None):
"""
since: 1.2.0
NOTE: Some options were added as of API version 1.7.0
Controls the jukebox, i.e., playback directly on the server's
audio hardware. Note: The user must be authorized to control
the jukebox
action:str The operation to perform. Must be one of: get,
start, stop, skip, add, clear, remove, shuffle,
setGain, status (added in API 1.7.0),
set (added in API 1.7.0)
index:int Used by skip and remove. Zero-based index of the
song to skip to or remove.
sids:str Used by "add" and "set". ID of song to add to the
jukebox playlist. Use multiple id parameters to
add many songs in the same request. Whether you
are passing one song or many into this, this
parameter MUST be a list
gain:float Used by setGain to control the playback volume.
A float value between 0.0 and 1.0
offset:int (added in API 1.7.0) Used by "skip". Start playing
this many seconds into the track.
"""
methodName = 'jukeboxControl'
viewName = '%s.view' % methodName
q = self._getQueryDict({'action': action, 'index': index,
'gain': gain, 'offset': offset})
req = None
if action == 'add':
# We have to deal with the sids
if not (isinstance(sids, list) or isinstance(sids, tuple)):
raise ArgumentError('If you are adding songs, "sids" must '
'be a list or tuple!')
req = self._getRequestWithList(viewName, 'id', sids, q)
else:
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
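# Hedged example (not part of the original library): note that "sids" must be
# a list (or tuple) even when adding a single song; the ID below is made up:
#
#   conn.jukeboxControl('add', sids=['3928472893'])
#   conn.jukeboxControl('start')
#   conn.jukeboxControl('setGain', gain=0.5)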
def getPodcasts(self, incEpisodes=True, pid=None):
"""
since: 1.6.0
Returns all podcast channels the server subscribes to and their
episodes.
incEpisodes:bool (since: 1.9.0) Whether to include Podcast
episodes in the returned result.
pid:str (since: 1.9.0) If specified, only return
the Podcast channel with this ID.
Returns a dict like the following:
{u'status': u'ok',
u'version': u'1.6.0',
u'xmlns': u'http://subsonic.org/restapi',
u'podcasts': {u'channel': {u'description': u"Dr Chris Smith...",
u'episode': [{u'album': u'Dr Karl and the Naked Scientist',
u'artist': u'BBC Radio 5 live',
u'bitRate': 64,
u'contentType': u'audio/mpeg',
u'coverArt': u'2f6f7074',
u'description': u'Dr Karl answers all your science related questions.',
u'duration': 2902,
u'genre': u'Podcast',
u'id': 0,
u'isDir': False,
u'isVideo': False,
u'parent': u'2f6f70742f737562736f6e69632f706f6463617374732f4472204b61726c20616e6420746865204e616b656420536369656e74697374',
u'publishDate': u'2011-08-17 22:06:00.0',
u'size': 23313059,
u'status': u'completed',
u'streamId': u'2f6f70742f737562736f6e69632f706f6463617374732f4472204b61726c20616e6420746865204e616b656420536369656e746973742f64726b61726c5f32303131303831382d30343036612e6d7033',
u'suffix': u'mp3',
u'title': u'DrKarl: Peppermints, Chillies & Receptors',
u'year': 2011},
{u'description': u'which is warmer, a bath with bubbles in it or one without? Just one of the stranger science stories tackled this week by Dr Chris Smith and the Naked Scientists!',
u'id': 1,
u'publishDate': u'2011-08-14 21:05:00.0',
u'status': u'skipped',
u'title': u'DrKarl: how many bubbles in your bath? 15 AUG 11'},
...
{u'description': u'Dr Karl joins Rhod to answer all your science questions',
u'id': 9,
u'publishDate': u'2011-07-06 22:12:00.0',
u'status': u'skipped',
u'title': u'DrKarl: 8 Jul 11 The Strange Sound of the MRI Scanner'}],
u'id': 0,
u'status': u'completed',
u'title': u'Dr Karl and the Naked Scientist',
u'url': u'http://downloads.bbc.co.uk/podcasts/fivelive/drkarl/rss.xml'}}
}
See also: http://subsonic.svn.sourceforge.net/viewvc/subsonic/trunk/subsonic-main/src/main/webapp/xsd/podcasts_example_1.xml?view=markup
"""
methodName = 'getPodcasts'
viewName = '%s.view' % methodName
q = self._getQueryDict({'includeEpisodes': incEpisodes,
'id': pid})
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def getShares(self):
"""
since: 1.6.0
Returns information about shared media this user is allowed to manage
Note that entry can be either a single dict or a list of dicts
Returns a dict like the following:
{u'status': u'ok',
u'version': u'1.6.0',
u'xmlns': u'http://subsonic.org/restapi',
u'shares': {u'share': [
{u'created': u'2011-08-18T10:01:35',
u'entry': {u'artist': u'Alice In Chains',
u'coverArt': u'2f66696c65732f6d7033732f412d4d2f416c69636520496e20436861696e732f416c69636520496e20436861696e732f636f7665722e6a7067',
u'id': u'2f66696c65732f6d7033732f412d4d2f416c69636520496e20436861696e732f416c69636520496e20436861696e73',
u'isDir': True,
u'parent': u'2f66696c65732f6d7033732f412d4d2f416c69636520496e20436861696e73',
u'title': u'Alice In Chains'},
u'expires': u'2012-08-18T10:01:35',
u'id': 0,
u'url': u'http://crustymonkey.subsonic.org/share/BuLbF',
u'username': u'admin',
u'visitCount': 0
}]}
}
"""
methodName = 'getShares'
viewName = '%s.view' % methodName
req = self._getRequest(viewName)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def createShare(self, shids=[], description=None, expires=None):
"""
since: 1.6.0
Creates a public URL that can be used by anyone to stream music
or video from the Subsonic server. The URL is short and suitable
for posting on Facebook, Twitter etc. Note: The user must be
authorized to share (see Settings > Users > User is allowed to
share files with anyone).
shids:list[str] A list of ids of songs, albums or videos
to share.
description:str A description that will be displayed to
people visiting the shared media
(optional).
expires:float A timestamp pertaining to the time at
which this should expire (optional)
This returns a structure like you would get back from getShares()
containing just your new share.
"""
methodName = 'createShare'
viewName = '%s.view' % methodName
q = self._getQueryDict({'description': description,
'expires': self._ts2milli(expires)})
req = self._getRequestWithList(viewName, 'id', shids, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def updateShare(self, shid, description=None, expires=None):
"""
since: 1.6.0
Updates the description and/or expiration date for an existing share
shid:str The id of the share to update
description:str The new description for the share (optional).
expires:float The new timestamp for the expiration time of this
share (optional).
"""
methodName = 'updateShare'
viewName = '%s.view' % methodName
q = self._getQueryDict({'id': shid, 'description': description,
expires: self._ts2milli(expires)})
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def deleteShare(self, shid):
"""
since: 1.6.0
Deletes an existing share
shid:str The id of the share to delete
Returns a standard response dict
"""
methodName = 'deleteShare'
viewName = '%s.view' % methodName
q = self._getQueryDict({'id': shid})
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def setRating(self, id, rating):
"""
since: 1.6.0
Sets the rating for a music file
id:str The id of the item (song/artist/album) to rate
rating:int The rating between 1 and 5 (inclusive), or 0 to remove
the rating
Returns a standard response dict
"""
methodName = 'setRating'
viewName = '%s.view' % methodName
try:
rating = int(rating)
except:
raise ArgumentError('Rating must be an integer between 0 and 5: '
'%r' % rating)
if rating < 0 or rating > 5:
raise ArgumentError('Rating must be an integer between 0 and 5: '
'%r' % rating)
q = self._getQueryDict({'id': id, 'rating': rating})
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def getArtists(self):
"""
since 1.8.0
Similar to getIndexes(), but this method uses the ID3 tags to
determine the artist
Returns a dict like the following:
{u'artists': {u'index': [{u'artist': {u'albumCount': 7,
u'coverArt': u'ar-0',
u'id': 0,
u'name': u'Massive Attack'},
u'name': u'M'},
{u'artist': {u'albumCount': 2,
u'coverArt': u'ar-1',
u'id': 1,
u'name': u'Tune-Yards'},
u'name': u'T'}]},
u'status': u'ok',
u'version': u'1.8.0',
u'xmlns': u'http://subsonic.org/restapi'}
"""
methodName = 'getArtists'
viewName = '%s.view' % methodName
req = self._getRequest(viewName)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def getArtist(self, id):
"""
since 1.8.0
Returns the info (albums) for an artist. This method uses
the ID3 tags for organization
id:str The artist ID
Returns a dict like the following:
{u'artist': {u'album': [{u'artist': u'Tune-Yards',
u'artistId': 1,
u'coverArt': u'al-7',
u'created': u'2012-01-30T12:35:33',
u'duration': 3229,
u'id': 7,
u'name': u'Bird-Brains',
u'songCount': 13},
{u'artist': u'Tune-Yards',
u'artistId': 1,
u'coverArt': u'al-8',
u'created': u'2011-03-22T15:08:00',
u'duration': 2531,
u'id': 8,
u'name': u'W H O K I L L',
u'songCount': 10}],
u'albumCount': 2,
u'coverArt': u'ar-1',
u'id': 1,
u'name': u'Tune-Yards'},
u'status': u'ok',
u'version': u'1.8.0',
u'xmlns': u'http://subsonic.org/restapi'}
"""
methodName = 'getArtist'
viewName = '%s.view' % methodName
q = self._getQueryDict({'id': id})
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def getAlbum(self, id):
"""
since 1.8.0
Returns the info and songs for an album. This method uses
the ID3 tags for organization
id:str The album ID
Returns a dict like the following:
{u'album': {u'artist': u'Massive Attack',
u'artistId': 0,
u'coverArt': u'al-0',
u'created': u'2009-08-28T10:00:44',
u'duration': 3762,
u'id': 0,
u'name': u'100th Window',
u'song': [{u'album': u'100th Window',
u'albumId': 0,
u'artist': u'Massive Attack',
u'artistId': 0,
u'bitRate': 192,
u'contentType': u'audio/mpeg',
u'coverArt': 2,
u'created': u'2009-08-28T10:00:57',
u'duration': 341,
u'genre': u'Rock',
u'id': 14,
u'isDir': False,
u'isVideo': False,
u'parent': 2,
u'path': u'Massive Attack/100th Window/01 - Future Proof.mp3',
u'size': 8184445,
u'suffix': u'mp3',
u'title': u'Future Proof',
u'track': 1,
u'type': u'music',
u'year': 2003}],
u'songCount': 9},
u'status': u'ok',
u'version': u'1.8.0',
u'xmlns': u'http://subsonic.org/restapi'}
"""
methodName = 'getAlbum'
viewName = '%s.view' % methodName
q = self._getQueryDict({'id': id})
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def getSong(self, id):
"""
since 1.8.0
Returns the info for a song. This method uses the ID3
tags for organization
id:str The song ID
Returns a dict like the following:
{u'song': {u'album': u'W H O K I L L',
u'albumId': 8,
u'artist': u'Tune-Yards',
u'artistId': 1,
u'bitRate': 320,
u'contentType': u'audio/mpeg',
u'coverArt': 106,
u'created': u'2011-03-22T15:08:00',
u'discNumber': 1,
u'duration': 192,
u'genre': u'Indie Rock',
u'id': 120,
u'isDir': False,
u'isVideo': False,
u'parent': 106,
u'path': u'Tune Yards/Who Kill/10 Killa.mp3',
u'size': 7692656,
u'suffix': u'mp3',
u'title': u'Killa',
u'track': 10,
u'type': u'music',
u'year': 2011},
u'status': u'ok',
u'version': u'1.8.0',
u'xmlns': u'http://subsonic.org/restapi'}
"""
methodName = 'getSong'
viewName = '%s.view' % methodName
q = self._getQueryDict({'id': id})
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def getVideos(self):
"""
since 1.8.0
Returns all video files
Returns a dict like the following:
{u'status': u'ok',
u'version': u'1.8.0',
u'videos': {u'video': {u'bitRate': 384,
u'contentType': u'video/x-matroska',
u'created': u'2012-08-26T13:36:44',
u'duration': 1301,
u'id': 130,
u'isDir': False,
u'isVideo': True,
u'path': u'South Park - 16x07 - Cartman Finds Love.mkv',
u'size': 287309613,
u'suffix': u'mkv',
u'title': u'South Park - 16x07 - Cartman Finds Love',
u'transcodedContentType': u'video/x-flv',
u'transcodedSuffix': u'flv'}},
u'xmlns': u'http://subsonic.org/restapi'}
"""
methodName = 'getVideos'
viewName = '%s.view' % methodName
req = self._getRequest(viewName)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def getStarred(self, musicFolderId=None):
"""
since 1.8.0
musicFolderId:int Only return results from the music folder
with the given ID. See getMusicFolders
Returns starred songs, albums and artists
Returns a dict like the following:
{u'starred': {u'album': {u'album': u'Bird-Brains',
u'artist': u'Tune-Yards',
u'coverArt': 105,
u'created': u'2012-01-30T13:16:58',
u'id': 105,
u'isDir': True,
u'parent': 104,
u'starred': u'2012-08-26T13:18:34',
u'title': u'Bird-Brains'},
u'song': [{u'album': u'Mezzanine',
u'albumId': 4,
u'artist': u'Massive Attack',
u'artistId': 0,
u'bitRate': 256,
u'contentType': u'audio/mpeg',
u'coverArt': 6,
u'created': u'2009-06-15T07:48:28',
u'duration': 298,
u'genre': u'Dub',
u'id': 72,
u'isDir': False,
u'isVideo': False,
u'parent': 6,
u'path': u'Massive Attack/Mezzanine/Massive Attack_02_mezzanine.mp3',
u'size': 9564160,
u'starred': u'2012-08-26T13:19:26',
u'suffix': u'mp3',
u'title': u'Risingson',
u'track': 2,
u'type': u'music'},
{u'album': u'Mezzanine',
u'albumId': 4,
u'artist': u'Massive Attack',
u'artistId': 0,
u'bitRate': 256,
u'contentType': u'audio/mpeg',
u'coverArt': 6,
u'created': u'2009-06-15T07:48:25',
u'duration': 380,
u'genre': u'Dub',
u'id': 71,
u'isDir': False,
u'isVideo': False,
u'parent': 6,
u'path': u'Massive Attack/Mezzanine/Massive Attack_01_mezzanine.mp3',
u'size': 12179456,
u'starred': u'2012-08-26T13:19:03',
u'suffix': u'mp3',
u'title': u'Angel',
u'track': 1,
u'type': u'music'}]},
u'status': u'ok',
u'version': u'1.8.0',
u'xmlns': u'http://subsonic.org/restapi'}
"""
methodName = 'getStarred'
viewName = '%s.view' % methodName
q = {}
if musicFolderId:
q['musicFolderId'] = musicFolderId
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def getStarred2(self, musicFolderId=None):
"""
since 1.8.0
musicFolderId:int Only return results from the music folder
with the given ID. See getMusicFolders
Returns starred songs, albums and artists like getStarred(),
but this uses ID3 tags for organization
Returns a dict like the following:
**See the output from getStarred()**
"""
methodName = 'getStarred2'
viewName = '%s.view' % methodName
q = {}
if musicFolderId:
q['musicFolderId'] = musicFolderId
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def updatePlaylist(self, lid, name=None, comment=None, songIdsToAdd=[],
songIndexesToRemove=[]):
"""
since 1.8.0
Updates a playlist. Only the owner of a playlist is allowed to
update it.
lid:str The playlist id
name:str The human readable name of the playlist
comment:str The playlist comment
songIdsToAdd:list A list of song IDs to add to the playlist
songIndexesToRemove:list Remove the songs at the
0 BASED INDEXED POSITIONS in the
playlist, NOT the song ids. Note that
this is always a list.
Returns a normal status response dict
"""
methodName = 'updatePlaylist'
viewName = '%s.view' % methodName
q = self._getQueryDict({'playlistId': lid, 'name': name,
'comment': comment})
        if not isinstance(songIdsToAdd, (list, tuple)):
            songIdsToAdd = [songIdsToAdd]
        if not isinstance(songIndexesToRemove, (list, tuple)):
            songIndexesToRemove = [songIndexesToRemove]
listMap = {'songIdToAdd': songIdsToAdd,
'songIndexToRemove': songIndexesToRemove}
req = self._getRequestWithLists(viewName, listMap, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
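    # Illustrative call, assuming "conn" is an instance of this class and the
    # ids are hypothetical; note that songIndexesToRemove takes 0-based
    # positions within the playlist, not song ids:
    #   conn.updatePlaylist('pl-12', name='Road trip',
    #                       songIdsToAdd=['s-120', 's-121'],
    #                       songIndexesToRemove=[0, 3])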
def getAvatar(self, username):
"""
since 1.8.0
Returns the avatar for a user or None if the avatar does not exist
username:str The user to retrieve the avatar for
Returns the file-like object for reading or raises an exception
on error
"""
methodName = 'getAvatar'
viewName = '%s.view' % methodName
q = {'username': username}
req = self._getRequest(viewName, q)
try:
res = self._doBinReq(req)
except urllib2.HTTPError:
# Avatar is not set/does not exist, return None
return None
if isinstance(res, dict):
self._checkStatus(res)
return res
def star(self, sids=[], albumIds=[], artistIds=[]):
"""
since 1.8.0
Attaches a star to songs, albums or artists
sids:list A list of song IDs to star
        albumIds:list A list of album IDs to star. Use this rather than
            "sids" if the client accesses the media collection
            according to ID3 tags rather than file structure
        artistIds:list A list of artist IDs to star. Use this rather than
            "sids" if the client accesses the media collection
            according to ID3 tags rather than file structure
Returns a normal status response dict
"""
methodName = 'star'
viewName = '%s.view' % methodName
        if not isinstance(sids, (list, tuple)):
            sids = [sids]
        if not isinstance(albumIds, (list, tuple)):
            albumIds = [albumIds]
        if not isinstance(artistIds, (list, tuple)):
            artistIds = [artistIds]
listMap = {'id': sids,
'albumId': albumIds,
'artistId': artistIds}
req = self._getRequestWithLists(viewName, listMap)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def unstar(self, sids=[], albumIds=[], artistIds=[]):
"""
since 1.8.0
        Removes a star from songs, albums or artists. Basically, the
        same as star() in reverse
        sids:list A list of song IDs to unstar
        albumIds:list A list of album IDs to unstar. Use this rather than
            "sids" if the client accesses the media collection
            according to ID3 tags rather than file structure
        artistIds:list A list of artist IDs to unstar. Use this rather than
            "sids" if the client accesses the media collection
            according to ID3 tags rather than file structure
Returns a normal status response dict
"""
methodName = 'unstar'
viewName = '%s.view' % methodName
        if not isinstance(sids, (list, tuple)):
            sids = [sids]
        if not isinstance(albumIds, (list, tuple)):
            albumIds = [albumIds]
        if not isinstance(artistIds, (list, tuple)):
            artistIds = [artistIds]
listMap = {'id': sids,
'albumId': albumIds,
'artistId': artistIds}
req = self._getRequestWithLists(viewName, listMap)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
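    # Illustrative calls, assuming "conn" is an instance of this class and the
    # ids are hypothetical; albumIds/artistIds are for clients that browse by
    # ID3 tags rather than file structure:
    #   conn.star(sids=['72', '71'])
    #   conn.unstar(albumIds=['al-7'])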
def getGenres(self):
"""
since 1.9.0
Returns all genres
"""
methodName = 'getGenres'
viewName = '%s.view' % methodName
req = self._getRequest(viewName)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def getSongsByGenre(self, genre, count=10, offset=0, musicFolderId=None):
"""
since 1.9.0
Returns songs in a given genre
genre:str The genre, as returned by getGenres()
count:int The maximum number of songs to return. Max is 500
default: 10
offset:int The offset if you are paging. default: 0
musicFolderId:int Only return results from the music folder
with the given ID. See getMusicFolders
"""
        methodName = 'getSongsByGenre'
viewName = '%s.view' % methodName
q = self._getQueryDict({'genre': genre,
'count': count,
'offset': offset,
'musicFolderId': musicFolderId,
})
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def hls (self, mid, bitrate=None):
"""
since 1.8.0
        Creates an HTTP live streaming playlist for streaming video or
        audio.  HLS is a streaming protocol implemented by Apple and
works by breaking the overall stream into a sequence of small
HTTP-based file downloads. It's supported by iOS and newer
versions of Android. This method also supports adaptive
bitrate streaming, see the bitRate parameter.
mid:str The ID of the media to stream
bitrate:str If specified, the server will attempt to limit the
bitrate to this value, in kilobits per second. If
this parameter is specified more than once, the
server will create a variant playlist, suitable
for adaptive bitrate streaming. The playlist will
support streaming at all the specified bitrates.
The server will automatically choose video dimensions
that are suitable for the given bitrates.
(since: 1.9.0) you may explicitly request a certain
width (480) and height (360) like so:
bitRate=1000@480x360
Returns the raw m3u8 file as a string
"""
methodName = 'hls'
viewName = '%s.view' % methodName
q = self._getQueryDict({'id': mid, 'bitrate': bitrate})
req = self._getRequest(viewName, q)
try:
res = self._doBinReq(req)
except urllib2.HTTPError:
            # The media does not exist or the playlist could not be
            # created, return None
return None
if isinstance(res, dict):
self._checkStatus(res)
return res.read()
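    # Illustrative call with a hypothetical media id; per the docstring, since
    # API 1.9.0 the bitrate may also carry explicit dimensions:
    #   m3u8_text = conn.hls('130', bitrate='1000@480x360')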
def refreshPodcasts(self):
"""
since: 1.9.0
Tells the server to check for new Podcast episodes. Note: The user
must be authorized for Podcast administration
"""
methodName = 'refreshPodcasts'
viewName = '%s.view' % methodName
req = self._getRequest(viewName)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def createPodcastChannel(self, url):
"""
since: 1.9.0
Adds a new Podcast channel. Note: The user must be authorized
for Podcast administration
url:str The URL of the Podcast to add
"""
methodName = 'createPodcastChannel'
viewName = '%s.view' % methodName
q = {'url': url}
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def deletePodcastChannel(self, pid):
"""
since: 1.9.0
Deletes a Podcast channel. Note: The user must be authorized
for Podcast administration
pid:str The ID of the Podcast channel to delete
"""
methodName = 'deletePodcastChannel'
viewName = '%s.view' % methodName
q = {'id': pid}
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def deletePodcastEpisode(self, pid):
"""
since: 1.9.0
Deletes a Podcast episode. Note: The user must be authorized
for Podcast administration
pid:str The ID of the Podcast episode to delete
"""
methodName = 'deletePodcastEpisode'
viewName = '%s.view' % methodName
q = {'id': pid}
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def downloadPodcastEpisode(self, pid):
"""
since: 1.9.0
Tells the server to start downloading a given Podcast episode.
Note: The user must be authorized for Podcast administration
pid:str The ID of the Podcast episode to download
"""
methodName = 'downloadPodcastEpisode'
viewName = '%s.view' % methodName
q = {'id': pid}
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def getInternetRadioStations(self):
"""
since: 1.9.0
Returns all internet radio stations
"""
methodName = 'getInternetRadioStations'
viewName = '%s.view' % methodName
req = self._getRequest(viewName)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def getBookmarks(self):
"""
since: 1.9.0
Returns all bookmarks for this user. A bookmark is a position
within a media file
"""
methodName = 'getBookmarks'
viewName = '%s.view' % methodName
req = self._getRequest(viewName)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def createBookmark(self, mid, position, comment=None):
"""
since: 1.9.0
Creates or updates a bookmark (position within a media file).
Bookmarks are personal and not visible to other users
mid:str The ID of the media file to bookmark. If a bookmark
already exists for this file, it will be overwritten
position:int The position (in milliseconds) within the media file
comment:str A user-defined comment
"""
methodName = 'createBookmark'
viewName = '%s.view' % methodName
q = self._getQueryDict({'id': mid, 'position': position,
'comment': comment})
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def deleteBookmark(self, mid):
"""
since: 1.9.0
Deletes the bookmark for a given file
mid:str The ID of the media file to delete the bookmark from.
Other users' bookmarks are not affected
"""
methodName = 'deleteBookmark'
viewName = '%s.view' % methodName
q = {'id': mid}
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def getArtistInfo(self, aid, count=20, includeNotPresent=False):
"""
since: 1.11.0
Returns artist info with biography, image URLS and similar artists
using data from last.fm
aid:str The ID of the artist, album or song
count:int The max number of similar artists to return
includeNotPresent:bool Whether to return artists that are not
present in the media library
"""
methodName = 'getArtistInfo'
viewName = '%s.view' % methodName
q = {'id': aid, 'count': count,
'includeNotPresent': includeNotPresent}
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def getArtistInfo2(self, aid, count=20, includeNotPresent=False):
"""
since: 1.11.0
Similar to getArtistInfo(), but organizes music according to ID3 tags
aid:str The ID of the artist, album or song
count:int The max number of similar artists to return
includeNotPresent:bool Whether to return artists that are not
present in the media library
"""
methodName = 'getArtistInfo2'
viewName = '%s.view' % methodName
q = {'id': aid, 'count': count,
'includeNotPresent': includeNotPresent}
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def getSimilarSongs(self, iid, count=50):
"""
since 1.11.0
Returns a random collection of songs from the given artist and
similar artists, using data from last.fm. Typically used for
artist radio features.
iid:str The artist, album, or song ID
count:int Max number of songs to return
"""
methodName = 'getSimilarSongs'
viewName = '%s.view' % methodName
q = {'id': iid, 'count': count}
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def getSimilarSongs2(self, iid, count=50):
"""
since 1.11.0
Similar to getSimilarSongs(), but organizes music according to
ID3 tags
iid:str The artist, album, or song ID
count:int Max number of songs to return
"""
methodName = 'getSimilarSongs2'
viewName = '%s.view' % methodName
q = {'id': iid, 'count': count}
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def savePlayQueue(self, qids, current=None, position=None):
"""
since 1.12.0
        qids:list[int] The list of song ids in the play queue
current:int The id of the current playing song
position:int The position, in milliseconds, within the current
playing song
Saves the state of the play queue for this user. This includes
the tracks in the play queue, the currently playing track, and
the position within this track. Typically used to allow a user to
move between different clients/apps while retaining the same play
queue (for instance when listening to an audio book).
"""
methodName = 'savePlayQueue'
viewName = '%s.view' % methodName
if not isinstance(qids, (tuple, list)):
qids = [qids]
q = self._getQueryDict({'current': current, 'position': position})
req = self._getRequestWithLists(viewName, {'id': qids}, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def getPlayQueue(self):
"""
since 1.12.0
Returns the state of the play queue for this user (as set by
savePlayQueue). This includes the tracks in the play queue,
the currently playing track, and the position within this track.
Typically used to allow a user to move between different
clients/apps while retaining the same play queue (for instance
when listening to an audio book).
"""
methodName = 'getPlayQueue'
viewName = '%s.view' % methodName
req = self._getRequest(viewName)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def getTopSongs(self, artist, count=50):
"""
since 1.13.0
Returns the top songs for a given artist
artist:str The artist to get songs for
count:int The number of songs to return
"""
methodName = 'getTopSongs'
viewName = '%s.view' % methodName
q = {'artist': artist, 'count': count}
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def getNewestPodcasts(self, count=20):
"""
since 1.13.0
Returns the most recently published Podcast episodes
count:int The number of episodes to return
"""
methodName = 'getNewestPodcasts'
viewName = '%s.view' % methodName
q = {'count': count}
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def scanMediaFolders(self):
"""
This is not an officially supported method of the API
Same as selecting 'Settings' > 'Scan media folders now' with
Subsonic web GUI
Returns True if refresh successful, False otherwise
"""
methodName = 'scanNow'
return self._unsupportedAPIFunction(methodName)
def cleanupDatabase(self):
"""
This is not an officially supported method of the API
Same as selecting 'Settings' > 'Clean-up Database' with Subsonic
web GUI
Returns True if cleanup initiated successfully, False otherwise
Subsonic stores information about all media files ever encountered.
By cleaning up the database, information about files that are
no longer in your media collection is permanently removed.
"""
methodName = 'expunge'
return self._unsupportedAPIFunction(methodName)
def _unsupportedAPIFunction(self, methodName):
"""
base function to call unsupported API methods
Returns True if refresh successful, False otherwise
:rtype : boolean
"""
baseMethod = 'musicFolderSettings'
viewName = '%s.view' % baseMethod
url = '%s:%d/%s/%s?%s' % (self._baseUrl, self._port,
self._separateServerPath(), viewName, methodName)
req = urllib2.Request(url)
res = self._opener.open(req)
res_msg = res.msg.lower()
return res_msg == 'ok'
#
# Private internal methods
#
def _getOpener(self, username, passwd):
creds = b64encode('%s:%s' % (username, passwd))
        # Context is only relevant in >= python 2.7.9
https_chain = HTTPSHandlerChain()
if sys.version_info[:3] >= (2, 7, 9) and self._insecure:
https_chain = HTTPSHandlerChain(
context=ssl._create_unverified_context())
opener = urllib2.build_opener(PysHTTPRedirectHandler, https_chain)
opener.addheaders = [('Authorization', 'Basic %s' % creds)]
return opener
def _getQueryDict(self, d):
"""
Given a dictionary, it cleans out all the values set to None
"""
for k, v in d.items():
if v is None:
del d[k]
return d
def _getRequest(self, viewName, query={}):
qstring = {'f': 'json', 'v': self._apiVersion, 'c': self._appName}
qstring.update(query)
url = '%s:%d/%s/%s' % (self._baseUrl, self._port, self._serverPath,
viewName)
req = urllib2.Request(url, urlencode(qstring))
return req
def _getRequestWithList(self, viewName, listName, alist, query={}):
"""
Like _getRequest, but allows appending a number of items with the
same key (listName). This bypasses the limitation of urlencode()
"""
qstring = {'f': 'json', 'v': self._apiVersion, 'c': self._appName}
qstring.update(query)
url = '%s:%d/%s/%s' % (self._baseUrl, self._port, self._serverPath,
viewName)
data = StringIO()
data.write(urlencode(qstring))
for i in alist:
data.write('&%s' % urlencode({listName: i}))
req = urllib2.Request(url, data.getvalue())
return req
def _getRequestWithLists(self, viewName, listMap, query={}):
"""
Like _getRequestWithList(), but you must pass a dictionary
that maps the listName to the list. This allows for multiple
list parameters to be used, like in updatePlaylist()
viewName:str The name of the view
listMap:dict A mapping of listName to a list of entries
query:dict The normal query dict
"""
qstring = {'f': 'json', 'v': self._apiVersion, 'c': self._appName}
qstring.update(query)
url = '%s:%d/%s/%s' % (self._baseUrl, self._port, self._serverPath,
viewName)
data = StringIO()
data.write(urlencode(qstring))
for k, l in listMap.iteritems():
for i in l:
data.write('&%s' % urlencode({k: i}))
req = urllib2.Request(url, data.getvalue())
return req
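    # For example (hypothetical values), a call such as
    #   self._getRequestWithLists('star.view', {'id': ['1', '2'], 'albumId': ['9']})
    # writes POST data along the lines of
    #   f=json&v=<apiVersion>&c=<appName>&id=1&id=2&albumId=9
    # i.e. the same key repeated once per list entry, which a single
    # urlencode() call would not produce.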
def _doInfoReq(self, req):
# Returns a parsed dictionary version of the result
res = self._opener.open(req)
dres = json.loads(res.read())
return dres['subsonic-response']
def _doBinReq(self, req):
res = self._opener.open(req)
contType = res.info().getheader('Content-Type')
if contType:
if contType.startswith('text/html') or \
contType.startswith('application/json'):
dres = json.loads(res.read())
return dres['subsonic-response']
return res
def _checkStatus(self, result):
if result['status'] == 'ok':
return True
elif result['status'] == 'failed':
exc = getExcByCode(result['error']['code'])
raise exc(result['error']['message'])
def _hexEnc(self, raw):
"""
Returns a "hex encoded" string per the Subsonic api docs
raw:str The string to hex encode
"""
ret = ''
for c in raw:
ret += '%02X' % ord(c)
return ret
def _ts2milli(self, ts):
"""
For whatever reason, Subsonic uses timestamps in milliseconds since
the unix epoch. I have no idea what need there is of this precision,
        but this will just multiply the timestamp by 1000 and return the int
"""
if ts is None:
return None
return int(ts * 1000)
def _separateServerPath(self):
"""
separate REST portion of URL from base server path.
"""
return urllib2.splithost(self._serverPath)[1].split('/')[0]
def _fixLastModified(self, data):
"""
This will recursively walk through a data structure and look for
a dict key/value pair where the key is "lastModified" and change
the shitty java millisecond timestamp to a real unix timestamp
of SECONDS since the unix epoch. JAVA SUCKS!
"""
        if isinstance(data, dict):
            for k, v in data.items():
                if k == 'lastModified':
                    data[k] = long(v) / 1000.0
                elif isinstance(v, (tuple, list, dict)):
                    # keep walking siblings and nested structures
                    self._fixLastModified(v)
        elif isinstance(data, (list, tuple)):
            for item in data:
                if isinstance(item, (list, tuple, dict)):
                    self._fixLastModified(item)
def _process_netrc(self, use_netrc):
"""
The use_netrc var is either a boolean, which means we should use
the user's default netrc, or a string specifying a path to a
netrc formatted file
use_netrc:bool|str Either set to True to use the user's default
netrc file or a string specifying a specific
netrc file to use
"""
if not use_netrc:
raise CredentialError('useNetrc must be either a boolean "True" '
'or a string representing a path to a netrc file, '
'not {0}'.format(repr(use_netrc)))
if isinstance(use_netrc, bool) and use_netrc:
self._netrc = netrc()
else:
# This should be a string specifying a path to a netrc file
self._netrc = netrc(os.path.expanduser(use_netrc))
auth = self._netrc.authenticators(self._hostname)
if not auth:
raise CredentialError('No machine entry found for {0} in '
'your netrc file'.format(self._hostname))
# If we get here, we have credentials
self._username = auth[0]
self._rawPass = auth[2]
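    # A netrc entry that _process_netrc() can use follows the standard netrc
    # format (hostname and credentials below are placeholders):
    #
    #   machine music.example.com
    #       login myuser
    #       password mysecret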
|
ties/py-sonic
|
libsonic/connection.py
|
Python
|
gpl-3.0
| 100,484
| 0.001513
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('universidades', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='universidademodel',
name='nome',
field=models.CharField(max_length=256),
preserve_default=True,
),
migrations.AlterField(
model_name='universidademodel',
name='sigla',
field=models.CharField(max_length=32),
preserve_default=True,
),
]
|
MCRSoftwares/AcadSocial
|
universidades/migrations/0002_auto_20150118_1319.py
|
Python
|
gpl-2.0
| 640
| 0
|
from .common import *
INTERNAL_IPS = ['127.0.0.1', ]
CORS_ORIGIN_WHITELIST = (
'localhost:8000',
)
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
}
Q_CLUSTER = {
'name': 'DjangORM',
'workers': 2,
'timeout': 90,
'retry': 120,
'queue_limit': 50,
'bulk': 10,
'orm': 'default',
    'catch_up': False  # do not replay schedules that were missed in the past
}
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
},
},
'loggers': {
'reminders': {
'handlers': ['console'],
'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),
},
'messages': {
'handlers': ['console'],
'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),
},
},
}
|
spacedogXYZ/sms_checkin
|
sms_checkin/settings/development.py
|
Python
|
agpl-3.0
| 883
| 0.001133
|
CONFIG_SCREEN = 'config'
RUNNING_SCREEN = 'running'
SUCCESS_SCREEN = 'success'
ERROR_SCREEN = 'error'
|
lrq3000/pyFileFixity
|
pyFileFixity/lib/gooey/gui/windows/views.py
|
Python
|
mit
| 111
| 0.036036
|
from itertools import combinations
START_P_HP = 100
START_P_DMG = 0
START_P_A = 0
START_B_HP = 100
START_B_DMG = 8
START_B_A = 2
WEAPONS = [ [8,4,0], [10,5,0], [25,6,0], [40,7,0], [74,8,0] ]
ARMOR = [ [13,0,1], [31,0,2], [53,0,3], [75,0,4], [102,0,5] ]
#Include 'no armor' option
ARMOR.append([0,0,0])
RINGS = [ [25,1,0], [50,2,0], [100,3,0], [20,0,1], [40,0,2], [80,0,3] ]
#Include 'no ring' options
RINGS.append([0,0,0])
RINGS.append([0,0,0])
def main():
cost = None
#1 Weapon
for w in combinations(WEAPONS, 1):
#0-1 Armor
for a in combinations(ARMOR, 1):
#0-2 Rings
for r in combinations(RINGS, 2):
bonuses = calc_bonuses(w,a,r)
p_hp = START_P_HP
p_cost = bonuses[0]
p_dmg = bonuses[1] + START_P_DMG
p_a = bonuses[2] + START_P_A
win = is_win(START_B_HP, START_B_DMG, START_B_A, p_hp, p_dmg, p_a)
#We are seeking to lose the fight, so not win
#We are also looking for highest cost
if not win and (cost is None or p_cost > cost):
cost = p_cost
print cost
def is_win(b_hp, b_dmg, b_a, p_hp, p_dmg, p_a):
b_dmg = max(b_dmg - p_a, 1)
p_dmg = max(p_dmg - b_a, 1)
#<= because we start first
return (b_hp / p_dmg) <= (p_hp / b_dmg)
def calc_bonuses(w,a,r):
ret = [0, 0, 0]
for i in [w,a,r]:
for j in i:
ret[0] += j[0]
ret[1] += j[1]
ret[2] += j[2]
return ret
if __name__ == "__main__":
main()
|
tosmun/AdventOfCode
|
solutions/day21/p2/main.py
|
Python
|
apache-2.0
| 1,368
| 0.067982
|
import datetime
import sys
import pdb
from directory import directory
if False:
pdb.set_trace() # avoid warning message from pyflakes
class Logger(object):
    # from Stack Overflow: how do I duplicate sys.stdout to a log file in python
def __init__(self, logfile_path=None, logfile_mode='w', base_name=None):
def path(s):
return directory('log') + s + datetime.datetime.now().isoformat('T') + '.log'
self.terminal = sys.stdout
clean_path = logfile_path.replace(':', '-') if base_name is None else path(base_name)
self.log = open(clean_path, logfile_mode)
def write(self, message):
self.terminal.write(message)
self.log.write(message)
    def flush(self):
        # flush both targets so the file-like interface behaves as expected
        self.terminal.flush()
        self.log.flush()
if False:
# usage example
sys.stdout = Logger('path/to/log/file')
# now print statements write on both stdout and the log file
|
rlowrance/re-local-linear
|
Logger.py
|
Python
|
mit
| 886
| 0.003386
|
"""
Higher order classes and functions for Libvirt Sandbox (lxc) container testing
:copyright: 2013 Red Hat Inc.
"""
import datetime
import time
import logging
import lvsb_base
# This utility function lets test-modules quickly create a list of all
# sandbox aggregate types, themselves containing a list of individual
# sandboxes.
def make_sandboxes(params, env, extra_ns=None):
"""
Return list of instantiated lvsb_testsandboxes classes from params
:param params: an undiluted Params instance
:param env: the current env instance
:param extra_ns: An extra, optional namespace to search for classes
"""
namespace = globals() # stuff in this module
# For specialized sandbox types, allow their class to be defined
# inside test module or elsewhere.
if extra_ns is not None:
namespace.update(extra_ns) # copy in additional symbols
names = namespace.keys()
    # Test may require more than one sandbox aggregator class
    pobs = params.objects('lvsb_testsandboxes')  # mandatory parameter
# filter out non-TestSandboxes subclasses
for name in names:
try:
if not issubclass(namespace[name], lvsb_base.TestSandboxes):
# Working on name list, okay to modify dict
del namespace[name]
except TypeError:
# Symbol wasn't a class, just ignore it
pass
    # Return a list of instantiated classes named by the lvsb_testsandboxes param
return [namespace[type_name](params, env) for type_name in pobs]
# TestSandboxes subclasses defined below, or inside other namespaces like
# a test module. They simply help the test-module iterate over many
# aggregate manager classes and the sandboxes they contain.
class TestSimpleSandboxes(lvsb_base.TestSandboxes):
"""
Simplistic sandbox aggregate manager that just executes a command
"""
def __init__(self, params, env):
"""
Initialize to run, all SandboxCommandBase's
"""
super(TestSimpleSandboxes, self).__init__(params, env)
self.init_sandboxes() # create instances of SandboxCommandBase
# Point all of them at the same local uri
self.for_each(lambda sb: sb.add_optarg('-c', self.uri))
# Use each instances name() method to produce name argument
self.for_each(lambda sb: sb.add_optarg('-n', sb.name))
# Command should follow after a --
self.for_each(lambda sb: sb.add_mm())
# Each one gets the same command (that's why it's simple)
self.for_each(lambda sb: sb.add_pos(self.command))
def results(self, each_timeout=5):
"""
        Run sandbox(es), allowing each_timeout to complete, return output list
"""
# Sandboxes run asynchronously, prevent them from running forever
start = datetime.datetime.now()
total_timeout_seconds = each_timeout * self.count
timeout_at = start + datetime.timedelta(seconds=total_timeout_seconds)
# No need to write a method just to call the run method
self.for_each(lambda sb: sb.run())
while datetime.datetime.now() < timeout_at:
# Wait until number of running sandboxes is zero
if bool(self.are_running()):
time.sleep(0.1) # Don't busy-wait
continue
else: # none are running
break
# Needed for accurate time in logging message below
end = datetime.datetime.now()
# Needed for logging message if none exited before timeout
still_running = self.are_running()
# Cause all exited sessions to clean up when sb.stop() called
self.for_each(lambda sb: sb.auto_clean(True))
# If raise, auto_clean will make sure cleanup happens
if bool(still_running):
raise lvsb_base.SandboxException("%d of %d sandboxes are still "
"running after "
"the timeout of %d seconds."
% (still_running,
self.count,
total_timeout_seconds))
# Kill off all sandboxes, just to be safe
self.for_each(lambda sb: sb.stop())
        logging.info("%d sandbox(es) finished in %s", self.count,
end - start)
# Return a list of stdout contents from each
return self.for_each(lambda sb: sb.recv())
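    # The aggregate classes to instantiate come from the test's Cartesian
    # params; an illustrative (not from this file) config line would be:
    #   lvsb_testsandboxes = "TestSimpleSandboxes"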
|
spiceqa/virt-test
|
virttest/lvsb.py
|
Python
|
gpl-2.0
| 4,505
| 0
|
import boto
from boto.swf.exceptions import SWFResponseError
import sure # noqa
from moto import mock_swf_deprecated
# RegisterDomain endpoint
@mock_swf_deprecated
def test_register_domain():
conn = boto.connect_swf("the_key", "the_secret")
conn.register_domain("test-domain", "60", description="A test domain")
all_domains = conn.list_domains("REGISTERED")
domain = all_domains["domainInfos"][0]
domain["name"].should.equal("test-domain")
domain["status"].should.equal("REGISTERED")
domain["description"].should.equal("A test domain")
@mock_swf_deprecated
def test_register_already_existing_domain():
conn = boto.connect_swf("the_key", "the_secret")
conn.register_domain("test-domain", "60", description="A test domain")
conn.register_domain.when.called_with(
"test-domain", "60", description="A test domain"
).should.throw(SWFResponseError)
@mock_swf_deprecated
def test_register_with_wrong_parameter_type():
conn = boto.connect_swf("the_key", "the_secret")
conn.register_domain.when.called_with(
"test-domain", 60, description="A test domain"
).should.throw(SWFResponseError)
# ListDomains endpoint
@mock_swf_deprecated
def test_list_domains_order():
conn = boto.connect_swf("the_key", "the_secret")
conn.register_domain("b-test-domain", "60")
conn.register_domain("a-test-domain", "60")
conn.register_domain("c-test-domain", "60")
all_domains = conn.list_domains("REGISTERED")
names = [domain["name"] for domain in all_domains["domainInfos"]]
names.should.equal(["a-test-domain", "b-test-domain", "c-test-domain"])
@mock_swf_deprecated
def test_list_domains_reverse_order():
conn = boto.connect_swf("the_key", "the_secret")
conn.register_domain("b-test-domain", "60")
conn.register_domain("a-test-domain", "60")
conn.register_domain("c-test-domain", "60")
all_domains = conn.list_domains("REGISTERED", reverse_order=True)
names = [domain["name"] for domain in all_domains["domainInfos"]]
names.should.equal(["c-test-domain", "b-test-domain", "a-test-domain"])
# DeprecateDomain endpoint
@mock_swf_deprecated
def test_deprecate_domain():
conn = boto.connect_swf("the_key", "the_secret")
conn.register_domain("test-domain", "60", description="A test domain")
conn.deprecate_domain("test-domain")
all_domains = conn.list_domains("DEPRECATED")
domain = all_domains["domainInfos"][0]
domain["name"].should.equal("test-domain")
@mock_swf_deprecated
def test_deprecate_already_deprecated_domain():
conn = boto.connect_swf("the_key", "the_secret")
conn.register_domain("test-domain", "60", description="A test domain")
conn.deprecate_domain("test-domain")
conn.deprecate_domain.when.called_with(
"test-domain"
).should.throw(SWFResponseError)
@mock_swf_deprecated
def test_deprecate_non_existent_domain():
conn = boto.connect_swf("the_key", "the_secret")
conn.deprecate_domain.when.called_with(
"non-existent"
).should.throw(SWFResponseError)
# DescribeDomain endpoint
@mock_swf_deprecated
def test_describe_domain():
conn = boto.connect_swf("the_key", "the_secret")
conn.register_domain("test-domain", "60", description="A test domain")
domain = conn.describe_domain("test-domain")
domain["configuration"][
"workflowExecutionRetentionPeriodInDays"].should.equal("60")
domain["domainInfo"]["description"].should.equal("A test domain")
domain["domainInfo"]["name"].should.equal("test-domain")
domain["domainInfo"]["status"].should.equal("REGISTERED")
@mock_swf_deprecated
def test_describe_non_existent_domain():
conn = boto.connect_swf("the_key", "the_secret")
conn.describe_domain.when.called_with(
"non-existent"
).should.throw(SWFResponseError)
|
botify-labs/moto
|
tests/test_swf/responses/test_domains.py
|
Python
|
apache-2.0
| 3,834
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, sys
import traceback
from argparse import ArgumentParser
class SRDKRunError(Exception):
def __init__(self, message):
self.msg = message
def run_commands(cmds):
for cmd in cmds:
cmd = u" ".join(cmd)
        print("Running %s" % cmd)
returncode = os.system(cmd)
try:
if returncode != 0:
                exception_str = ["Problem running the command:", " %s" % cmd]
raise SRDKRunError(u"\n".join(exception_str))
except SRDKRunError, arg:
print 'Exception:', arg.msg
def train_singleuser(userid, **args):
"""Hyfforddi model acwstig HTK / Train HTK acoustic model"""
srdk_cmds = []
print "SRDK_Train : %s" % userid
if userid :
srdk_cmds.append(["rm -rf results/" + userid])
srdk_cmds.append(["mkdir -p results/" + userid])
srdk_cmds.append(["SRDK_2_PronunciationDictionary"])
srdk_cmds.append(["SRDK_4_Transcriptions"])
if userid:
srdk_cmds.append(["SRDK_5_CodingAudioData " + userid ])
else:
srdk_cmds.append(["SRDK_5_CodingAudioData"])
srdk_cmds.append(["SRDK_6_FlatStart"])
srdk_cmds.append(["SRDK_7_SilenceModels"])
srdk_cmds.append(["SRDK_8_Realign"])
srdk_cmds.append(["SRDK_9_Triphones"])
srdk_cmds.append(["SRDK_10_TiedStateTriphones"])
srdk_cmds.append(["SRDK_11_TestModels"])
if userid:
srdk_cmds.append(["cp recout.mlf results/" + userid])
#srdk_cmds.append(["SRDK_12_Release"])
run_commands(srdk_cmds)
if __name__ == "__main__":
    parser = ArgumentParser(description="Script for creating an acoustic model with a single command")
    parser.add_argument('-u', '--userid', dest="userid", required=False, help="userid of a specific contributor")
parser.set_defaults(func=train_singleuser)
args=parser.parse_args()
try:
args.func(**vars(args))
except SRDKRunError as e:
print ("\n**SRDK ERROR**\n")
print (e)
|
techiaith/seilwaith
|
srdk/htk/SRDK_Train.py
|
Python
|
apache-2.0
| 1,957
| 0.036791
|
#!/usr/bin/env python
#
# Copyright 2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
"""
Read samples from a UHD device and write to file formatted as binary
outputs single precision complex float values or complex short values
(interleaved 16 bit signed short integers).
"""
from gnuradio import gr, eng_notation
from gnuradio import uhd
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys
n2s = eng_notation.num_to_str
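# Illustrative invocation (device address, frequency and sample count are
# example values only):
#   uhd_rx_cfile.py -a "addr=192.168.10.2" -f 100e6 --samp-rate 1e6 -N 1e6 samples.dat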
class rx_cfile_block(gr.top_block):
def __init__(self, options, filename):
gr.top_block.__init__(self)
# Create a UHD device source
if options.output_shorts:
self._u = uhd.usrp_source(device_addr=options.args, stream_args=uhd.stream_args('sc16'))
self._sink = gr.file_sink(gr.sizeof_short*2, filename)
else:
self._u = uhd.usrp_source(device_addr=options.args, stream_args=uhd.stream_args('fc32'))
self._sink = gr.file_sink(gr.sizeof_gr_complex, filename)
# Set receiver sample rate
self._u.set_samp_rate(options.samp_rate)
# Set receive daughterboard gain
if options.gain is None:
g = self._u.get_gain_range()
options.gain = float(g.start()+g.stop())/2
print "Using mid-point gain of", options.gain, "(", g.start(), "-", g.stop(), ")"
self._u.set_gain(options.gain)
# Set the subdevice spec
if(options.spec):
self._u.set_subdev_spec(options.spec, 0)
# Set the antenna
if(options.antenna):
self._u.set_antenna(options.antenna, 0)
# Set frequency (tune request takes lo_offset)
if(options.lo_offset is not None):
treq = uhd.tune_request(options.freq, options.lo_offset)
else:
treq = uhd.tune_request(options.freq)
tr = self._u.set_center_freq(treq)
if tr == None:
sys.stderr.write('Failed to set center frequency\n')
raise SystemExit, 1
# Create head block if needed and wire it up
if options.nsamples is None:
self.connect(self._u, self._sink)
else:
if options.output_shorts:
self._head = gr.head(gr.sizeof_short*2, int(options.nsamples))
else:
self._head = gr.head(gr.sizeof_gr_complex, int(options.nsamples))
self.connect(self._u, self._head, self._sink)
input_rate = self._u.get_samp_rate()
if options.verbose:
print "Args: ", options.args
print "Rx gain:", options.gain
print "Rx baseband frequency:", n2s(tr.actual_rf_freq)
print "Rx DDC frequency:", n2s(tr.actual_dsp_freq)
print "Rx Sample Rate:", n2s(input_rate)
if options.nsamples is None:
print "Receiving samples until Ctrl-C"
else:
print "Receving", n2s(options.nsamples), "samples"
if options.output_shorts:
print "Writing 16-bit complex shorts"
else:
print "Writing 32-bit complex floats"
print "Output filename:", filename
def get_options():
usage="%prog: [options] output_filename"
parser = OptionParser(option_class=eng_option, usage=usage)
parser.add_option("-a", "--args", type="string", default="",
help="UHD device address args , [default=%default]")
parser.add_option("", "--spec", type="string", default=None,
help="Subdevice of UHD device where appropriate")
parser.add_option("-A", "--antenna", type="string", default=None,
help="select Rx Antenna where appropriate")
parser.add_option("", "--samp-rate", type="eng_float", default=1e6,
help="set sample rate (bandwidth) [default=%default]")
parser.add_option("-f", "--freq", type="eng_float", default=None,
help="set frequency to FREQ", metavar="FREQ")
parser.add_option("-g", "--gain", type="eng_float", default=None,
help="set gain in dB (default is midpoint)")
parser.add_option( "-s","--output-shorts", action="store_true", default=False,
help="output interleaved shorts instead of complex floats")
parser.add_option("-N", "--nsamples", type="eng_float", default=None,
help="number of samples to collect [default=+inf]")
parser.add_option("-v", "--verbose", action="store_true", default=False,
help="verbose output")
parser.add_option("", "--lo-offset", type="eng_float", default=None,
help="set daughterboard LO offset to OFFSET [default=hw default]")
(options, args) = parser.parse_args ()
if len(args) != 1:
parser.print_help()
raise SystemExit, 1
if options.freq is None:
parser.print_help()
sys.stderr.write('You must specify the frequency with -f FREQ\n');
raise SystemExit, 1
return (options, args[0])
if __name__ == '__main__':
(options, filename) = get_options()
tb = rx_cfile_block(options, filename)
try:
tb.run()
except KeyboardInterrupt:
pass
|
n4hy/gnuradio
|
gr-uhd/apps/uhd_rx_cfile.py
|
Python
|
gpl-3.0
| 5,974
| 0.005524
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
from leagueofladders import settings
urlpatterns = patterns('',
url(r'^l/', include('leagueofladders.apps.myleague.urls', namespace='myleague')),
url(r'^admin/', include(admin.site.urls)),
url(r'^%s$' % settings.LOGIN_URL[1:], 'django.contrib.auth.views.login'))
|
dylanseago/LeagueOfLadders
|
leagueofladders/urls.py
|
Python
|
apache-2.0
| 420
| 0.004762
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.mgmt.core import ARMPipelineClient
from azure.profiles import KnownProfiles, ProfileDefinition
from azure.profiles.multiapiclient import MultiApiClientMixin
from msrest import Deserializer, Serializer
from ._configuration import StorageManagementClientConfiguration
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Optional
from azure.core.credentials import TokenCredential
class _SDKClient(object):
    def __init__(self, *args, **kwargs):
        """This is a fake class to support the current implementation of MultiApiClientMixin.
        Will be removed in final version of multiapi azure-core based client
        """
pass
class StorageManagementClient(MultiApiClientMixin, _SDKClient):
"""The Azure Storage Management API.
    This client contains multiple API versions, to help you deal with all of the Azure clouds
(Azure Stack, Azure Government, Azure China, etc.).
By default, it uses the latest API version available on public Azure.
For production, you should stick to a particular api-version and/or profile.
The profile sets a mapping between an operation group and its API version.
The api-version parameter sets the default API version if the operation
group is not described in the profile.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
:param api_version: API version to use if no profile is provided, or if missing in profile.
:type api_version: str
:param base_url: Service URL
:type base_url: str
:param profile: A profile definition, from KnownProfiles to dict.
:type profile: azure.profiles.KnownProfiles
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
"""
DEFAULT_API_VERSION = '2021-08-01'
_PROFILE_TAG = "azure.mgmt.storage.StorageManagementClient"
LATEST_PROFILE = ProfileDefinition({
_PROFILE_TAG: {
None: DEFAULT_API_VERSION,
'usage': '2018-02-01',
}},
_PROFILE_TAG + " latest"
)
def __init__(
self,
credential, # type: "TokenCredential"
subscription_id, # type: str
api_version=None, # type: Optional[str]
base_url="https://management.azure.com", # type: str
profile=KnownProfiles.default, # type: KnownProfiles
**kwargs # type: Any
):
self._config = StorageManagementClientConfiguration(credential, subscription_id, **kwargs)
self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
super(StorageManagementClient, self).__init__(
api_version=api_version,
profile=profile
)
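    # Hedged usage sketch: azure-identity's DefaultAzureCredential and the ids
    # below are assumptions, not part of this module; the operation-group
    # properties defined further down (e.g. blob_services) then dispatch to the
    # module matching the selected API version:
    #   from azure.identity import DefaultAzureCredential
    #   client = StorageManagementClient(DefaultAzureCredential(), "<subscription-id>")
    #   props = client.blob_services.list("<resource-group>", "<storage-account>")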
@classmethod
def _models_dict(cls, api_version):
return {k: v for k, v in cls.models(api_version).__dict__.items() if isinstance(v, type)}
@classmethod
def models(cls, api_version=DEFAULT_API_VERSION):
"""Module depends on the API version:
* 2015-06-15: :mod:`v2015_06_15.models<azure.mgmt.storage.v2015_06_15.models>`
* 2016-01-01: :mod:`v2016_01_01.models<azure.mgmt.storage.v2016_01_01.models>`
* 2016-12-01: :mod:`v2016_12_01.models<azure.mgmt.storage.v2016_12_01.models>`
* 2017-06-01: :mod:`v2017_06_01.models<azure.mgmt.storage.v2017_06_01.models>`
* 2017-10-01: :mod:`v2017_10_01.models<azure.mgmt.storage.v2017_10_01.models>`
* 2018-02-01: :mod:`v2018_02_01.models<azure.mgmt.storage.v2018_02_01.models>`
* 2018-03-01-preview: :mod:`v2018_03_01_preview.models<azure.mgmt.storage.v2018_03_01_preview.models>`
* 2018-07-01: :mod:`v2018_07_01.models<azure.mgmt.storage.v2018_07_01.models>`
* 2018-11-01: :mod:`v2018_11_01.models<azure.mgmt.storage.v2018_11_01.models>`
* 2019-04-01: :mod:`v2019_04_01.models<azure.mgmt.storage.v2019_04_01.models>`
* 2019-06-01: :mod:`v2019_06_01.models<azure.mgmt.storage.v2019_06_01.models>`
* 2020-08-01-preview: :mod:`v2020_08_01_preview.models<azure.mgmt.storage.v2020_08_01_preview.models>`
* 2021-01-01: :mod:`v2021_01_01.models<azure.mgmt.storage.v2021_01_01.models>`
* 2021-02-01: :mod:`v2021_02_01.models<azure.mgmt.storage.v2021_02_01.models>`
* 2021-04-01: :mod:`v2021_04_01.models<azure.mgmt.storage.v2021_04_01.models>`
* 2021-06-01: :mod:`v2021_06_01.models<azure.mgmt.storage.v2021_06_01.models>`
* 2021-08-01: :mod:`v2021_08_01.models<azure.mgmt.storage.v2021_08_01.models>`
"""
if api_version == '2015-06-15':
from .v2015_06_15 import models
return models
elif api_version == '2016-01-01':
from .v2016_01_01 import models
return models
elif api_version == '2016-12-01':
from .v2016_12_01 import models
return models
elif api_version == '2017-06-01':
from .v2017_06_01 import models
return models
elif api_version == '2017-10-01':
from .v2017_10_01 import models
return models
elif api_version == '2018-02-01':
from .v2018_02_01 import models
return models
elif api_version == '2018-03-01-preview':
from .v2018_03_01_preview import models
return models
elif api_version == '2018-07-01':
from .v2018_07_01 import models
return models
elif api_version == '2018-11-01':
from .v2018_11_01 import models
return models
elif api_version == '2019-04-01':
from .v2019_04_01 import models
return models
elif api_version == '2019-06-01':
from .v2019_06_01 import models
return models
elif api_version == '2020-08-01-preview':
from .v2020_08_01_preview import models
return models
elif api_version == '2021-01-01':
from .v2021_01_01 import models
return models
elif api_version == '2021-02-01':
from .v2021_02_01 import models
return models
elif api_version == '2021-04-01':
from .v2021_04_01 import models
return models
elif api_version == '2021-06-01':
from .v2021_06_01 import models
return models
elif api_version == '2021-08-01':
from .v2021_08_01 import models
return models
raise ValueError("API version {} is not available".format(api_version))
@property
def blob_containers(self):
"""Instance depends on the API version:
* 2018-02-01: :class:`BlobContainersOperations<azure.mgmt.storage.v2018_02_01.operations.BlobContainersOperations>`
* 2018-03-01-preview: :class:`BlobContainersOperations<azure.mgmt.storage.v2018_03_01_preview.operations.BlobContainersOperations>`
* 2018-07-01: :class:`BlobContainersOperations<azure.mgmt.storage.v2018_07_01.operations.BlobContainersOperations>`
* 2018-11-01: :class:`BlobContainersOperations<azure.mgmt.storage.v2018_11_01.operations.BlobContainersOperations>`
* 2019-04-01: :class:`BlobContainersOperations<azure.mgmt.storage.v2019_04_01.operations.BlobContainersOperations>`
* 2019-06-01: :class:`BlobContainersOperations<azure.mgmt.storage.v2019_06_01.operations.BlobContainersOperations>`
* 2020-08-01-preview: :class:`BlobContainersOperations<azure.mgmt.storage.v2020_08_01_preview.operations.BlobContainersOperations>`
* 2021-01-01: :class:`BlobContainersOperations<azure.mgmt.storage.v2021_01_01.operations.BlobContainersOperations>`
* 2021-02-01: :class:`BlobContainersOperations<azure.mgmt.storage.v2021_02_01.operations.BlobContainersOperations>`
* 2021-04-01: :class:`BlobContainersOperations<azure.mgmt.storage.v2021_04_01.operations.BlobContainersOperations>`
* 2021-06-01: :class:`BlobContainersOperations<azure.mgmt.storage.v2021_06_01.operations.BlobContainersOperations>`
* 2021-08-01: :class:`BlobContainersOperations<azure.mgmt.storage.v2021_08_01.operations.BlobContainersOperations>`
"""
api_version = self._get_api_version('blob_containers')
if api_version == '2018-02-01':
from .v2018_02_01.operations import BlobContainersOperations as OperationClass
elif api_version == '2018-03-01-preview':
from .v2018_03_01_preview.operations import BlobContainersOperations as OperationClass
elif api_version == '2018-07-01':
from .v2018_07_01.operations import BlobContainersOperations as OperationClass
elif api_version == '2018-11-01':
from .v2018_11_01.operations import BlobContainersOperations as OperationClass
elif api_version == '2019-04-01':
from .v2019_04_01.operations import BlobContainersOperations as OperationClass
elif api_version == '2019-06-01':
from .v2019_06_01.operations import BlobContainersOperations as OperationClass
elif api_version == '2020-08-01-preview':
from .v2020_08_01_preview.operations import BlobContainersOperations as OperationClass
elif api_version == '2021-01-01':
from .v2021_01_01.operations import BlobContainersOperations as OperationClass
elif api_version == '2021-02-01':
from .v2021_02_01.operations import BlobContainersOperations as OperationClass
elif api_version == '2021-04-01':
from .v2021_04_01.operations import BlobContainersOperations as OperationClass
elif api_version == '2021-06-01':
from .v2021_06_01.operations import BlobContainersOperations as OperationClass
elif api_version == '2021-08-01':
from .v2021_08_01.operations import BlobContainersOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'blob_containers'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def blob_inventory_policies(self):
"""Instance depends on the API version:
* 2019-06-01: :class:`BlobInventoryPoliciesOperations<azure.mgmt.storage.v2019_06_01.operations.BlobInventoryPoliciesOperations>`
* 2020-08-01-preview: :class:`BlobInventoryPoliciesOperations<azure.mgmt.storage.v2020_08_01_preview.operations.BlobInventoryPoliciesOperations>`
* 2021-01-01: :class:`BlobInventoryPoliciesOperations<azure.mgmt.storage.v2021_01_01.operations.BlobInventoryPoliciesOperations>`
* 2021-02-01: :class:`BlobInventoryPoliciesOperations<azure.mgmt.storage.v2021_02_01.operations.BlobInventoryPoliciesOperations>`
* 2021-04-01: :class:`BlobInventoryPoliciesOperations<azure.mgmt.storage.v2021_04_01.operations.BlobInventoryPoliciesOperations>`
* 2021-06-01: :class:`BlobInventoryPoliciesOperations<azure.mgmt.storage.v2021_06_01.operations.BlobInventoryPoliciesOperations>`
* 2021-08-01: :class:`BlobInventoryPoliciesOperations<azure.mgmt.storage.v2021_08_01.operations.BlobInventoryPoliciesOperations>`
"""
api_version = self._get_api_version('blob_inventory_policies')
if api_version == '2019-06-01':
from .v2019_06_01.operations import BlobInventoryPoliciesOperations as OperationClass
elif api_version == '2020-08-01-preview':
from .v2020_08_01_preview.operations import BlobInventoryPoliciesOperations as OperationClass
elif api_version == '2021-01-01':
from .v2021_01_01.operations import BlobInventoryPoliciesOperations as OperationClass
elif api_version == '2021-02-01':
from .v2021_02_01.operations import BlobInventoryPoliciesOperations as OperationClass
elif api_version == '2021-04-01':
from .v2021_04_01.operations import BlobInventoryPoliciesOperations as OperationClass
elif api_version == '2021-06-01':
from .v2021_06_01.operations import BlobInventoryPoliciesOperations as OperationClass
elif api_version == '2021-08-01':
from .v2021_08_01.operations import BlobInventoryPoliciesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'blob_inventory_policies'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def blob_services(self):
"""Instance depends on the API version:
* 2018-07-01: :class:`BlobServicesOperations<azure.mgmt.storage.v2018_07_01.operations.BlobServicesOperations>`
* 2018-11-01: :class:`BlobServicesOperations<azure.mgmt.storage.v2018_11_01.operations.BlobServicesOperations>`
* 2019-04-01: :class:`BlobServicesOperations<azure.mgmt.storage.v2019_04_01.operations.BlobServicesOperations>`
* 2019-06-01: :class:`BlobServicesOperations<azure.mgmt.storage.v2019_06_01.operations.BlobServicesOperations>`
* 2020-08-01-preview: :class:`BlobServicesOperations<azure.mgmt.storage.v2020_08_01_preview.operations.BlobServicesOperations>`
* 2021-01-01: :class:`BlobServicesOperations<azure.mgmt.storage.v2021_01_01.operations.BlobServicesOperations>`
* 2021-02-01: :class:`BlobServicesOperations<azure.mgmt.storage.v2021_02_01.operations.BlobServicesOperations>`
* 2021-04-01: :class:`BlobServicesOperations<azure.mgmt.storage.v2021_04_01.operations.BlobServicesOperations>`
* 2021-06-01: :class:`BlobServicesOperations<azure.mgmt.storage.v2021_06_01.operations.BlobServicesOperations>`
* 2021-08-01: :class:`BlobServicesOperations<azure.mgmt.storage.v2021_08_01.operations.BlobServicesOperations>`
"""
api_version = self._get_api_version('blob_services')
if api_version == '2018-07-01':
from .v2018_07_01.operations import BlobServicesOperations as OperationClass
elif api_version == '2018-11-01':
from .v2018_11_01.operations import BlobServicesOperations as OperationClass
elif api_version == '2019-04-01':
from .v2019_04_01.operations import BlobServicesOperations as OperationClass
elif api_version == '2019-06-01':
from .v2019_06_01.operations import BlobServicesOperations as OperationClass
elif api_version == '2020-08-01-preview':
from .v2020_08_01_preview.operations import BlobServicesOperations as OperationClass
elif api_version == '2021-01-01':
from .v2021_01_01.operations import BlobServicesOperations as OperationClass
elif api_version == '2021-02-01':
from .v2021_02_01.operations import BlobServicesOperations as OperationClass
elif api_version == '2021-04-01':
from .v2021_04_01.operations import BlobServicesOperations as OperationClass
elif api_version == '2021-06-01':
from .v2021_06_01.operations import BlobServicesOperations as OperationClass
elif api_version == '2021-08-01':
from .v2021_08_01.operations import BlobServicesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'blob_services'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def deleted_accounts(self):
"""Instance depends on the API version:
* 2020-08-01-preview: :class:`DeletedAccountsOperations<azure.mgmt.storage.v2020_08_01_preview.operations.DeletedAccountsOperations>`
* 2021-01-01: :class:`DeletedAccountsOperations<azure.mgmt.storage.v2021_01_01.operations.DeletedAccountsOperations>`
* 2021-02-01: :class:`DeletedAccountsOperations<azure.mgmt.storage.v2021_02_01.operations.DeletedAccountsOperations>`
* 2021-04-01: :class:`DeletedAccountsOperations<azure.mgmt.storage.v2021_04_01.operations.DeletedAccountsOperations>`
* 2021-06-01: :class:`DeletedAccountsOperations<azure.mgmt.storage.v2021_06_01.operations.DeletedAccountsOperations>`
* 2021-08-01: :class:`DeletedAccountsOperations<azure.mgmt.storage.v2021_08_01.operations.DeletedAccountsOperations>`
"""
api_version = self._get_api_version('deleted_accounts')
if api_version == '2020-08-01-preview':
from .v2020_08_01_preview.operations import DeletedAccountsOperations as OperationClass
elif api_version == '2021-01-01':
from .v2021_01_01.operations import DeletedAccountsOperations as OperationClass
elif api_version == '2021-02-01':
from .v2021_02_01.operations import DeletedAccountsOperations as OperationClass
elif api_version == '2021-04-01':
from .v2021_04_01.operations import DeletedAccountsOperations as OperationClass
elif api_version == '2021-06-01':
from .v2021_06_01.operations import DeletedAccountsOperations as OperationClass
elif api_version == '2021-08-01':
from .v2021_08_01.operations import DeletedAccountsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'deleted_accounts'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def encryption_scopes(self):
"""Instance depends on the API version:
* 2019-06-01: :class:`EncryptionScopesOperations<azure.mgmt.storage.v2019_06_01.operations.EncryptionScopesOperations>`
* 2020-08-01-preview: :class:`EncryptionScopesOperations<azure.mgmt.storage.v2020_08_01_preview.operations.EncryptionScopesOperations>`
* 2021-01-01: :class:`EncryptionScopesOperations<azure.mgmt.storage.v2021_01_01.operations.EncryptionScopesOperations>`
* 2021-02-01: :class:`EncryptionScopesOperations<azure.mgmt.storage.v2021_02_01.operations.EncryptionScopesOperations>`
* 2021-04-01: :class:`EncryptionScopesOperations<azure.mgmt.storage.v2021_04_01.operations.EncryptionScopesOperations>`
* 2021-06-01: :class:`EncryptionScopesOperations<azure.mgmt.storage.v2021_06_01.operations.EncryptionScopesOperations>`
* 2021-08-01: :class:`EncryptionScopesOperations<azure.mgmt.storage.v2021_08_01.operations.EncryptionScopesOperations>`
"""
api_version = self._get_api_version('encryption_scopes')
if api_version == '2019-06-01':
from .v2019_06_01.operations import EncryptionScopesOperations as OperationClass
elif api_version == '2020-08-01-preview':
from .v2020_08_01_preview.operations import EncryptionScopesOperations as OperationClass
elif api_version == '2021-01-01':
from .v2021_01_01.operations import EncryptionScopesOperations as OperationClass
elif api_version == '2021-02-01':
from .v2021_02_01.operations import EncryptionScopesOperations as OperationClass
elif api_version == '2021-04-01':
from .v2021_04_01.operations import EncryptionScopesOperations as OperationClass
elif api_version == '2021-06-01':
from .v2021_06_01.operations import EncryptionScopesOperations as OperationClass
elif api_version == '2021-08-01':
from .v2021_08_01.operations import EncryptionScopesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'encryption_scopes'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def file_services(self):
"""Instance depends on the API version:
* 2019-04-01: :class:`FileServicesOperations<azure.mgmt.storage.v2019_04_01.operations.FileServicesOperations>`
* 2019-06-01: :class:`FileServicesOperations<azure.mgmt.storage.v2019_06_01.operations.FileServicesOperations>`
* 2020-08-01-preview: :class:`FileServicesOperations<azure.mgmt.storage.v2020_08_01_preview.operations.FileServicesOperations>`
* 2021-01-01: :class:`FileServicesOperations<azure.mgmt.storage.v2021_01_01.operations.FileServicesOperations>`
* 2021-02-01: :class:`FileServicesOperations<azure.mgmt.storage.v2021_02_01.operations.FileServicesOperations>`
* 2021-04-01: :class:`FileServicesOperations<azure.mgmt.storage.v2021_04_01.operations.FileServicesOperations>`
* 2021-06-01: :class:`FileServicesOperations<azure.mgmt.storage.v2021_06_01.operations.FileServicesOperations>`
* 2021-08-01: :class:`FileServicesOperations<azure.mgmt.storage.v2021_08_01.operations.FileServicesOperations>`
"""
api_version = self._get_api_version('file_services')
if api_version == '2019-04-01':
from .v2019_04_01.operations import FileServicesOperations as OperationClass
elif api_version == '2019-06-01':
from .v2019_06_01.operations import FileServicesOperations as OperationClass
elif api_version == '2020-08-01-preview':
from .v2020_08_01_preview.operations import FileServicesOperations as OperationClass
elif api_version == '2021-01-01':
from .v2021_01_01.operations import FileServicesOperations as OperationClass
elif api_version == '2021-02-01':
from .v2021_02_01.operations import FileServicesOperations as OperationClass
elif api_version == '2021-04-01':
from .v2021_04_01.operations import FileServicesOperations as OperationClass
elif api_version == '2021-06-01':
from .v2021_06_01.operations import FileServicesOperations as OperationClass
elif api_version == '2021-08-01':
from .v2021_08_01.operations import FileServicesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'file_services'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def file_shares(self):
"""Instance depends on the API version:
* 2019-04-01: :class:`FileSharesOperations<azure.mgmt.storage.v2019_04_01.operations.FileSharesOperations>`
* 2019-06-01: :class:`FileSharesOperations<azure.mgmt.storage.v2019_06_01.operations.FileSharesOperations>`
* 2020-08-01-preview: :class:`FileSharesOperations<azure.mgmt.storage.v2020_08_01_preview.operations.FileSharesOperations>`
* 2021-01-01: :class:`FileSharesOperations<azure.mgmt.storage.v2021_01_01.operations.FileSharesOperations>`
* 2021-02-01: :class:`FileSharesOperations<azure.mgmt.storage.v2021_02_01.operations.FileSharesOperations>`
* 2021-04-01: :class:`FileSharesOperations<azure.mgmt.storage.v2021_04_01.operations.FileSharesOperations>`
* 2021-06-01: :class:`FileSharesOperations<azure.mgmt.storage.v2021_06_01.operations.FileSharesOperations>`
* 2021-08-01: :class:`FileSharesOperations<azure.mgmt.storage.v2021_08_01.operations.FileSharesOperations>`
"""
api_version = self._get_api_version('file_shares')
if api_version == '2019-04-01':
from .v2019_04_01.operations import FileSharesOperations as OperationClass
elif api_version == '2019-06-01':
from .v2019_06_01.operations import FileSharesOperations as OperationClass
elif api_version == '2020-08-01-preview':
from .v2020_08_01_preview.operations import FileSharesOperations as OperationClass
elif api_version == '2021-01-01':
from .v2021_01_01.operations import FileSharesOperations as OperationClass
elif api_version == '2021-02-01':
from .v2021_02_01.operations import FileSharesOperations as OperationClass
elif api_version == '2021-04-01':
from .v2021_04_01.operations import FileSharesOperations as OperationClass
elif api_version == '2021-06-01':
from .v2021_06_01.operations import FileSharesOperations as OperationClass
elif api_version == '2021-08-01':
from .v2021_08_01.operations import FileSharesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'file_shares'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def local_users(self):
"""Instance depends on the API version:
* 2021-08-01: :class:`LocalUsersOperations<azure.mgmt.storage.v2021_08_01.operations.LocalUsersOperations>`
"""
api_version = self._get_api_version('local_users')
if api_version == '2021-08-01':
from .v2021_08_01.operations import LocalUsersOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'local_users'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def management_policies(self):
"""Instance depends on the API version:
* 2018-07-01: :class:`ManagementPoliciesOperations<azure.mgmt.storage.v2018_07_01.operations.ManagementPoliciesOperations>`
* 2018-11-01: :class:`ManagementPoliciesOperations<azure.mgmt.storage.v2018_11_01.operations.ManagementPoliciesOperations>`
* 2019-04-01: :class:`ManagementPoliciesOperations<azure.mgmt.storage.v2019_04_01.operations.ManagementPoliciesOperations>`
* 2019-06-01: :class:`ManagementPoliciesOperations<azure.mgmt.storage.v2019_06_01.operations.ManagementPoliciesOperations>`
* 2020-08-01-preview: :class:`ManagementPoliciesOperations<azure.mgmt.storage.v2020_08_01_preview.operations.ManagementPoliciesOperations>`
* 2021-01-01: :class:`ManagementPoliciesOperations<azure.mgmt.storage.v2021_01_01.operations.ManagementPoliciesOperations>`
* 2021-02-01: :class:`ManagementPoliciesOperations<azure.mgmt.storage.v2021_02_01.operations.ManagementPoliciesOperations>`
* 2021-04-01: :class:`ManagementPoliciesOperations<azure.mgmt.storage.v2021_04_01.operations.ManagementPoliciesOperations>`
* 2021-06-01: :class:`ManagementPoliciesOperations<azure.mgmt.storage.v2021_06_01.operations.ManagementPoliciesOperations>`
* 2021-08-01: :class:`ManagementPoliciesOperations<azure.mgmt.storage.v2021_08_01.operations.ManagementPoliciesOperations>`
"""
api_version = self._get_api_version('management_policies')
if api_version == '2018-07-01':
from .v2018_07_01.operations import ManagementPoliciesOperations as OperationClass
elif api_version == '2018-11-01':
from .v2018_11_01.operations import ManagementPoliciesOperations as OperationClass
elif api_version == '2019-04-01':
from .v2019_04_01.operations import ManagementPoliciesOperations as OperationClass
elif api_version == '2019-06-01':
from .v2019_06_01.operations import ManagementPoliciesOperations as OperationClass
elif api_version == '2020-08-01-preview':
from .v2020_08_01_preview.operations import ManagementPoliciesOperations as OperationClass
elif api_version == '2021-01-01':
from .v2021_01_01.operations import ManagementPoliciesOperations as OperationClass
elif api_version == '2021-02-01':
from .v2021_02_01.operations import ManagementPoliciesOperations as OperationClass
elif api_version == '2021-04-01':
from .v2021_04_01.operations import ManagementPoliciesOperations as OperationClass
elif api_version == '2021-06-01':
from .v2021_06_01.operations import ManagementPoliciesOperations as OperationClass
elif api_version == '2021-08-01':
from .v2021_08_01.operations import ManagementPoliciesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'management_policies'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def object_replication_policies(self):
"""Instance depends on the API version:
* 2019-06-01: :class:`ObjectReplicationPoliciesOperations<azure.mgmt.storage.v2019_06_01.operations.ObjectReplicationPoliciesOperations>`
* 2020-08-01-preview: :class:`ObjectReplicationPoliciesOperations<azure.mgmt.storage.v2020_08_01_preview.operations.ObjectReplicationPoliciesOperations>`
* 2021-01-01: :class:`ObjectReplicationPoliciesOperations<azure.mgmt.storage.v2021_01_01.operations.ObjectReplicationPoliciesOperations>`
* 2021-02-01: :class:`ObjectReplicationPoliciesOperations<azure.mgmt.storage.v2021_02_01.operations.ObjectReplicationPoliciesOperations>`
* 2021-04-01: :class:`ObjectReplicationPoliciesOperations<azure.mgmt.storage.v2021_04_01.operations.ObjectReplicationPoliciesOperations>`
* 2021-06-01: :class:`ObjectReplicationPoliciesOperations<azure.mgmt.storage.v2021_06_01.operations.ObjectReplicationPoliciesOperations>`
* 2021-08-01: :class:`ObjectReplicationPoliciesOperations<azure.mgmt.storage.v2021_08_01.operations.ObjectReplicationPoliciesOperations>`
"""
api_version = self._get_api_version('object_replication_policies')
if api_version == '2019-06-01':
from .v2019_06_01.operations import ObjectReplicationPoliciesOperations as OperationClass
elif api_version == '2020-08-01-preview':
from .v2020_08_01_preview.operations import ObjectReplicationPoliciesOperations as OperationClass
elif api_version == '2021-01-01':
from .v2021_01_01.operations import ObjectReplicationPoliciesOperations as OperationClass
elif api_version == '2021-02-01':
from .v2021_02_01.operations import ObjectReplicationPoliciesOperations as OperationClass
elif api_version == '2021-04-01':
from .v2021_04_01.operations import ObjectReplicationPoliciesOperations as OperationClass
elif api_version == '2021-06-01':
from .v2021_06_01.operations import ObjectReplicationPoliciesOperations as OperationClass
elif api_version == '2021-08-01':
from .v2021_08_01.operations import ObjectReplicationPoliciesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'object_replication_policies'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def operations(self):
"""Instance depends on the API version:
* 2017-06-01: :class:`Operations<azure.mgmt.storage.v2017_06_01.operations.Operations>`
* 2017-10-01: :class:`Operations<azure.mgmt.storage.v2017_10_01.operations.Operations>`
* 2018-02-01: :class:`Operations<azure.mgmt.storage.v2018_02_01.operations.Operations>`
* 2018-03-01-preview: :class:`Operations<azure.mgmt.storage.v2018_03_01_preview.operations.Operations>`
* 2018-07-01: :class:`Operations<azure.mgmt.storage.v2018_07_01.operations.Operations>`
* 2018-11-01: :class:`Operations<azure.mgmt.storage.v2018_11_01.operations.Operations>`
* 2019-04-01: :class:`Operations<azure.mgmt.storage.v2019_04_01.operations.Operations>`
* 2019-06-01: :class:`Operations<azure.mgmt.storage.v2019_06_01.operations.Operations>`
* 2020-08-01-preview: :class:`Operations<azure.mgmt.storage.v2020_08_01_preview.operations.Operations>`
* 2021-01-01: :class:`Operations<azure.mgmt.storage.v2021_01_01.operations.Operations>`
* 2021-02-01: :class:`Operations<azure.mgmt.storage.v2021_02_01.operations.Operations>`
* 2021-04-01: :class:`Operations<azure.mgmt.storage.v2021_04_01.operations.Operations>`
* 2021-06-01: :class:`Operations<azure.mgmt.storage.v2021_06_01.operations.Operations>`
* 2021-08-01: :class:`Operations<azure.mgmt.storage.v2021_08_01.operations.Operations>`
"""
api_version = self._get_api_version('operations')
if api_version == '2017-06-01':
from .v2017_06_01.operations import Operations as OperationClass
elif api_version == '2017-10-01':
from .v2017_10_01.operations import Operations as OperationClass
elif api_version == '2018-02-01':
from .v2018_02_01.operations import Operations as OperationClass
elif api_version == '2018-03-01-preview':
from .v2018_03_01_preview.operations import Operations as OperationClass
elif api_version == '2018-07-01':
from .v2018_07_01.operations import Operations as OperationClass
elif api_version == '2018-11-01':
from .v2018_11_01.operations import Operations as OperationClass
elif api_version == '2019-04-01':
from .v2019_04_01.operations import Operations as OperationClass
elif api_version == '2019-06-01':
from .v2019_06_01.operations import Operations as OperationClass
elif api_version == '2020-08-01-preview':
from .v2020_08_01_preview.operations import Operations as OperationClass
elif api_version == '2021-01-01':
from .v2021_01_01.operations import Operations as OperationClass
elif api_version == '2021-02-01':
from .v2021_02_01.operations import Operations as OperationClass
elif api_version == '2021-04-01':
from .v2021_04_01.operations import Operations as OperationClass
elif api_version == '2021-06-01':
from .v2021_06_01.operations import Operations as OperationClass
elif api_version == '2021-08-01':
from .v2021_08_01.operations import Operations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'operations'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def private_endpoint_connections(self):
"""Instance depends on the API version:
* 2019-06-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.storage.v2019_06_01.operations.PrivateEndpointConnectionsOperations>`
* 2020-08-01-preview: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.storage.v2020_08_01_preview.operations.PrivateEndpointConnectionsOperations>`
* 2021-01-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.storage.v2021_01_01.operations.PrivateEndpointConnectionsOperations>`
* 2021-02-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.storage.v2021_02_01.operations.PrivateEndpointConnectionsOperations>`
* 2021-04-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.storage.v2021_04_01.operations.PrivateEndpointConnectionsOperations>`
* 2021-06-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.storage.v2021_06_01.operations.PrivateEndpointConnectionsOperations>`
* 2021-08-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.storage.v2021_08_01.operations.PrivateEndpointConnectionsOperations>`
"""
api_version = self._get_api_version('private_endpoint_connections')
if api_version == '2019-06-01':
from .v2019_06_01.operations import PrivateEndpointConnectionsOperations as OperationClass
elif api_version == '2020-08-01-preview':
from .v2020_08_01_preview.operations import PrivateEndpointConnectionsOperations as OperationClass
elif api_version == '2021-01-01':
from .v2021_01_01.operations import PrivateEndpointConnectionsOperations as OperationClass
elif api_version == '2021-02-01':
from .v2021_02_01.operations import PrivateEndpointConnectionsOperations as OperationClass
elif api_version == '2021-04-01':
from .v2021_04_01.operations import PrivateEndpointConnectionsOperations as OperationClass
elif api_version == '2021-06-01':
from .v2021_06_01.operations import PrivateEndpointConnectionsOperations as OperationClass
elif api_version == '2021-08-01':
from .v2021_08_01.operations import PrivateEndpointConnectionsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'private_endpoint_connections'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def private_link_resources(self):
"""Instance depends on the API version:
* 2019-06-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.storage.v2019_06_01.operations.PrivateLinkResourcesOperations>`
* 2020-08-01-preview: :class:`PrivateLinkResourcesOperations<azure.mgmt.storage.v2020_08_01_preview.operations.PrivateLinkResourcesOperations>`
* 2021-01-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.storage.v2021_01_01.operations.PrivateLinkResourcesOperations>`
* 2021-02-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.storage.v2021_02_01.operations.PrivateLinkResourcesOperations>`
* 2021-04-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.storage.v2021_04_01.operations.PrivateLinkResourcesOperations>`
* 2021-06-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.storage.v2021_06_01.operations.PrivateLinkResourcesOperations>`
* 2021-08-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.storage.v2021_08_01.operations.PrivateLinkResourcesOperations>`
"""
api_version = self._get_api_version('private_link_resources')
if api_version == '2019-06-01':
from .v2019_06_01.operations import PrivateLinkResourcesOperations as OperationClass
elif api_version == '2020-08-01-preview':
from .v2020_08_01_preview.operations import PrivateLinkResourcesOperations as OperationClass
elif api_version == '2021-01-01':
from .v2021_01_01.operations import PrivateLinkResourcesOperations as OperationClass
elif api_version == '2021-02-01':
from .v2021_02_01.operations import PrivateLinkResourcesOperations as OperationClass
elif api_version == '2021-04-01':
from .v2021_04_01.operations import PrivateLinkResourcesOperations as OperationClass
elif api_version == '2021-06-01':
from .v2021_06_01.operations import PrivateLinkResourcesOperations as OperationClass
elif api_version == '2021-08-01':
from .v2021_08_01.operations import PrivateLinkResourcesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'private_link_resources'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def queue(self):
"""Instance depends on the API version:
* 2019-06-01: :class:`QueueOperations<azure.mgmt.storage.v2019_06_01.operations.QueueOperations>`
* 2020-08-01-preview: :class:`QueueOperations<azure.mgmt.storage.v2020_08_01_preview.operations.QueueOperations>`
* 2021-01-01: :class:`QueueOperations<azure.mgmt.storage.v2021_01_01.operations.QueueOperations>`
* 2021-02-01: :class:`QueueOperations<azure.mgmt.storage.v2021_02_01.operations.QueueOperations>`
* 2021-04-01: :class:`QueueOperations<azure.mgmt.storage.v2021_04_01.operations.QueueOperations>`
* 2021-06-01: :class:`QueueOperations<azure.mgmt.storage.v2021_06_01.operations.QueueOperations>`
* 2021-08-01: :class:`QueueOperations<azure.mgmt.storage.v2021_08_01.operations.QueueOperations>`
"""
api_version = self._get_api_version('queue')
if api_version == '2019-06-01':
from .v2019_06_01.operations import QueueOperations as OperationClass
elif api_version == '2020-08-01-preview':
from .v2020_08_01_preview.operations import QueueOperations as OperationClass
elif api_version == '2021-01-01':
from .v2021_01_01.operations import QueueOperations as OperationClass
elif api_version == '2021-02-01':
from .v2021_02_01.operations import QueueOperations as OperationClass
elif api_version == '2021-04-01':
from .v2021_04_01.operations import QueueOperations as OperationClass
elif api_version == '2021-06-01':
from .v2021_06_01.operations import QueueOperations as OperationClass
elif api_version == '2021-08-01':
from .v2021_08_01.operations import QueueOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'queue'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def queue_services(self):
"""Instance depends on the API version:
* 2019-06-01: :class:`QueueServicesOperations<azure.mgmt.storage.v2019_06_01.operations.QueueServicesOperations>`
* 2020-08-01-preview: :class:`QueueServicesOperations<azure.mgmt.storage.v2020_08_01_preview.operations.QueueServicesOperations>`
* 2021-01-01: :class:`QueueServicesOperations<azure.mgmt.storage.v2021_01_01.operations.QueueServicesOperations>`
* 2021-02-01: :class:`QueueServicesOperations<azure.mgmt.storage.v2021_02_01.operations.QueueServicesOperations>`
* 2021-04-01: :class:`QueueServicesOperations<azure.mgmt.storage.v2021_04_01.operations.QueueServicesOperations>`
* 2021-06-01: :class:`QueueServicesOperations<azure.mgmt.storage.v2021_06_01.operations.QueueServicesOperations>`
* 2021-08-01: :class:`QueueServicesOperations<azure.mgmt.storage.v2021_08_01.operations.QueueServicesOperations>`
"""
api_version = self._get_api_version('queue_services')
if api_version == '2019-06-01':
from .v2019_06_01.operations import QueueServicesOperations as OperationClass
elif api_version == '2020-08-01-preview':
from .v2020_08_01_preview.operations import QueueServicesOperations as OperationClass
elif api_version == '2021-01-01':
from .v2021_01_01.operations import QueueServicesOperations as OperationClass
elif api_version == '2021-02-01':
from .v2021_02_01.operations import QueueServicesOperations as OperationClass
elif api_version == '2021-04-01':
from .v2021_04_01.operations import QueueServicesOperations as OperationClass
elif api_version == '2021-06-01':
from .v2021_06_01.operations import QueueServicesOperations as OperationClass
elif api_version == '2021-08-01':
from .v2021_08_01.operations import QueueServicesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'queue_services'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def skus(self):
"""Instance depends on the API version:
* 2017-06-01: :class:`SkusOperations<azure.mgmt.storage.v2017_06_01.operations.SkusOperations>`
* 2017-10-01: :class:`SkusOperations<azure.mgmt.storage.v2017_10_01.operations.SkusOperations>`
* 2018-02-01: :class:`SkusOperations<azure.mgmt.storage.v2018_02_01.operations.SkusOperations>`
* 2018-03-01-preview: :class:`SkusOperations<azure.mgmt.storage.v2018_03_01_preview.operations.SkusOperations>`
* 2018-07-01: :class:`SkusOperations<azure.mgmt.storage.v2018_07_01.operations.SkusOperations>`
* 2018-11-01: :class:`SkusOperations<azure.mgmt.storage.v2018_11_01.operations.SkusOperations>`
* 2019-04-01: :class:`SkusOperations<azure.mgmt.storage.v2019_04_01.operations.SkusOperations>`
* 2019-06-01: :class:`SkusOperations<azure.mgmt.storage.v2019_06_01.operations.SkusOperations>`
* 2020-08-01-preview: :class:`SkusOperations<azure.mgmt.storage.v2020_08_01_preview.operations.SkusOperations>`
* 2021-01-01: :class:`SkusOperations<azure.mgmt.storage.v2021_01_01.operations.SkusOperations>`
* 2021-02-01: :class:`SkusOperations<azure.mgmt.storage.v2021_02_01.operations.SkusOperations>`
* 2021-04-01: :class:`SkusOperations<azure.mgmt.storage.v2021_04_01.operations.SkusOperations>`
* 2021-06-01: :class:`SkusOperations<azure.mgmt.storage.v2021_06_01.operations.SkusOperations>`
* 2021-08-01: :class:`SkusOperations<azure.mgmt.storage.v2021_08_01.operations.SkusOperations>`
"""
api_version = self._get_api_version('skus')
if api_version == '2017-06-01':
from .v2017_06_01.operations import SkusOperations as OperationClass
elif api_version == '2017-10-01':
from .v2017_10_01.operations import SkusOperations as OperationClass
elif api_version == '2018-02-01':
from .v2018_02_01.operations import SkusOperations as OperationClass
elif api_version == '2018-03-01-preview':
from .v2018_03_01_preview.operations import SkusOperations as OperationClass
elif api_version == '2018-07-01':
from .v2018_07_01.operations import SkusOperations as OperationClass
elif api_version == '2018-11-01':
from .v2018_11_01.operations import SkusOperations as OperationClass
elif api_version == '2019-04-01':
from .v2019_04_01.operations import SkusOperations as OperationClass
elif api_version == '2019-06-01':
from .v2019_06_01.operations import SkusOperations as OperationClass
elif api_version == '2020-08-01-preview':
from .v2020_08_01_preview.operations import SkusOperations as OperationClass
elif api_version == '2021-01-01':
from .v2021_01_01.operations import SkusOperations as OperationClass
elif api_version == '2021-02-01':
from .v2021_02_01.operations import SkusOperations as OperationClass
elif api_version == '2021-04-01':
from .v2021_04_01.operations import SkusOperations as OperationClass
elif api_version == '2021-06-01':
from .v2021_06_01.operations import SkusOperations as OperationClass
elif api_version == '2021-08-01':
from .v2021_08_01.operations import SkusOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'skus'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def storage_accounts(self):
"""Instance depends on the API version:
* 2015-06-15: :class:`StorageAccountsOperations<azure.mgmt.storage.v2015_06_15.operations.StorageAccountsOperations>`
* 2016-01-01: :class:`StorageAccountsOperations<azure.mgmt.storage.v2016_01_01.operations.StorageAccountsOperations>`
* 2016-12-01: :class:`StorageAccountsOperations<azure.mgmt.storage.v2016_12_01.operations.StorageAccountsOperations>`
* 2017-06-01: :class:`StorageAccountsOperations<azure.mgmt.storage.v2017_06_01.operations.StorageAccountsOperations>`
* 2017-10-01: :class:`StorageAccountsOperations<azure.mgmt.storage.v2017_10_01.operations.StorageAccountsOperations>`
* 2018-02-01: :class:`StorageAccountsOperations<azure.mgmt.storage.v2018_02_01.operations.StorageAccountsOperations>`
* 2018-03-01-preview: :class:`StorageAccountsOperations<azure.mgmt.storage.v2018_03_01_preview.operations.StorageAccountsOperations>`
* 2018-07-01: :class:`StorageAccountsOperations<azure.mgmt.storage.v2018_07_01.operations.StorageAccountsOperations>`
* 2018-11-01: :class:`StorageAccountsOperations<azure.mgmt.storage.v2018_11_01.operations.StorageAccountsOperations>`
* 2019-04-01: :class:`StorageAccountsOperations<azure.mgmt.storage.v2019_04_01.operations.StorageAccountsOperations>`
* 2019-06-01: :class:`StorageAccountsOperations<azure.mgmt.storage.v2019_06_01.operations.StorageAccountsOperations>`
* 2020-08-01-preview: :class:`StorageAccountsOperations<azure.mgmt.storage.v2020_08_01_preview.operations.StorageAccountsOperations>`
* 2021-01-01: :class:`StorageAccountsOperations<azure.mgmt.storage.v2021_01_01.operations.StorageAccountsOperations>`
* 2021-02-01: :class:`StorageAccountsOperations<azure.mgmt.storage.v2021_02_01.operations.StorageAccountsOperations>`
* 2021-04-01: :class:`StorageAccountsOperations<azure.mgmt.storage.v2021_04_01.operations.StorageAccountsOperations>`
* 2021-06-01: :class:`StorageAccountsOperations<azure.mgmt.storage.v2021_06_01.operations.StorageAccountsOperations>`
* 2021-08-01: :class:`StorageAccountsOperations<azure.mgmt.storage.v2021_08_01.operations.StorageAccountsOperations>`
"""
api_version = self._get_api_version('storage_accounts')
if api_version == '2015-06-15':
from .v2015_06_15.operations import StorageAccountsOperations as OperationClass
elif api_version == '2016-01-01':
from .v2016_01_01.operations import StorageAccountsOperations as OperationClass
elif api_version == '2016-12-01':
from .v2016_12_01.operations import StorageAccountsOperations as OperationClass
elif api_version == '2017-06-01':
from .v2017_06_01.operations import StorageAccountsOperations as OperationClass
elif api_version == '2017-10-01':
from .v2017_10_01.operations import StorageAccountsOperations as OperationClass
elif api_version == '2018-02-01':
from .v2018_02_01.operations import StorageAccountsOperations as OperationClass
elif api_version == '2018-03-01-preview':
from .v2018_03_01_preview.operations import StorageAccountsOperations as OperationClass
elif api_version == '2018-07-01':
from .v2018_07_01.operations import StorageAccountsOperations as OperationClass
elif api_version == '2018-11-01':
from .v2018_11_01.operations import StorageAccountsOperations as OperationClass
elif api_version == '2019-04-01':
from .v2019_04_01.operations import StorageAccountsOperations as OperationClass
elif api_version == '2019-06-01':
from .v2019_06_01.operations import StorageAccountsOperations as OperationClass
elif api_version == '2020-08-01-preview':
from .v2020_08_01_preview.operations import StorageAccountsOperations as OperationClass
elif api_version == '2021-01-01':
from .v2021_01_01.operations import StorageAccountsOperations as OperationClass
elif api_version == '2021-02-01':
from .v2021_02_01.operations import StorageAccountsOperations as OperationClass
elif api_version == '2021-04-01':
from .v2021_04_01.operations import StorageAccountsOperations as OperationClass
elif api_version == '2021-06-01':
from .v2021_06_01.operations import StorageAccountsOperations as OperationClass
elif api_version == '2021-08-01':
from .v2021_08_01.operations import StorageAccountsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'storage_accounts'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def table(self):
"""Instance depends on the API version:
* 2019-06-01: :class:`TableOperations<azure.mgmt.storage.v2019_06_01.operations.TableOperations>`
* 2020-08-01-preview: :class:`TableOperations<azure.mgmt.storage.v2020_08_01_preview.operations.TableOperations>`
* 2021-01-01: :class:`TableOperations<azure.mgmt.storage.v2021_01_01.operations.TableOperations>`
* 2021-02-01: :class:`TableOperations<azure.mgmt.storage.v2021_02_01.operations.TableOperations>`
* 2021-04-01: :class:`TableOperations<azure.mgmt.storage.v2021_04_01.operations.TableOperations>`
* 2021-06-01: :class:`TableOperations<azure.mgmt.storage.v2021_06_01.operations.TableOperations>`
* 2021-08-01: :class:`TableOperations<azure.mgmt.storage.v2021_08_01.operations.TableOperations>`
"""
api_version = self._get_api_version('table')
if api_version == '2019-06-01':
from .v2019_06_01.operations import TableOperations as OperationClass
elif api_version == '2020-08-01-preview':
from .v2020_08_01_preview.operations import TableOperations as OperationClass
elif api_version == '2021-01-01':
from .v2021_01_01.operations import TableOperations as OperationClass
elif api_version == '2021-02-01':
from .v2021_02_01.operations import TableOperations as OperationClass
elif api_version == '2021-04-01':
from .v2021_04_01.operations import TableOperations as OperationClass
elif api_version == '2021-06-01':
from .v2021_06_01.operations import TableOperations as OperationClass
elif api_version == '2021-08-01':
from .v2021_08_01.operations import TableOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'table'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def table_services(self):
"""Instance depends on the API version:
* 2019-06-01: :class:`TableServicesOperations<azure.mgmt.storage.v2019_06_01.operations.TableServicesOperations>`
* 2020-08-01-preview: :class:`TableServicesOperations<azure.mgmt.storage.v2020_08_01_preview.operations.TableServicesOperations>`
* 2021-01-01: :class:`TableServicesOperations<azure.mgmt.storage.v2021_01_01.operations.TableServicesOperations>`
* 2021-02-01: :class:`TableServicesOperations<azure.mgmt.storage.v2021_02_01.operations.TableServicesOperations>`
* 2021-04-01: :class:`TableServicesOperations<azure.mgmt.storage.v2021_04_01.operations.TableServicesOperations>`
* 2021-06-01: :class:`TableServicesOperations<azure.mgmt.storage.v2021_06_01.operations.TableServicesOperations>`
* 2021-08-01: :class:`TableServicesOperations<azure.mgmt.storage.v2021_08_01.operations.TableServicesOperations>`
"""
api_version = self._get_api_version('table_services')
if api_version == '2019-06-01':
from .v2019_06_01.operations import TableServicesOperations as OperationClass
elif api_version == '2020-08-01-preview':
from .v2020_08_01_preview.operations import TableServicesOperations as OperationClass
elif api_version == '2021-01-01':
from .v2021_01_01.operations import TableServicesOperations as OperationClass
elif api_version == '2021-02-01':
from .v2021_02_01.operations import TableServicesOperations as OperationClass
elif api_version == '2021-04-01':
from .v2021_04_01.operations import TableServicesOperations as OperationClass
elif api_version == '2021-06-01':
from .v2021_06_01.operations import TableServicesOperations as OperationClass
elif api_version == '2021-08-01':
from .v2021_08_01.operations import TableServicesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'table_services'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def usage(self):
"""Instance depends on the API version:
* 2015-06-15: :class:`UsageOperations<azure.mgmt.storage.v2015_06_15.operations.UsageOperations>`
* 2016-01-01: :class:`UsageOperations<azure.mgmt.storage.v2016_01_01.operations.UsageOperations>`
* 2016-12-01: :class:`UsageOperations<azure.mgmt.storage.v2016_12_01.operations.UsageOperations>`
* 2017-06-01: :class:`UsageOperations<azure.mgmt.storage.v2017_06_01.operations.UsageOperations>`
* 2017-10-01: :class:`UsageOperations<azure.mgmt.storage.v2017_10_01.operations.UsageOperations>`
* 2018-02-01: :class:`UsageOperations<azure.mgmt.storage.v2018_02_01.operations.UsageOperations>`
"""
api_version = self._get_api_version('usage')
if api_version == '2015-06-15':
from .v2015_06_15.operations import UsageOperations as OperationClass
elif api_version == '2016-01-01':
from .v2016_01_01.operations import UsageOperations as OperationClass
elif api_version == '2016-12-01':
from .v2016_12_01.operations import UsageOperations as OperationClass
elif api_version == '2017-06-01':
from .v2017_06_01.operations import UsageOperations as OperationClass
elif api_version == '2017-10-01':
from .v2017_10_01.operations import UsageOperations as OperationClass
elif api_version == '2018-02-01':
from .v2018_02_01.operations import UsageOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'usage'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def usages(self):
"""Instance depends on the API version:
* 2018-03-01-preview: :class:`UsagesOperations<azure.mgmt.storage.v2018_03_01_preview.operations.UsagesOperations>`
* 2018-07-01: :class:`UsagesOperations<azure.mgmt.storage.v2018_07_01.operations.UsagesOperations>`
* 2018-11-01: :class:`UsagesOperations<azure.mgmt.storage.v2018_11_01.operations.UsagesOperations>`
* 2019-04-01: :class:`UsagesOperations<azure.mgmt.storage.v2019_04_01.operations.UsagesOperations>`
* 2019-06-01: :class:`UsagesOperations<azure.mgmt.storage.v2019_06_01.operations.UsagesOperations>`
* 2020-08-01-preview: :class:`UsagesOperations<azure.mgmt.storage.v2020_08_01_preview.operations.UsagesOperations>`
* 2021-01-01: :class:`UsagesOperations<azure.mgmt.storage.v2021_01_01.operations.UsagesOperations>`
* 2021-02-01: :class:`UsagesOperations<azure.mgmt.storage.v2021_02_01.operations.UsagesOperations>`
* 2021-04-01: :class:`UsagesOperations<azure.mgmt.storage.v2021_04_01.operations.UsagesOperations>`
* 2021-06-01: :class:`UsagesOperations<azure.mgmt.storage.v2021_06_01.operations.UsagesOperations>`
* 2021-08-01: :class:`UsagesOperations<azure.mgmt.storage.v2021_08_01.operations.UsagesOperations>`
"""
api_version = self._get_api_version('usages')
if api_version == '2018-03-01-preview':
from .v2018_03_01_preview.operations import UsagesOperations as OperationClass
elif api_version == '2018-07-01':
from .v2018_07_01.operations import UsagesOperations as OperationClass
elif api_version == '2018-11-01':
from .v2018_11_01.operations import UsagesOperations as OperationClass
elif api_version == '2019-04-01':
from .v2019_04_01.operations import UsagesOperations as OperationClass
elif api_version == '2019-06-01':
from .v2019_06_01.operations import UsagesOperations as OperationClass
elif api_version == '2020-08-01-preview':
from .v2020_08_01_preview.operations import UsagesOperations as OperationClass
elif api_version == '2021-01-01':
from .v2021_01_01.operations import UsagesOperations as OperationClass
elif api_version == '2021-02-01':
from .v2021_02_01.operations import UsagesOperations as OperationClass
elif api_version == '2021-04-01':
from .v2021_04_01.operations import UsagesOperations as OperationClass
elif api_version == '2021-06-01':
from .v2021_06_01.operations import UsagesOperations as OperationClass
elif api_version == '2021-08-01':
from .v2021_08_01.operations import UsagesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'usages'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
def close(self):
self._client.close()
def __enter__(self):
self._client.__enter__()
return self
def __exit__(self, *exc_details):
self._client.__exit__(*exc_details)
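# ---------------------------------------------------------------------------
# Usage sketch (editor's addition, not part of the generated client above):
# a minimal, hedged example of how the multi-API client resolves its
# operation-group properties by version. DefaultAzureCredential and the
# subscription id below are placeholders assumed to exist in the caller's
# environment.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.storage import StorageManagementClient

    # Pinning api_version makes every property above import the matching
    # versioned OperationClass; omitting it falls back to the default profile.
    client = StorageManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id="00000000-0000-0000-0000-000000000000",
        api_version="2021-08-01",
    )
    for sku in client.skus.list():
        print(sku.name)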
|
Azure/azure-sdk-for-python
|
sdk/storage/azure-mgmt-storage/azure/mgmt/storage/_storage_management_client.py
|
Python
|
mit
| 63,288
| 0.006652
|
from .uuid64 import *
|
jdowner/uuid64
|
uuid64/__init__.py
|
Python
|
mit
| 23
| 0
|
#!/usr/bin/env python
# take a large pcap and dump the data into a CSV so it can be analysed by something like R.
#
# This version we want to know what the source IP is, what the protocol is and based on those
# peices of info run a function to grab that data and write a line to a CSV file
#
# Ignore all traffic sourced from the self IP, pass self ip as on arg
#
# Parse HTTP data decoded by tshark into additional content.
#
# Prereqs: pyshark, http://kiminewt.github.io/pyshark/
import pyshark, sys, getopt
from datetime import datetime
# input and output files
ifile=''
ofile=''
selfip=''
# read command line args and bail if not complete
if len(sys.argv) != 9:
print("Usage: %s -i input.pcap -o output.csv -s 192.168.100.6 -l l4proto " % sys.argv[0])
exit()
# Use getopt to avoid param order errors
opts, args = getopt.getopt(sys.argv[1:],"i:o:s:l:")
for o, a in opts:
if o == '-i':
ifile=a
elif o == '-o':
ofile=a
elif o == '-s':
selfip=a
elif o == '-l':
l4proto=a
elif o == '-h':
print("Usage: %s -i input.pcap -o output.csv -s 192.168.100.6 -l l4proto" % sys.argv[0])
else:
print("Usage: %s -i input.pcap -o output.csv -s 192.168.100.6 -l l4proto" % sys.argv[0])
# Functions
def evall4plist(plist):
    # split the protocol list argument on commas or spaces and lower-case it
    protolist = []
    if plist.find(',') != -1:
        protolist = plist.split(",")
    elif plist.find(' ') != -1:
        protolist = plist.split(" ")
    else:
        protolist.append(plist)
    # use a real list (not a map iterator) so membership tests can be repeated per packet
    protolist = [p.lower() for p in protolist]
    return protolist
def readpcap(pfile):
return pyshark.FileCapture(pfile)
def epochconv(tsstr):
# convert the frame time into iso via epoch, clumsy but works better for excel
# return list so we can have both in the CSV, epoch and friendly
retlist=[]
dtobj=datetime.fromtimestamp(float(tsstr))
retlist.append(str(dtobj).strip())
retlist.append(tsstr.strip())
return retlist
def appendcsv(rlist):
    # convert ints and floats to strings, join them into one CSV line and append it to the output file
outputline = ",".join(map(str, rlist))
with open(ofile,"a") as outputfile:
outputfile.write(outputline + "\n")
return
def tcpdecode(lyrlst,l4plist):
if lyrlst._layer_name.lower() in l4plist :
tmplist=[]
tmpdict=lyrlst._all_fields
for key in tmpdict:
tmplist.append(tmpdict[key])
return "#".join(map(str,tmplist))
else:
return
def udpdecode(lyrlst, l4plist):
if lyrlst._layer_name.lower() in l4plist:
tmplist=[]
tmpdict=lyrlst._all_fields
for key in tmpdict:
tmplist.append(tmpdict[key])
return "#".join(map(str,tmplist))
else:
return
def parseTCP(tpkt):
#print "running parseTCP"
if len(tpkt.layers) > 3:
# pass to http module
decoded = tcpdecode(tpkt.layers[3],thisproto)
rowlist[8]= str(decoded)
#rowlist[8]= str(tpkt.layers[3]).replace('\n','')
# Complete this section regardless
rowlist[3]= 6
rowlist[4]= str(tpkt.ip.src).strip()
rowlist[5]= int(tpkt.tcp.dstport)
rowlist[6]= int(tpkt.tcp.srcport)
rowlist[7]= str(tpkt.tcp.flags).strip()
tsstr=str(tpkt.frame_info.time_epoch)
dtobj=datetime.fromtimestamp(float(tsstr))
rowlist[0]= dtobj.strftime("%Y%m%d")
rowlist[1]= dtobj.strftime("%H:%M:%S.%f")
rowlist[2]= tsstr
return
def parseICMP(ipkt):
#print "running parseICMP"
rowlist[3]= 1
rowlist[4]= str(ipkt.ip.src).strip()
rowlist[5]= int(ipkt.icmp.type)
rowlist[6]= int(ipkt.icmp.code)
tsstr=str(ipkt.frame_info.time_epoch)
dtobj=datetime.fromtimestamp(float(tsstr))
rowlist[0]= dtobj.strftime("%Y-%m-%d")
rowlist[1]= dtobj.strftime("%H:%M:%S.%f")
rowlist[2]= tsstr
return
def parseUDP(upkt):
#print "running parseUDP"
if len(upkt.layers) > 3:
# pass to http module
decoded = udpdecode(upkt.layers[3],thisproto)
rowlist[8]= str(decoded)
rowlist[3]= 17
rowlist[4]= str(upkt.ip.src).strip()
rowlist[5]= int(upkt.udp.dstport)
rowlist[6]= int(upkt.udp.srcport)
tsstr=str(upkt.frame_info.time_epoch)
dtobj=datetime.fromtimestamp(float(tsstr))
rowlist[0]= dtobj.strftime("%Y-%m-%d")
rowlist[1]= dtobj.strftime("%H:%M:%S.%f")
rowlist[2]= tsstr
return
def parseIPother(ipopkt):
print "running parseIP Other "
rowlist[3]= int(ipopkt.ip.proto)
rowlist[4]= str(ipopkt.ip.src).strip()
tsstr=str(ipopkt.frame_info.time_epoch)
dtobj=datetime.fromtimestamp(float(tsstr))
rowlist[0]= dtobj.strftime("%Y-%m-%d")
rowlist[1]= dtobj.strftime("%H:%M:%S.%f")
rowlist[2]= tsstr
return
def protorouter(evalpkt):
# direct
if int(evalpkt.ip.proto) == 6:
parseTCP(evalpkt)
elif int(evalpkt.ip.proto) == 1:
parseICMP(evalpkt)
elif int(evalpkt.ip.proto) == 17:
parseUDP(evalpkt)
else:
parseIPother(evalpkt)
return
def initrow():
    # iso-date, iso-time, epoch-tstamp, proto, src-ip, dst-port/icmp-type, src-port/icmp-code, tcp-flags, payload decode
rwlist = [str('iso-date'),str('iso-time'),str('epoch-tstamp'),int(6),str('1.2.3.4'),None,None,None,None]
return rwlist
# Main flow
thiscap = readpcap(ifile)
wrstat = True
# cheat making a global
rowlist=[]
thisproto=evall4plist(l4proto)
for pkt in thiscap:
    # skip frames without an IP layer (e.g. ARP) so pkt.ip access cannot fail
    if not hasattr(pkt, 'ip'):
        continue
    pktsrc = str(pkt.ip.src)
    if pktsrc != selfip:
#reinit array
rowlist = initrow()
protorouter(pkt)
appendcsv(rowlist)
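# ---------------------------------------------------------------------------
# Example invocation (editor's addition; the file names, self IP and protocol
# list below are placeholders):
#   ./parseL4Info.py -i capture.pcap -o flows.csv -s 192.168.100.6 -l "http,dns"
# Each appended CSV row follows the layout initialised in initrow():
#   iso-date,iso-time,epoch-tstamp,proto,src-ip,dst-port/icmp-type,src-port/icmp-code,tcp-flags,payload-decode
# ---------------------------------------------------------------------------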
|
dleecefft/pcapstats
|
pbin/parseL4Info.py
|
Python
|
apache-2.0
| 5,615
| 0.018878
|
from chainer import cuda
from chainer import function
from chainer.utils import type_check
def _kern():
return cuda.elementwise(
'T cond, T x, T slope', 'T y',
'y = cond >= 0 ? x : (T)(slope * x)', 'lrelu')
class LeakyReLU(function.Function):
"""Leaky rectifier unit."""
def __init__(self, slope=0.2):
self.slope = slope
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1)
x_type, = in_types
type_check.expect(x_type.dtype.kind == 'f')
def forward_cpu(self, x):
y = x[0].copy()
y[x[0] < 0] *= self.slope
if self.slope >= 0:
self.retain_inputs(())
self.retain_outputs((0,))
return y,
def forward_gpu(self, x):
y = _kern()(x[0], x[0], self.slope)
if self.slope >= 0:
self.retain_inputs(())
self.retain_outputs((0,))
return y,
def backward_cpu(self, x, gy):
gx = gy[0].copy()
if self.slope >= 0:
y = self.output_data
gx[y[0] < 0] *= self.slope
else:
gx[x[0] < 0] *= self.slope
return gx,
def backward_gpu(self, x, gy):
if self.slope >= 0:
y = self.output_data
gx = _kern()(y[0], gy[0], self.slope)
else:
gx = _kern()(x[0], gy[0], self.slope)
return gx,
def leaky_relu(x, slope=0.2):
"""Leaky Rectified Linear Unit function.
This function is expressed as
.. math:: f(x)=\\max(x, ax),
where :math:`a` is a configurable slope value.
Args:
x (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
:class:`cupy.ndarray`):
Input variable. A :math:`(s_1, s_2, ..., s_N)`-shaped float array.
slope (float): Slope value :math:`a`.
Returns:
~chainer.Variable: Output variable. A
:math:`(s_1, s_2, ..., s_N)`-shaped float array.
.. admonition:: Example
>>> x = np.array([[-1, 0], [2, -3], [-2, 1]], 'f')
>>> x
array([[-1., 0.],
[ 2., -3.],
[-2., 1.]], dtype=float32)
>>> F.leaky_relu(x, slope=0.2).data
array([[-0.2 , 0. ],
[ 2. , -0.60000002],
[-0.40000001, 1. ]], dtype=float32)
"""
return LeakyReLU(slope)(x)
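# ---------------------------------------------------------------------------
# Illustrative check (editor's addition, not part of the chainer source):
# the backward pass scales upstream gradients by `slope` wherever the input
# (or the retained output, when slope >= 0) is negative.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import numpy as np
    from chainer import Variable

    x = Variable(np.array([[-1.0, 2.0]], dtype=np.float32))
    y = leaky_relu(x, slope=0.2)
    y.grad = np.ones_like(y.data)
    y.backward()
    print(x.grad)  # expected: [[0.2  1. ]]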
|
kiyukuta/chainer
|
chainer/functions/activation/leaky_relu.py
|
Python
|
mit
| 2,382
| 0
|
#!/usr/bin/python3 -i
#
# Copyright (c) 2015-2020 The Khronos Group Inc.
# Copyright (c) 2015-2020 Valve Corporation
# Copyright (c) 2015-2020 LunarG, Inc.
# Copyright (c) 2015-2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Tobin Ehlis <tobine@google.com>
# Author: Mark Lobodzinski <mark@lunarg.com>
import os,re,sys
import xml.etree.ElementTree as etree
from generator import *
from collections import namedtuple
from common_codegen import *
# LayerChassisDispatchGeneratorOptions - subclass of GeneratorOptions.
#
# Adds options used by LayerChassisDispatchOutputGenerator objects during
# layer chassis dispatch file generation.
#
# Additional members
# prefixText - list of strings to prefix generated header with
# (usually a copyright statement + calling convention macros).
# protectFile - True if multiple inclusion protection should be
# generated (based on the filename) around the entire header.
# protectFeature - True if #ifndef..#endif protection should be
# generated around a feature interface in the header file.
# genFuncPointers - True if function pointer typedefs should be
# generated
# protectProto - If conditional protection should be generated
# around prototype declarations, set to either '#ifdef'
# to require opt-in (#ifdef protectProtoStr) or '#ifndef'
# to require opt-out (#ifndef protectProtoStr). Otherwise
# set to None.
# protectProtoStr - #ifdef/#ifndef symbol to use around prototype
# declarations, if protectProto is set
# apicall - string to use for the function declaration prefix,
# such as APICALL on Windows.
# apientry - string to use for the calling convention macro,
# in typedefs, such as APIENTRY.
# apientryp - string to use for the calling convention macro
# in function pointer typedefs, such as APIENTRYP.
# indentFuncProto - True if prototype declarations should put each
# parameter on a separate line
# indentFuncPointer - True if typedefed function pointers should put each
# parameter on a separate line
# alignFuncParam - if nonzero and parameters are being put on a
# separate line, align parameter names at the specified column
class LayerChassisDispatchGeneratorOptions(GeneratorOptions):
def __init__(self,
conventions = None,
filename = None,
directory = '.',
apiname = None,
profile = None,
versions = '.*',
emitversions = '.*',
defaultExtensions = None,
addExtensions = None,
removeExtensions = None,
emitExtensions = None,
sortProcedure = regSortFeatures,
prefixText = "",
genFuncPointers = True,
protectFile = True,
protectFeature = True,
apicall = '',
apientry = '',
apientryp = '',
indentFuncProto = True,
indentFuncPointer = False,
alignFuncParam = 0,
expandEnumerants = True):
GeneratorOptions.__init__(self, conventions, filename, directory, apiname, profile,
versions, emitversions, defaultExtensions,
addExtensions, removeExtensions, emitExtensions, sortProcedure)
self.prefixText = prefixText
self.genFuncPointers = genFuncPointers
self.protectFile = protectFile
self.protectFeature = protectFeature
self.apicall = apicall
self.apientry = apientry
self.apientryp = apientryp
self.indentFuncProto = indentFuncProto
self.indentFuncPointer = indentFuncPointer
self.alignFuncParam = alignFuncParam
self.expandEnumerants = expandEnumerants
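# Illustrative sketch (editor's addition, not from the upstream repository):
# roughly how a registry driver script might construct these options; the
# output file name, API name and extension filter are placeholder values.
def _example_dispatch_generator_options():
    return LayerChassisDispatchGeneratorOptions(
        filename='layer_chassis_dispatch.cpp',
        directory='.',
        apiname='vulkan',
        versions='.*',
        emitversions='.*',
        defaultExtensions='vulkan',
        alignFuncParam=48)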
# LayerChassisDispatchOutputGenerator - subclass of OutputGenerator.
# Generates layer chassis non-dispatchable handle-wrapping code.
#
# ---- methods ----
# LayerChassisDispatchOutputGenerator(errFile, warnFile, diagFile) - args as for OutputGenerator. Defines additional internal state.
# ---- methods overriding base class ----
# beginFile(genOpts)
# endFile()
# beginFeature(interface, emit)
# endFeature()
# genCmd(cmdinfo)
# genStruct()
# genType()
class LayerChassisDispatchOutputGenerator(OutputGenerator):
"""Generate layer chassis handle wrapping code based on XML element attributes"""
inline_copyright_message = """
// This file is ***GENERATED***. Do Not Edit.
// See layer_chassis_dispatch_generator.py for modifications.
/* Copyright (c) 2015-2020 The Khronos Group Inc.
* Copyright (c) 2015-2020 Valve Corporation
* Copyright (c) 2015-2020 LunarG, Inc.
* Copyright (c) 2015-2020 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Author: Mark Lobodzinski <mark@lunarg.com>
*/"""
inline_custom_source_preamble = """
#define DISPATCH_MAX_STACK_ALLOCATIONS 32
// The VK_EXT_pipeline_creation_feedback extension returns data from the driver -- we've created a copy of the pnext chain, so
// copy the returned data to the caller before freeing the copy's data.
void CopyCreatePipelineFeedbackData(const void *src_chain, const void *dst_chain) {
auto src_feedback_struct = lvl_find_in_chain<VkPipelineCreationFeedbackCreateInfoEXT>(src_chain);
if (!src_feedback_struct) return;
auto dst_feedback_struct = const_cast<VkPipelineCreationFeedbackCreateInfoEXT *>(
lvl_find_in_chain<VkPipelineCreationFeedbackCreateInfoEXT>(dst_chain));
*dst_feedback_struct->pPipelineCreationFeedback = *src_feedback_struct->pPipelineCreationFeedback;
for (uint32_t i = 0; i < src_feedback_struct->pipelineStageCreationFeedbackCount; i++) {
dst_feedback_struct->pPipelineStageCreationFeedbacks[i] = src_feedback_struct->pPipelineStageCreationFeedbacks[i];
}
}
VkResult DispatchCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount,
const VkGraphicsPipelineCreateInfo *pCreateInfos,
const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (!wrap_handles) return layer_data->device_dispatch_table.CreateGraphicsPipelines(device, pipelineCache, createInfoCount,
pCreateInfos, pAllocator, pPipelines);
safe_VkGraphicsPipelineCreateInfo *local_pCreateInfos = nullptr;
if (pCreateInfos) {
local_pCreateInfos = new safe_VkGraphicsPipelineCreateInfo[createInfoCount];
read_lock_guard_t lock(dispatch_lock);
for (uint32_t idx0 = 0; idx0 < createInfoCount; ++idx0) {
bool uses_color_attachment = false;
bool uses_depthstencil_attachment = false;
{
const auto subpasses_uses_it = layer_data->renderpasses_states.find(layer_data->Unwrap(pCreateInfos[idx0].renderPass));
if (subpasses_uses_it != layer_data->renderpasses_states.end()) {
const auto &subpasses_uses = subpasses_uses_it->second;
if (subpasses_uses.subpasses_using_color_attachment.count(pCreateInfos[idx0].subpass))
uses_color_attachment = true;
if (subpasses_uses.subpasses_using_depthstencil_attachment.count(pCreateInfos[idx0].subpass))
uses_depthstencil_attachment = true;
}
}
local_pCreateInfos[idx0].initialize(&pCreateInfos[idx0], uses_color_attachment, uses_depthstencil_attachment);
if (pCreateInfos[idx0].basePipelineHandle) {
local_pCreateInfos[idx0].basePipelineHandle = layer_data->Unwrap(pCreateInfos[idx0].basePipelineHandle);
}
if (pCreateInfos[idx0].layout) {
local_pCreateInfos[idx0].layout = layer_data->Unwrap(pCreateInfos[idx0].layout);
}
if (pCreateInfos[idx0].pStages) {
for (uint32_t idx1 = 0; idx1 < pCreateInfos[idx0].stageCount; ++idx1) {
if (pCreateInfos[idx0].pStages[idx1].module) {
local_pCreateInfos[idx0].pStages[idx1].module = layer_data->Unwrap(pCreateInfos[idx0].pStages[idx1].module);
}
}
}
if (pCreateInfos[idx0].renderPass) {
local_pCreateInfos[idx0].renderPass = layer_data->Unwrap(pCreateInfos[idx0].renderPass);
}
}
}
if (pipelineCache) {
pipelineCache = layer_data->Unwrap(pipelineCache);
}
VkResult result = layer_data->device_dispatch_table.CreateGraphicsPipelines(device, pipelineCache, createInfoCount,
local_pCreateInfos->ptr(), pAllocator, pPipelines);
for (uint32_t i = 0; i < createInfoCount; ++i) {
if (pCreateInfos[i].pNext != VK_NULL_HANDLE) {
CopyCreatePipelineFeedbackData(local_pCreateInfos[i].pNext, pCreateInfos[i].pNext);
}
}
delete[] local_pCreateInfos;
{
for (uint32_t i = 0; i < createInfoCount; ++i) {
if (pPipelines[i] != VK_NULL_HANDLE) {
pPipelines[i] = layer_data->WrapNew(pPipelines[i]);
}
}
}
return result;
}
template <typename T>
static void UpdateCreateRenderPassState(ValidationObject *layer_data, const T *pCreateInfo, VkRenderPass renderPass) {
auto &renderpass_state = layer_data->renderpasses_states[renderPass];
for (uint32_t subpass = 0; subpass < pCreateInfo->subpassCount; ++subpass) {
bool uses_color = false;
for (uint32_t i = 0; i < pCreateInfo->pSubpasses[subpass].colorAttachmentCount && !uses_color; ++i)
if (pCreateInfo->pSubpasses[subpass].pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) uses_color = true;
bool uses_depthstencil = false;
if (pCreateInfo->pSubpasses[subpass].pDepthStencilAttachment)
if (pCreateInfo->pSubpasses[subpass].pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)
uses_depthstencil = true;
if (uses_color) renderpass_state.subpasses_using_color_attachment.insert(subpass);
if (uses_depthstencil) renderpass_state.subpasses_using_depthstencil_attachment.insert(subpass);
}
}
VkResult DispatchCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) {
auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = layer_data->device_dispatch_table.CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);
if (!wrap_handles) return result;
if (VK_SUCCESS == result) {
write_lock_guard_t lock(dispatch_lock);
UpdateCreateRenderPassState(layer_data, pCreateInfo, *pRenderPass);
*pRenderPass = layer_data->WrapNew(*pRenderPass);
}
return result;
}
VkResult DispatchCreateRenderPass2KHR(VkDevice device, const VkRenderPassCreateInfo2KHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) {
auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = layer_data->device_dispatch_table.CreateRenderPass2KHR(device, pCreateInfo, pAllocator, pRenderPass);
if (!wrap_handles) return result;
if (VK_SUCCESS == result) {
write_lock_guard_t lock(dispatch_lock);
UpdateCreateRenderPassState(layer_data, pCreateInfo, *pRenderPass);
*pRenderPass = layer_data->WrapNew(*pRenderPass);
}
return result;
}
VkResult DispatchCreateRenderPass2(VkDevice device, const VkRenderPassCreateInfo2KHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) {
auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = layer_data->device_dispatch_table.CreateRenderPass2(device, pCreateInfo, pAllocator, pRenderPass);
if (!wrap_handles) return result;
if (VK_SUCCESS == result) {
write_lock_guard_t lock(dispatch_lock);
UpdateCreateRenderPassState(layer_data, pCreateInfo, *pRenderPass);
*pRenderPass = layer_data->WrapNew(*pRenderPass);
}
return result;
}
void DispatchDestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (!wrap_handles) return layer_data->device_dispatch_table.DestroyRenderPass(device, renderPass, pAllocator);
uint64_t renderPass_id = reinterpret_cast<uint64_t &>(renderPass);
auto iter = unique_id_mapping.pop(renderPass_id);
if (iter != unique_id_mapping.end()) {
renderPass = (VkRenderPass)iter->second;
} else {
renderPass = (VkRenderPass)0;
}
layer_data->device_dispatch_table.DestroyRenderPass(device, renderPass, pAllocator);
write_lock_guard_t lock(dispatch_lock);
layer_data->renderpasses_states.erase(renderPass);
}
VkResult DispatchCreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain) {
auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (!wrap_handles) return layer_data->device_dispatch_table.CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);
safe_VkSwapchainCreateInfoKHR *local_pCreateInfo = NULL;
if (pCreateInfo) {
local_pCreateInfo = new safe_VkSwapchainCreateInfoKHR(pCreateInfo);
local_pCreateInfo->oldSwapchain = layer_data->Unwrap(pCreateInfo->oldSwapchain);
// Surface is instance-level object
local_pCreateInfo->surface = layer_data->Unwrap(pCreateInfo->surface);
}
VkResult result = layer_data->device_dispatch_table.CreateSwapchainKHR(device, local_pCreateInfo->ptr(), pAllocator, pSwapchain);
delete local_pCreateInfo;
if (VK_SUCCESS == result) {
*pSwapchain = layer_data->WrapNew(*pSwapchain);
}
return result;
}
VkResult DispatchCreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount, const VkSwapchainCreateInfoKHR *pCreateInfos,
const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchains) {
auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (!wrap_handles)
return layer_data->device_dispatch_table.CreateSharedSwapchainsKHR(device, swapchainCount, pCreateInfos, pAllocator,
pSwapchains);
safe_VkSwapchainCreateInfoKHR *local_pCreateInfos = NULL;
{
if (pCreateInfos) {
local_pCreateInfos = new safe_VkSwapchainCreateInfoKHR[swapchainCount];
for (uint32_t i = 0; i < swapchainCount; ++i) {
local_pCreateInfos[i].initialize(&pCreateInfos[i]);
if (pCreateInfos[i].surface) {
// Surface is instance-level object
local_pCreateInfos[i].surface = layer_data->Unwrap(pCreateInfos[i].surface);
}
if (pCreateInfos[i].oldSwapchain) {
local_pCreateInfos[i].oldSwapchain = layer_data->Unwrap(pCreateInfos[i].oldSwapchain);
}
}
}
}
VkResult result = layer_data->device_dispatch_table.CreateSharedSwapchainsKHR(device, swapchainCount, local_pCreateInfos->ptr(),
pAllocator, pSwapchains);
delete[] local_pCreateInfos;
if (VK_SUCCESS == result) {
for (uint32_t i = 0; i < swapchainCount; i++) {
pSwapchains[i] = layer_data->WrapNew(pSwapchains[i]);
}
}
return result;
}
VkResult DispatchGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount,
VkImage *pSwapchainImages) {
auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (!wrap_handles)
return layer_data->device_dispatch_table.GetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages);
VkSwapchainKHR wrapped_swapchain_handle = swapchain;
if (VK_NULL_HANDLE != swapchain) {
swapchain = layer_data->Unwrap(swapchain);
}
VkResult result =
layer_data->device_dispatch_table.GetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages);
if ((VK_SUCCESS == result) || (VK_INCOMPLETE == result)) {
if ((*pSwapchainImageCount > 0) && pSwapchainImages) {
write_lock_guard_t lock(dispatch_lock);
auto &wrapped_swapchain_image_handles = layer_data->swapchain_wrapped_image_handle_map[wrapped_swapchain_handle];
for (uint32_t i = static_cast<uint32_t>(wrapped_swapchain_image_handles.size()); i < *pSwapchainImageCount; i++) {
wrapped_swapchain_image_handles.emplace_back(layer_data->WrapNew(pSwapchainImages[i]));
}
for (uint32_t i = 0; i < *pSwapchainImageCount; i++) {
pSwapchainImages[i] = wrapped_swapchain_image_handles[i];
}
}
}
return result;
}
void DispatchDestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (!wrap_handles) return layer_data->device_dispatch_table.DestroySwapchainKHR(device, swapchain, pAllocator);
write_lock_guard_t lock(dispatch_lock);
auto &image_array = layer_data->swapchain_wrapped_image_handle_map[swapchain];
for (auto &image_handle : image_array) {
unique_id_mapping.erase(HandleToUint64(image_handle));
}
layer_data->swapchain_wrapped_image_handle_map.erase(swapchain);
lock.unlock();
uint64_t swapchain_id = HandleToUint64(swapchain);
auto iter = unique_id_mapping.pop(swapchain_id);
if (iter != unique_id_mapping.end()) {
swapchain = (VkSwapchainKHR)iter->second;
} else {
swapchain = (VkSwapchainKHR)0;
}
layer_data->device_dispatch_table.DestroySwapchainKHR(device, swapchain, pAllocator);
}
VkResult DispatchQueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
auto layer_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
if (!wrap_handles) return layer_data->device_dispatch_table.QueuePresentKHR(queue, pPresentInfo);
safe_VkPresentInfoKHR *local_pPresentInfo = NULL;
{
if (pPresentInfo) {
local_pPresentInfo = new safe_VkPresentInfoKHR(pPresentInfo);
if (local_pPresentInfo->pWaitSemaphores) {
for (uint32_t index1 = 0; index1 < local_pPresentInfo->waitSemaphoreCount; ++index1) {
local_pPresentInfo->pWaitSemaphores[index1] = layer_data->Unwrap(pPresentInfo->pWaitSemaphores[index1]);
}
}
if (local_pPresentInfo->pSwapchains) {
for (uint32_t index1 = 0; index1 < local_pPresentInfo->swapchainCount; ++index1) {
local_pPresentInfo->pSwapchains[index1] = layer_data->Unwrap(pPresentInfo->pSwapchains[index1]);
}
}
}
}
VkResult result = layer_data->device_dispatch_table.QueuePresentKHR(queue, local_pPresentInfo->ptr());
// pResults is an output array embedded in a structure. The code generator neglects to copy back from the safe_* version,
// so handle it as a special case here:
if (pPresentInfo && pPresentInfo->pResults) {
for (uint32_t i = 0; i < pPresentInfo->swapchainCount; i++) {
pPresentInfo->pResults[i] = local_pPresentInfo->pResults[i];
}
}
delete local_pPresentInfo;
return result;
}
void DispatchDestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator) {
auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (!wrap_handles) return layer_data->device_dispatch_table.DestroyDescriptorPool(device, descriptorPool, pAllocator);
write_lock_guard_t lock(dispatch_lock);
// remove references to implicitly freed descriptor sets
    for (auto descriptor_set : layer_data->pool_descriptor_sets_map[descriptorPool]) {
unique_id_mapping.erase(reinterpret_cast<uint64_t &>(descriptor_set));
}
layer_data->pool_descriptor_sets_map.erase(descriptorPool);
lock.unlock();
uint64_t descriptorPool_id = reinterpret_cast<uint64_t &>(descriptorPool);
auto iter = unique_id_mapping.pop(descriptorPool_id);
if (iter != unique_id_mapping.end()) {
descriptorPool = (VkDescriptorPool)iter->second;
} else {
descriptorPool = (VkDescriptorPool)0;
}
layer_data->device_dispatch_table.DestroyDescriptorPool(device, descriptorPool, pAllocator);
}
VkResult DispatchResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags) {
auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (!wrap_handles) return layer_data->device_dispatch_table.ResetDescriptorPool(device, descriptorPool, flags);
VkDescriptorPool local_descriptor_pool = VK_NULL_HANDLE;
{
local_descriptor_pool = layer_data->Unwrap(descriptorPool);
}
VkResult result = layer_data->device_dispatch_table.ResetDescriptorPool(device, local_descriptor_pool, flags);
if (VK_SUCCESS == result) {
write_lock_guard_t lock(dispatch_lock);
// remove references to implicitly freed descriptor sets
        for (auto descriptor_set : layer_data->pool_descriptor_sets_map[descriptorPool]) {
unique_id_mapping.erase(reinterpret_cast<uint64_t &>(descriptor_set));
}
layer_data->pool_descriptor_sets_map[descriptorPool].clear();
}
return result;
}
VkResult DispatchAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo,
VkDescriptorSet *pDescriptorSets) {
auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (!wrap_handles) return layer_data->device_dispatch_table.AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);
safe_VkDescriptorSetAllocateInfo *local_pAllocateInfo = NULL;
{
if (pAllocateInfo) {
local_pAllocateInfo = new safe_VkDescriptorSetAllocateInfo(pAllocateInfo);
if (pAllocateInfo->descriptorPool) {
local_pAllocateInfo->descriptorPool = layer_data->Unwrap(pAllocateInfo->descriptorPool);
}
if (local_pAllocateInfo->pSetLayouts) {
for (uint32_t index1 = 0; index1 < local_pAllocateInfo->descriptorSetCount; ++index1) {
local_pAllocateInfo->pSetLayouts[index1] = layer_data->Unwrap(local_pAllocateInfo->pSetLayouts[index1]);
}
}
}
}
VkResult result = layer_data->device_dispatch_table.AllocateDescriptorSets(
device, (const VkDescriptorSetAllocateInfo *)local_pAllocateInfo, pDescriptorSets);
if (local_pAllocateInfo) {
delete local_pAllocateInfo;
}
if (VK_SUCCESS == result) {
write_lock_guard_t lock(dispatch_lock);
auto &pool_descriptor_sets = layer_data->pool_descriptor_sets_map[pAllocateInfo->descriptorPool];
for (uint32_t index0 = 0; index0 < pAllocateInfo->descriptorSetCount; index0++) {
pDescriptorSets[index0] = layer_data->WrapNew(pDescriptorSets[index0]);
pool_descriptor_sets.insert(pDescriptorSets[index0]);
}
}
return result;
}
VkResult DispatchFreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t descriptorSetCount,
const VkDescriptorSet *pDescriptorSets) {
auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (!wrap_handles)
return layer_data->device_dispatch_table.FreeDescriptorSets(device, descriptorPool, descriptorSetCount, pDescriptorSets);
VkDescriptorSet *local_pDescriptorSets = NULL;
VkDescriptorPool local_descriptor_pool = VK_NULL_HANDLE;
{
local_descriptor_pool = layer_data->Unwrap(descriptorPool);
if (pDescriptorSets) {
local_pDescriptorSets = new VkDescriptorSet[descriptorSetCount];
for (uint32_t index0 = 0; index0 < descriptorSetCount; ++index0) {
local_pDescriptorSets[index0] = layer_data->Unwrap(pDescriptorSets[index0]);
}
}
}
VkResult result = layer_data->device_dispatch_table.FreeDescriptorSets(device, local_descriptor_pool, descriptorSetCount,
(const VkDescriptorSet *)local_pDescriptorSets);
if (local_pDescriptorSets) delete[] local_pDescriptorSets;
if ((VK_SUCCESS == result) && (pDescriptorSets)) {
write_lock_guard_t lock(dispatch_lock);
auto &pool_descriptor_sets = layer_data->pool_descriptor_sets_map[descriptorPool];
for (uint32_t index0 = 0; index0 < descriptorSetCount; index0++) {
VkDescriptorSet handle = pDescriptorSets[index0];
pool_descriptor_sets.erase(handle);
uint64_t unique_id = reinterpret_cast<uint64_t &>(handle);
unique_id_mapping.erase(unique_id);
}
}
return result;
}
// This is the core version of this routine. The extension version is below.
VkResult DispatchCreateDescriptorUpdateTemplate(VkDevice device, const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) {
auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (!wrap_handles)
return layer_data->device_dispatch_table.CreateDescriptorUpdateTemplate(device, pCreateInfo, pAllocator,
pDescriptorUpdateTemplate);
safe_VkDescriptorUpdateTemplateCreateInfo var_local_pCreateInfo;
safe_VkDescriptorUpdateTemplateCreateInfo *local_pCreateInfo = NULL;
if (pCreateInfo) {
local_pCreateInfo = &var_local_pCreateInfo;
local_pCreateInfo->initialize(pCreateInfo);
if (pCreateInfo->templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET) {
local_pCreateInfo->descriptorSetLayout = layer_data->Unwrap(pCreateInfo->descriptorSetLayout);
}
if (pCreateInfo->templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR) {
local_pCreateInfo->pipelineLayout = layer_data->Unwrap(pCreateInfo->pipelineLayout);
}
}
VkResult result = layer_data->device_dispatch_table.CreateDescriptorUpdateTemplate(device, local_pCreateInfo->ptr(), pAllocator,
pDescriptorUpdateTemplate);
if (VK_SUCCESS == result) {
*pDescriptorUpdateTemplate = layer_data->WrapNew(*pDescriptorUpdateTemplate);
// Shadow template createInfo for later updates
if (local_pCreateInfo) {
write_lock_guard_t lock(dispatch_lock);
std::unique_ptr<TEMPLATE_STATE> template_state(new TEMPLATE_STATE(*pDescriptorUpdateTemplate, local_pCreateInfo));
layer_data->desc_template_createinfo_map[(uint64_t)*pDescriptorUpdateTemplate] = std::move(template_state);
}
}
return result;
}
// This is the extension version of this routine. The core version is above.
VkResult DispatchCreateDescriptorUpdateTemplateKHR(VkDevice device, const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) {
auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (!wrap_handles)
return layer_data->device_dispatch_table.CreateDescriptorUpdateTemplateKHR(device, pCreateInfo, pAllocator,
pDescriptorUpdateTemplate);
safe_VkDescriptorUpdateTemplateCreateInfo var_local_pCreateInfo;
safe_VkDescriptorUpdateTemplateCreateInfo *local_pCreateInfo = NULL;
if (pCreateInfo) {
local_pCreateInfo = &var_local_pCreateInfo;
local_pCreateInfo->initialize(pCreateInfo);
if (pCreateInfo->templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET) {
local_pCreateInfo->descriptorSetLayout = layer_data->Unwrap(pCreateInfo->descriptorSetLayout);
}
if (pCreateInfo->templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR) {
local_pCreateInfo->pipelineLayout = layer_data->Unwrap(pCreateInfo->pipelineLayout);
}
}
VkResult result = layer_data->device_dispatch_table.CreateDescriptorUpdateTemplateKHR(device, local_pCreateInfo->ptr(),
pAllocator, pDescriptorUpdateTemplate);
if (VK_SUCCESS == result) {
*pDescriptorUpdateTemplate = layer_data->WrapNew(*pDescriptorUpdateTemplate);
// Shadow template createInfo for later updates
if (local_pCreateInfo) {
write_lock_guard_t lock(dispatch_lock);
std::unique_ptr<TEMPLATE_STATE> template_state(new TEMPLATE_STATE(*pDescriptorUpdateTemplate, local_pCreateInfo));
layer_data->desc_template_createinfo_map[(uint64_t)*pDescriptorUpdateTemplate] = std::move(template_state);
}
}
return result;
}
// This is the core version of this routine. The extension version is below.
void DispatchDestroyDescriptorUpdateTemplate(VkDevice device, VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
const VkAllocationCallbacks *pAllocator) {
auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (!wrap_handles)
return layer_data->device_dispatch_table.DestroyDescriptorUpdateTemplate(device, descriptorUpdateTemplate, pAllocator);
write_lock_guard_t lock(dispatch_lock);
uint64_t descriptor_update_template_id = reinterpret_cast<uint64_t &>(descriptorUpdateTemplate);
layer_data->desc_template_createinfo_map.erase(descriptor_update_template_id);
lock.unlock();
auto iter = unique_id_mapping.pop(descriptor_update_template_id);
if (iter != unique_id_mapping.end()) {
descriptorUpdateTemplate = (VkDescriptorUpdateTemplate)iter->second;
} else {
descriptorUpdateTemplate = (VkDescriptorUpdateTemplate)0;
}
layer_data->device_dispatch_table.DestroyDescriptorUpdateTemplate(device, descriptorUpdateTemplate, pAllocator);
}
// This is the extension version of this routine. The core version is above.
void DispatchDestroyDescriptorUpdateTemplateKHR(VkDevice device, VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
const VkAllocationCallbacks *pAllocator) {
auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (!wrap_handles)
return layer_data->device_dispatch_table.DestroyDescriptorUpdateTemplateKHR(device, descriptorUpdateTemplate, pAllocator);
write_lock_guard_t lock(dispatch_lock);
uint64_t descriptor_update_template_id = reinterpret_cast<uint64_t &>(descriptorUpdateTemplate);
layer_data->desc_template_createinfo_map.erase(descriptor_update_template_id);
lock.unlock();
auto iter = unique_id_mapping.pop(descriptor_update_template_id);
if (iter != unique_id_mapping.end()) {
descriptorUpdateTemplate = (VkDescriptorUpdateTemplate)iter->second;
} else {
descriptorUpdateTemplate = (VkDescriptorUpdateTemplate)0;
}
layer_data->device_dispatch_table.DestroyDescriptorUpdateTemplateKHR(device, descriptorUpdateTemplate, pAllocator);
}
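// Build a deep copy of the descriptor update template data (pData) with every embedded handle replaced by its
// unwrapped value, laid out according to the shadowed template create info. The caller frees the returned buffer.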
void *BuildUnwrappedUpdateTemplateBuffer(ValidationObject *layer_data, uint64_t descriptorUpdateTemplate, const void *pData) {
auto const template_map_entry = layer_data->desc_template_createinfo_map.find(descriptorUpdateTemplate);
auto const &create_info = template_map_entry->second->create_info;
size_t allocation_size = 0;
std::vector<std::tuple<size_t, VulkanObjectType, uint64_t, size_t>> template_entries;
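    // Each entry records (offset into pData, handle category, pointer or handle value stored as uint64_t,
    // byte count when copying raw inline data)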
for (uint32_t i = 0; i < create_info.descriptorUpdateEntryCount; i++) {
for (uint32_t j = 0; j < create_info.pDescriptorUpdateEntries[i].descriptorCount; j++) {
size_t offset = create_info.pDescriptorUpdateEntries[i].offset + j * create_info.pDescriptorUpdateEntries[i].stride;
char *update_entry = (char *)(pData) + offset;
switch (create_info.pDescriptorUpdateEntries[i].descriptorType) {
case VK_DESCRIPTOR_TYPE_SAMPLER:
case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: {
auto image_entry = reinterpret_cast<VkDescriptorImageInfo *>(update_entry);
allocation_size = std::max(allocation_size, offset + sizeof(VkDescriptorImageInfo));
VkDescriptorImageInfo *wrapped_entry = new VkDescriptorImageInfo(*image_entry);
wrapped_entry->sampler = layer_data->Unwrap(image_entry->sampler);
wrapped_entry->imageView = layer_data->Unwrap(image_entry->imageView);
template_entries.emplace_back(offset, kVulkanObjectTypeImage, CastToUint64(wrapped_entry), 0);
} break;
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
auto buffer_entry = reinterpret_cast<VkDescriptorBufferInfo *>(update_entry);
allocation_size = std::max(allocation_size, offset + sizeof(VkDescriptorBufferInfo));
VkDescriptorBufferInfo *wrapped_entry = new VkDescriptorBufferInfo(*buffer_entry);
wrapped_entry->buffer = layer_data->Unwrap(buffer_entry->buffer);
template_entries.emplace_back(offset, kVulkanObjectTypeBuffer, CastToUint64(wrapped_entry), 0);
} break;
case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: {
auto buffer_view_handle = reinterpret_cast<VkBufferView *>(update_entry);
allocation_size = std::max(allocation_size, offset + sizeof(VkBufferView));
VkBufferView wrapped_entry = layer_data->Unwrap(*buffer_view_handle);
template_entries.emplace_back(offset, kVulkanObjectTypeBufferView, CastToUint64(wrapped_entry), 0);
} break;
case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT: {
size_t numBytes = create_info.pDescriptorUpdateEntries[i].descriptorCount;
allocation_size = std::max(allocation_size, offset + numBytes);
// nothing to unwrap, just plain data
template_entries.emplace_back(offset, kVulkanObjectTypeUnknown, CastToUint64(update_entry),
numBytes);
                    // The whole inline uniform block was recorded as a single chunk above, so terminate the inner per-descriptor loop
j = create_info.pDescriptorUpdateEntries[i].descriptorCount;
} break;
default:
assert(0);
break;
}
}
}
// Allocate required buffer size and populate with source/unwrapped data
void *unwrapped_data = malloc(allocation_size);
for (auto &this_entry : template_entries) {
VulkanObjectType type = std::get<1>(this_entry);
void *destination = (char *)unwrapped_data + std::get<0>(this_entry);
uint64_t source = std::get<2>(this_entry);
size_t size = std::get<3>(this_entry);
if (size != 0) {
assert(type == kVulkanObjectTypeUnknown);
memcpy(destination, CastFromUint64<void *>(source), size);
} else {
switch (type) {
case kVulkanObjectTypeImage:
*(reinterpret_cast<VkDescriptorImageInfo *>(destination)) =
*(reinterpret_cast<VkDescriptorImageInfo *>(source));
delete CastFromUint64<VkDescriptorImageInfo *>(source);
break;
case kVulkanObjectTypeBuffer:
*(reinterpret_cast<VkDescriptorBufferInfo *>(destination)) =
*(CastFromUint64<VkDescriptorBufferInfo *>(source));
delete CastFromUint64<VkDescriptorBufferInfo *>(source);
break;
case kVulkanObjectTypeBufferView:
*(reinterpret_cast<VkBufferView *>(destination)) = CastFromUint64<VkBufferView>(source);
break;
default:
assert(0);
break;
}
}
}
return (void *)unwrapped_data;
}
void DispatchUpdateDescriptorSetWithTemplate(VkDevice device, VkDescriptorSet descriptorSet,
VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, const void *pData) {
auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (!wrap_handles)
return layer_data->device_dispatch_table.UpdateDescriptorSetWithTemplate(device, descriptorSet, descriptorUpdateTemplate,
pData);
uint64_t template_handle = reinterpret_cast<uint64_t &>(descriptorUpdateTemplate);
void *unwrapped_buffer = nullptr;
{
read_lock_guard_t lock(dispatch_lock);
descriptorSet = layer_data->Unwrap(descriptorSet);
descriptorUpdateTemplate = (VkDescriptorUpdateTemplate)layer_data->Unwrap(descriptorUpdateTemplate);
unwrapped_buffer = BuildUnwrappedUpdateTemplateBuffer(layer_data, template_handle, pData);
}
layer_data->device_dispatch_table.UpdateDescriptorSetWithTemplate(device, descriptorSet, descriptorUpdateTemplate, unwrapped_buffer);
free(unwrapped_buffer);
}
void DispatchUpdateDescriptorSetWithTemplateKHR(VkDevice device, VkDescriptorSet descriptorSet,
VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, const void *pData) {
auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (!wrap_handles)
return layer_data->device_dispatch_table.UpdateDescriptorSetWithTemplateKHR(device, descriptorSet, descriptorUpdateTemplate,
pData);
uint64_t template_handle = reinterpret_cast<uint64_t &>(descriptorUpdateTemplate);
void *unwrapped_buffer = nullptr;
{
read_lock_guard_t lock(dispatch_lock);
descriptorSet = layer_data->Unwrap(descriptorSet);
descriptorUpdateTemplate = layer_data->Unwrap(descriptorUpdateTemplate);
unwrapped_buffer = BuildUnwrappedUpdateTemplateBuffer(layer_data, template_handle, pData);
}
layer_data->device_dispatch_table.UpdateDescriptorSetWithTemplateKHR(device, descriptorSet, descriptorUpdateTemplate, unwrapped_buffer);
free(unwrapped_buffer);
}
void DispatchCmdPushDescriptorSetWithTemplateKHR(VkCommandBuffer commandBuffer,
VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, VkPipelineLayout layout,
uint32_t set, const void *pData) {
auto layer_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
if (!wrap_handles)
return layer_data->device_dispatch_table.CmdPushDescriptorSetWithTemplateKHR(commandBuffer, descriptorUpdateTemplate,
layout, set, pData);
uint64_t template_handle = reinterpret_cast<uint64_t &>(descriptorUpdateTemplate);
void *unwrapped_buffer = nullptr;
{
read_lock_guard_t lock(dispatch_lock);
descriptorUpdateTemplate = layer_data->Unwrap(descriptorUpdateTemplate);
layout = layer_data->Unwrap(layout);
unwrapped_buffer = BuildUnwrappedUpdateTemplateBuffer(layer_data, template_handle, pData);
}
layer_data->device_dispatch_table.CmdPushDescriptorSetWithTemplateKHR(commandBuffer, descriptorUpdateTemplate, layout, set,
unwrapped_buffer);
free(unwrapped_buffer);
}
VkResult DispatchGetPhysicalDeviceDisplayPropertiesKHR(VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount,
VkDisplayPropertiesKHR *pProperties) {
auto layer_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map);
VkResult result =
layer_data->instance_dispatch_table.GetPhysicalDeviceDisplayPropertiesKHR(physicalDevice, pPropertyCount, pProperties);
if (!wrap_handles) return result;
if ((result == VK_SUCCESS || result == VK_INCOMPLETE) && pProperties) {
for (uint32_t idx0 = 0; idx0 < *pPropertyCount; ++idx0) {
pProperties[idx0].display = layer_data->MaybeWrapDisplay(pProperties[idx0].display, layer_data);
}
}
return result;
}
VkResult DispatchGetPhysicalDeviceDisplayProperties2KHR(VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount,
VkDisplayProperties2KHR *pProperties) {
auto layer_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map);
VkResult result =
layer_data->instance_dispatch_table.GetPhysicalDeviceDisplayProperties2KHR(physicalDevice, pPropertyCount, pProperties);
if (!wrap_handles) return result;
if ((result == VK_SUCCESS || result == VK_INCOMPLETE) && pProperties) {
for (uint32_t idx0 = 0; idx0 < *pPropertyCount; ++idx0) {
pProperties[idx0].displayProperties.display =
layer_data->MaybeWrapDisplay(pProperties[idx0].displayProperties.display, layer_data);
}
}
return result;
}
VkResult DispatchGetPhysicalDeviceDisplayPlanePropertiesKHR(VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount,
VkDisplayPlanePropertiesKHR *pProperties) {
auto layer_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map);
VkResult result =
layer_data->instance_dispatch_table.GetPhysicalDeviceDisplayPlanePropertiesKHR(physicalDevice, pPropertyCount, pProperties);
if (!wrap_handles) return result;
if ((result == VK_SUCCESS || result == VK_INCOMPLETE) && pProperties) {
for (uint32_t idx0 = 0; idx0 < *pPropertyCount; ++idx0) {
VkDisplayKHR &opt_display = pProperties[idx0].currentDisplay;
if (opt_display) opt_display = layer_data->MaybeWrapDisplay(opt_display, layer_data);
}
}
return result;
}
VkResult DispatchGetPhysicalDeviceDisplayPlaneProperties2KHR(VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount,
VkDisplayPlaneProperties2KHR *pProperties) {
auto layer_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map);
VkResult result = layer_data->instance_dispatch_table.GetPhysicalDeviceDisplayPlaneProperties2KHR(physicalDevice,
pPropertyCount, pProperties);
if (!wrap_handles) return result;
if ((result == VK_SUCCESS || result == VK_INCOMPLETE) && pProperties) {
for (uint32_t idx0 = 0; idx0 < *pPropertyCount; ++idx0) {
VkDisplayKHR &opt_display = pProperties[idx0].displayPlaneProperties.currentDisplay;
if (opt_display) opt_display = layer_data->MaybeWrapDisplay(opt_display, layer_data);
}
}
return result;
}
VkResult DispatchGetDisplayPlaneSupportedDisplaysKHR(VkPhysicalDevice physicalDevice, uint32_t planeIndex, uint32_t *pDisplayCount,
VkDisplayKHR *pDisplays) {
auto layer_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map);
VkResult result = layer_data->instance_dispatch_table.GetDisplayPlaneSupportedDisplaysKHR(physicalDevice, planeIndex,
pDisplayCount, pDisplays);
if ((result == VK_SUCCESS || result == VK_INCOMPLETE) && pDisplays) {
if (!wrap_handles) return result;
for (uint32_t i = 0; i < *pDisplayCount; ++i) {
if (pDisplays[i]) pDisplays[i] = layer_data->MaybeWrapDisplay(pDisplays[i], layer_data);
}
}
return result;
}
VkResult DispatchGetDisplayModePropertiesKHR(VkPhysicalDevice physicalDevice, VkDisplayKHR display, uint32_t *pPropertyCount,
VkDisplayModePropertiesKHR *pProperties) {
auto layer_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map);
if (!wrap_handles)
return layer_data->instance_dispatch_table.GetDisplayModePropertiesKHR(physicalDevice, display, pPropertyCount,
pProperties);
{
display = layer_data->Unwrap(display);
}
VkResult result = layer_data->instance_dispatch_table.GetDisplayModePropertiesKHR(physicalDevice, display, pPropertyCount, pProperties);
if ((result == VK_SUCCESS || result == VK_INCOMPLETE) && pProperties) {
for (uint32_t idx0 = 0; idx0 < *pPropertyCount; ++idx0) {
pProperties[idx0].displayMode = layer_data->WrapNew(pProperties[idx0].displayMode);
}
}
return result;
}
VkResult DispatchGetDisplayModeProperties2KHR(VkPhysicalDevice physicalDevice, VkDisplayKHR display, uint32_t *pPropertyCount,
VkDisplayModeProperties2KHR *pProperties) {
auto layer_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map);
if (!wrap_handles)
return layer_data->instance_dispatch_table.GetDisplayModeProperties2KHR(physicalDevice, display, pPropertyCount,
pProperties);
{
display = layer_data->Unwrap(display);
}
VkResult result =
layer_data->instance_dispatch_table.GetDisplayModeProperties2KHR(physicalDevice, display, pPropertyCount, pProperties);
if ((result == VK_SUCCESS || result == VK_INCOMPLETE) && pProperties) {
for (uint32_t idx0 = 0; idx0 < *pPropertyCount; ++idx0) {
pProperties[idx0].displayModeProperties.displayMode = layer_data->WrapNew(pProperties[idx0].displayModeProperties.displayMode);
}
}
return result;
}
VkResult DispatchDebugMarkerSetObjectTagEXT(VkDevice device, const VkDebugMarkerObjectTagInfoEXT *pTagInfo) {
auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (!wrap_handles) return layer_data->device_dispatch_table.DebugMarkerSetObjectTagEXT(device, pTagInfo);
safe_VkDebugMarkerObjectTagInfoEXT local_tag_info(pTagInfo);
{
auto it = unique_id_mapping.find(reinterpret_cast<uint64_t &>(local_tag_info.object));
if (it != unique_id_mapping.end()) {
local_tag_info.object = it->second;
}
}
VkResult result = layer_data->device_dispatch_table.DebugMarkerSetObjectTagEXT(device,
reinterpret_cast<VkDebugMarkerObjectTagInfoEXT *>(&local_tag_info));
return result;
}
VkResult DispatchDebugMarkerSetObjectNameEXT(VkDevice device, const VkDebugMarkerObjectNameInfoEXT *pNameInfo) {
auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (!wrap_handles) return layer_data->device_dispatch_table.DebugMarkerSetObjectNameEXT(device, pNameInfo);
safe_VkDebugMarkerObjectNameInfoEXT local_name_info(pNameInfo);
{
auto it = unique_id_mapping.find(reinterpret_cast<uint64_t &>(local_name_info.object));
if (it != unique_id_mapping.end()) {
local_name_info.object = it->second;
}
}
VkResult result = layer_data->device_dispatch_table.DebugMarkerSetObjectNameEXT(
device, reinterpret_cast<VkDebugMarkerObjectNameInfoEXT *>(&local_name_info));
return result;
}
// VK_EXT_debug_utils
VkResult DispatchSetDebugUtilsObjectTagEXT(VkDevice device, const VkDebugUtilsObjectTagInfoEXT *pTagInfo) {
auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (!wrap_handles) return layer_data->device_dispatch_table.SetDebugUtilsObjectTagEXT(device, pTagInfo);
safe_VkDebugUtilsObjectTagInfoEXT local_tag_info(pTagInfo);
{
auto it = unique_id_mapping.find(reinterpret_cast<uint64_t &>(local_tag_info.objectHandle));
if (it != unique_id_mapping.end()) {
local_tag_info.objectHandle = it->second;
}
}
VkResult result = layer_data->device_dispatch_table.SetDebugUtilsObjectTagEXT(
device, reinterpret_cast<const VkDebugUtilsObjectTagInfoEXT *>(&local_tag_info));
return result;
}
VkResult DispatchSetDebugUtilsObjectNameEXT(VkDevice device, const VkDebugUtilsObjectNameInfoEXT *pNameInfo) {
auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (!wrap_handles) return layer_data->device_dispatch_table.SetDebugUtilsObjectNameEXT(device, pNameInfo);
safe_VkDebugUtilsObjectNameInfoEXT local_name_info(pNameInfo);
{
auto it = unique_id_mapping.find(reinterpret_cast<uint64_t &>(local_name_info.objectHandle));
if (it != unique_id_mapping.end()) {
local_name_info.objectHandle = it->second;
}
}
VkResult result = layer_data->device_dispatch_table.SetDebugUtilsObjectNameEXT(
device, reinterpret_cast<const VkDebugUtilsObjectNameInfoEXT *>(&local_name_info));
return result;
}
VkResult DispatchGetPhysicalDeviceToolPropertiesEXT(
VkPhysicalDevice physicalDevice,
uint32_t* pToolCount,
VkPhysicalDeviceToolPropertiesEXT* pToolProperties)
{
VkResult result = VK_SUCCESS;
auto layer_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map);
if (layer_data->instance_dispatch_table.GetPhysicalDeviceToolPropertiesEXT == nullptr) {
// This layer is the terminator. Set pToolCount to zero.
*pToolCount = 0;
} else {
result = layer_data->instance_dispatch_table.GetPhysicalDeviceToolPropertiesEXT(physicalDevice, pToolCount, pToolProperties);
}
return result;
}
"""
# Separate generated text for source and headers
ALL_SECTIONS = ['source_file', 'header_file']
def __init__(self,
errFile = sys.stderr,
warnFile = sys.stderr,
diagFile = sys.stdout):
OutputGenerator.__init__(self, errFile, warnFile, diagFile)
self.INDENT_SPACES = 4
self.instance_extensions = []
self.device_extensions = []
# Commands which are not autogenerated but still intercepted
self.no_autogen_list = [
'vkCreateInstance',
'vkDestroyInstance',
'vkCreateDevice',
'vkDestroyDevice',
'vkCreateSwapchainKHR',
'vkCreateSharedSwapchainsKHR',
'vkGetSwapchainImagesKHR',
'vkDestroySwapchainKHR',
'vkQueuePresentKHR',
'vkCreateGraphicsPipelines',
'vkResetDescriptorPool',
'vkDestroyDescriptorPool',
'vkAllocateDescriptorSets',
'vkFreeDescriptorSets',
'vkCreateDescriptorUpdateTemplate',
'vkCreateDescriptorUpdateTemplateKHR',
'vkDestroyDescriptorUpdateTemplate',
'vkDestroyDescriptorUpdateTemplateKHR',
'vkUpdateDescriptorSetWithTemplate',
'vkUpdateDescriptorSetWithTemplateKHR',
'vkCmdPushDescriptorSetWithTemplateKHR',
'vkDebugMarkerSetObjectTagEXT',
'vkDebugMarkerSetObjectNameEXT',
'vkCreateRenderPass',
'vkCreateRenderPass2KHR',
'vkCreateRenderPass2',
'vkDestroyRenderPass',
'vkSetDebugUtilsObjectNameEXT',
'vkSetDebugUtilsObjectTagEXT',
'vkGetPhysicalDeviceDisplayPropertiesKHR',
'vkGetPhysicalDeviceDisplayProperties2KHR',
'vkGetPhysicalDeviceDisplayPlanePropertiesKHR',
'vkGetPhysicalDeviceDisplayPlaneProperties2KHR',
'vkGetDisplayPlaneSupportedDisplaysKHR',
'vkGetDisplayModePropertiesKHR',
'vkGetDisplayModeProperties2KHR',
'vkEnumerateInstanceExtensionProperties',
'vkEnumerateInstanceLayerProperties',
'vkEnumerateDeviceExtensionProperties',
'vkEnumerateDeviceLayerProperties',
'vkEnumerateInstanceVersion',
'vkGetPhysicalDeviceToolPropertiesEXT',
]
self.headerVersion = None
# Internal state - accumulators for different inner block text
self.sections = dict([(section, []) for section in self.ALL_SECTIONS])
self.cmdMembers = []
self.cmd_feature_protect = [] # Save ifdef's for each command
self.cmd_info_data = [] # Save the cmdinfo data for wrapping the handles when processing is complete
self.structMembers = [] # List of StructMemberData records for all Vulkan structs
self.extension_structs = [] # List of all structs or sister-structs containing handles
# A sister-struct may contain no handles but shares a structextends attribute with one that does
self.pnext_extension_structs = [] # List of all structs which can be extended by a pnext chain
self.structTypes = dict() # Map of Vulkan struct typename to required VkStructureType
self.struct_member_dict = dict()
# Named tuples to store struct and command data
self.StructType = namedtuple('StructType', ['name', 'value'])
self.CmdMemberData = namedtuple('CmdMemberData', ['name', 'members'])
self.CmdInfoData = namedtuple('CmdInfoData', ['name', 'cmdinfo'])
self.CmdExtraProtect = namedtuple('CmdExtraProtect', ['name', 'extra_protect'])
self.CommandParam = namedtuple('CommandParam', ['type', 'name', 'ispointer', 'isconst', 'iscount', 'len', 'extstructs', 'cdecl', 'islocal', 'iscreate', 'isdestroy', 'feature_protect'])
self.StructMemberData = namedtuple('StructMemberData', ['name', 'members'])
#
def incIndent(self, indent):
inc = ' ' * self.INDENT_SPACES
if indent:
return indent + inc
return inc
#
def decIndent(self, indent):
if indent and (len(indent) > self.INDENT_SPACES):
return indent[:-self.INDENT_SPACES]
return ''
#
# Override makeProtoName to drop the "vk" prefix
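    # e.g. (illustrative): name='vkCreateImage', tail='(' -> genOpts.apientry + 'CreateImage('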
def makeProtoName(self, name, tail):
return self.genOpts.apientry + name[2:] + tail
#
# Check if the parameter passed in is a pointer to an array
def paramIsArray(self, param):
return param.attrib.get('len') is not None
#
def beginFile(self, genOpts):
OutputGenerator.beginFile(self, genOpts)
# Initialize members that require the tree
self.handle_types = GetHandleTypes(self.registry.tree)
self.type_categories = GetTypeCategories(self.registry.tree)
# Output Copyright
self.appendSection('header_file', self.inline_copyright_message)
# Multiple inclusion protection & C++ namespace.
self.header = False
if (self.genOpts.filename and 'h' == self.genOpts.filename[-1]):
self.header = True
self.appendSection('header_file', '#pragma once')
self.appendSection('header_file', '')
self.appendSection('header_file', '#if defined(LAYER_CHASSIS_CAN_WRAP_HANDLES)')
self.appendSection('header_file', 'extern bool wrap_handles;')
self.appendSection('header_file', '#else')
self.appendSection('header_file', 'extern bool wrap_handles;')
self.appendSection('header_file', '#endif')
# Now that the data is all collected and complete, generate and output the wrapping/unwrapping routines
def endFile(self):
self.struct_member_dict = dict(self.structMembers)
# Generate the list of APIs that might need to handle wrapped extension structs
self.GenerateCommandWrapExtensionList()
# Write out wrapping/unwrapping functions
self.WrapCommands()
# Build and write out pNext processing function
extension_proc = self.build_extension_processing_func()
if not self.header:
write(self.inline_copyright_message, file=self.outFile)
self.newline()
write('#include <mutex>', file=self.outFile)
write('#include "chassis.h"', file=self.outFile)
write('#include "layer_chassis_dispatch.h"', file=self.outFile)
write('#include "vk_layer_utils.h"', file=self.outFile)
self.newline()
write('// This intentionally includes a cpp file', file=self.outFile)
write('#include "vk_safe_struct.cpp"', file=self.outFile)
self.newline()
write('ReadWriteLock dispatch_lock;', file=self.outFile)
self.newline()
write('// Unique Objects pNext extension handling function', file=self.outFile)
write('%s' % extension_proc, file=self.outFile)
self.newline()
write('// Manually written Dispatch routines', file=self.outFile)
write('%s' % self.inline_custom_source_preamble, file=self.outFile)
self.newline()
if (self.sections['source_file']):
write('\n'.join(self.sections['source_file']), end=u'', file=self.outFile)
else:
self.newline()
if (self.sections['header_file']):
write('\n'.join(self.sections['header_file']), end=u'', file=self.outFile)
# Finish processing in superclass
OutputGenerator.endFile(self)
#
def beginFeature(self, interface, emit):
# Start processing in superclass
OutputGenerator.beginFeature(self, interface, emit)
self.headerVersion = None
self.featureExtraProtect = GetFeatureProtect(interface)
if self.featureName != 'VK_VERSION_1_0' and self.featureName != 'VK_VERSION_1_1':
white_list_entry = []
if (self.featureExtraProtect is not None):
white_list_entry += [ '#ifdef %s' % self.featureExtraProtect ]
white_list_entry += [ '"%s"' % self.featureName ]
if (self.featureExtraProtect is not None):
white_list_entry += [ '#endif' ]
featureType = interface.get('type')
if featureType == 'instance':
self.instance_extensions += white_list_entry
elif featureType == 'device':
self.device_extensions += white_list_entry
#
def endFeature(self):
# Finish processing in superclass
OutputGenerator.endFeature(self)
#
def genType(self, typeinfo, name, alias):
OutputGenerator.genType(self, typeinfo, name, alias)
typeElem = typeinfo.elem
        # If the type is a struct or union, traverse the embedded <member> tags and record the structure.
        # Other type categories require no additional processing here.
category = typeElem.get('category')
if (category == 'struct' or category == 'union'):
self.genStruct(typeinfo, name, alias)
#
# Append a definition to the specified section
def appendSection(self, section, text):
# self.sections[section].append('SECTION: ' + section + '\n')
self.sections[section].append(text)
#
# Check if the parameter passed in is a pointer
def paramIsPointer(self, param):
ispointer = False
for elem in param:
if elem.tag == 'type' and elem.tail is not None and '*' in elem.tail:
ispointer = True
return ispointer
#
# Retrieve the type and name for a parameter
def getTypeNameTuple(self, param):
type = ''
name = ''
for elem in param:
if elem.tag == 'type':
type = noneStr(elem.text)
elif elem.tag == 'name':
name = noneStr(elem.text)
return (type, name)
#
# Retrieve the value of the len tag
def getLen(self, param):
result = None
len = param.attrib.get('len')
if len and len != 'null-terminated':
# For string arrays, 'len' can look like 'count,null-terminated', indicating that we
# have a null terminated array of strings. We strip the null-terminated from the
# 'len' field and only return the parameter specifying the string count
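            # e.g., a registry value such as len="enabledLayerCount,null-terminated" yields "enabledLayerCount"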
if 'null-terminated' in len:
result = len.split(',')[0]
else:
result = len
            # The spec now uses '::' notation in len attributes instead of a platform-specific pointer symbol
result = str(result).replace('::', '->')
return result
#
# Generate a VkStructureType based on a structure typename
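    # e.g., 'VkImageCreateInfo' -> 'VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO'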
def genVkStructureType(self, typename):
# Add underscore between lowercase then uppercase
value = re.sub('([a-z0-9])([A-Z])', r'\1_\2', typename)
# Change to uppercase
value = value.upper()
# Add STRUCTURE_TYPE_
return re.sub('VK_', 'VK_STRUCTURE_TYPE_', value)
#
# Struct parameter check generation.
# This is a special case of the <type> tag where the contents are interpreted as a set of
# <member> tags instead of freeform C type declarations. The <member> tags are just like
# <param> tags - they are a declaration of a struct or union member. Only simple member
# declarations are supported (no nested structs etc.)
def genStruct(self, typeinfo, typeName, alias):
OutputGenerator.genStruct(self, typeinfo, typeName, alias)
members = typeinfo.elem.findall('.//member')
# Iterate over members once to get length parameters for arrays
lens = set()
for member in members:
len = self.getLen(member)
if len:
lens.add(len)
# Generate member info
membersInfo = []
for member in members:
# Get the member's type and name
info = self.getTypeNameTuple(member)
type = info[0]
name = info[1]
cdecl = self.makeCParamDecl(member, 0)
# Process VkStructureType
if type == 'VkStructureType':
# Extract the required struct type value from the comments
# embedded in the original text defining the 'typeinfo' element
rawXml = etree.tostring(typeinfo.elem).decode('ascii')
result = re.search(r'VK_STRUCTURE_TYPE_\w+', rawXml)
if result:
value = result.group(0)
else:
value = self.genVkStructureType(typeName)
# Store the required type value
self.structTypes[typeName] = self.StructType(name=name, value=value)
# Store pointer/array/string info
extstructs = self.registry.validextensionstructs[typeName] if name == 'pNext' else None
membersInfo.append(self.CommandParam(type=type,
name=name,
ispointer=self.paramIsPointer(member),
isconst=True if 'const' in cdecl else False,
iscount=True if name in lens else False,
len=self.getLen(member),
extstructs=extstructs,
cdecl=cdecl,
islocal=False,
iscreate=False,
isdestroy=False,
feature_protect=self.featureExtraProtect))
self.structMembers.append(self.StructMemberData(name=typeName, members=membersInfo))
#
# Determine if a struct has an NDO as a member or an embedded member
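    # e.g., VkDescriptorSetAllocateInfo qualifies because it contains VkDescriptorPool and VkDescriptorSetLayout handles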
def struct_contains_ndo(self, struct_item):
struct_member_dict = dict(self.structMembers)
struct_members = struct_member_dict[struct_item]
for member in struct_members:
if self.handle_types.IsNonDispatchable(member.type):
return True
# recurse for member structs, guard against infinite recursion
elif member.type in struct_member_dict and member.type != struct_item:
if self.struct_contains_ndo(member.type):
return True
return False
#
# Return list of struct members which contain, or which sub-structures contain
# an NDO in a given list of parameters or members
def getParmeterStructsWithNdos(self, item_list):
struct_list = set()
for item in item_list:
paramtype = item.find('type')
typecategory = self.type_categories[paramtype.text]
if typecategory == 'struct':
if self.struct_contains_ndo(paramtype.text) == True:
struct_list.add(item)
return struct_list
#
# Return list of non-dispatchable objects from a given list of parameters or members
def getNdosInParameterList(self, item_list, create_func):
ndo_list = set()
if create_func == True:
member_list = item_list[0:-1]
else:
member_list = item_list
for item in member_list:
            paramtype = item.find('type')
            if self.handle_types.IsNonDispatchable(paramtype.text):
ndo_list.add(item)
return ndo_list
#
# Construct list of extension structs containing handles, or extension structs that share a structextends attribute
# WITH an extension struct containing handles. All extension structs in any pNext chain will have to be copied.
# TODO: make this recursive -- structs buried three or more levels deep are not searched for extensions
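    # e.g. (hypothetical): if StructA and StructB can both extend the same base struct via structextends and StructA
    # contains a handle, both are added to extension_structs so a copied pNext chain remains complete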
def GenerateCommandWrapExtensionList(self):
for struct in self.structMembers:
if (len(struct.members) > 1) and struct.members[1].extstructs is not None:
            found = False
for item in struct.members[1].extstructs:
if item != '' and item not in self.pnext_extension_structs:
self.pnext_extension_structs.append(item)
if item != '' and self.struct_contains_ndo(item) == True:
found = True
if found == True:
for item in struct.members[1].extstructs:
if item != '' and item not in self.extension_structs:
self.extension_structs.append(item)
#
# Returns True if a struct may have a pNext chain containing an NDO
def StructWithExtensions(self, struct_type):
if struct_type in self.struct_member_dict:
param_info = self.struct_member_dict[struct_type]
if (len(param_info) > 1) and param_info[1].extstructs is not None:
for item in param_info[1].extstructs:
if item in self.extension_structs:
return True
return False
#
# Generate pNext handling function
def build_extension_processing_func(self):
        # Construct the helper function that walks a pNext chain and unwraps handles in any recognized extension structs
pnext_proc = ''
pnext_proc += 'void WrapPnextChainHandles(ValidationObject *layer_data, const void *pNext) {\n'
pnext_proc += ' void *cur_pnext = const_cast<void *>(pNext);\n'
pnext_proc += ' while (cur_pnext != NULL) {\n'
pnext_proc += ' VkBaseOutStructure *header = reinterpret_cast<VkBaseOutStructure *>(cur_pnext);\n\n'
pnext_proc += ' switch (header->sType) {\n'
for item in self.pnext_extension_structs:
struct_info = self.struct_member_dict[item]
indent = ' '
(tmp_decl, tmp_pre, tmp_post) = self.uniquify_members(struct_info, indent, 'safe_struct->', 0, False, False, False, False)
# Only process extension structs containing handles
if not tmp_pre:
continue
if struct_info[0].feature_protect is not None:
pnext_proc += '#ifdef %s \n' % struct_info[0].feature_protect
pnext_proc += ' case %s: {\n' % self.structTypes[item].value
pnext_proc += ' safe_%s *safe_struct = reinterpret_cast<safe_%s *>(cur_pnext);\n' % (item, item)
# Generate code to unwrap the handles
pnext_proc += tmp_pre
pnext_proc += ' } break;\n'
if struct_info[0].feature_protect is not None:
pnext_proc += '#endif // %s \n' % struct_info[0].feature_protect
pnext_proc += '\n'
pnext_proc += ' default:\n'
pnext_proc += ' break;\n'
pnext_proc += ' }\n\n'
pnext_proc += ' // Process the next structure in the chain\n'
pnext_proc += ' cur_pnext = header->pNext;\n'
pnext_proc += ' }\n'
pnext_proc += '}\n'
return pnext_proc
#
# Generate source for creating a non-dispatchable object
def generate_create_ndo_code(self, indent, proto, params, cmd_info):
create_ndo_code = ''
handle_type = params[-1].find('type')
if self.handle_types.IsNonDispatchable(handle_type.text):
# Check for special case where multiple handles are returned
ndo_array = False
if cmd_info[-1].len is not None:
                ndo_array = True
handle_name = params[-1].find('name')
# Special case return value handling for the createpipeline APIs
is_create_pipelines = ('CreateGraphicsPipelines' in proto.text) or ('CreateComputePipelines' in proto.text) or ('CreateRayTracingPipelines' in proto.text)
if is_create_pipelines:
create_ndo_code += '%s{\n' % (indent)
else:
create_ndo_code += '%sif (VK_SUCCESS == result) {\n' % (indent)
indent = self.incIndent(indent)
ndo_dest = '*%s' % handle_name.text
if ndo_array == True:
create_ndo_code += '%sfor (uint32_t index0 = 0; index0 < %s; index0++) {\n' % (indent, cmd_info[-1].len)
indent = self.incIndent(indent)
ndo_dest = '%s[index0]' % cmd_info[-1].name
if is_create_pipelines:
create_ndo_code += '%sif (%s != VK_NULL_HANDLE) {\n' % (indent, ndo_dest)
indent = self.incIndent(indent)
create_ndo_code += '%s%s = layer_data->WrapNew(%s);\n' % (indent, ndo_dest, ndo_dest)
if ndo_array == True:
if is_create_pipelines:
indent = self.decIndent(indent)
create_ndo_code += '%s}\n' % indent
indent = self.decIndent(indent)
create_ndo_code += '%s}\n' % indent
indent = self.decIndent(indent)
create_ndo_code += '%s}\n' % (indent)
return create_ndo_code
#
# Generate source for destroying a non-dispatchable object
def generate_destroy_ndo_code(self, indent, proto, cmd_info):
destroy_ndo_code = ''
ndo_array = False
if True in [destroy_txt in proto.text for destroy_txt in ['Destroy', 'Free']]:
# Check for special case where multiple handles are returned
if cmd_info[-1].len is not None:
                ndo_array = True
param = -1
else:
param = -2
if self.handle_types.IsNonDispatchable(cmd_info[param].type):
if ndo_array == True:
# This API is freeing an array of handles. Remove them from the unique_id map.
destroy_ndo_code += '%sif ((VK_SUCCESS == result) && (%s)) {\n' % (indent, cmd_info[param].name)
indent = self.incIndent(indent)
destroy_ndo_code += '%sfor (uint32_t index0 = 0; index0 < %s; index0++) {\n' % (indent, cmd_info[param].len)
indent = self.incIndent(indent)
destroy_ndo_code += '%s%s handle = %s[index0];\n' % (indent, cmd_info[param].type, cmd_info[param].name)
destroy_ndo_code += '%suint64_t unique_id = reinterpret_cast<uint64_t &>(handle);\n' % (indent)
destroy_ndo_code += '%sunique_id_mapping.erase(unique_id);\n' % (indent)
                indent = self.decIndent(indent)
                destroy_ndo_code += '%s}\n' % indent
                indent = self.decIndent(indent)
destroy_ndo_code += '%s}\n' % indent
else:
# Remove a single handle from the map
destroy_ndo_code += '%suint64_t %s_id = reinterpret_cast<uint64_t &>(%s);\n' % (indent, cmd_info[param].name, cmd_info[param].name)
destroy_ndo_code += '%sauto iter = unique_id_mapping.pop(%s_id);\n' % (indent, cmd_info[param].name)
destroy_ndo_code += '%sif (iter != unique_id_mapping.end()) {\n' % (indent)
indent = self.incIndent(indent)
destroy_ndo_code += '%s%s = (%s)iter->second;\n' % (indent, cmd_info[param].name, cmd_info[param].type)
                indent = self.decIndent(indent)
                destroy_ndo_code += '%s} else {\n' % (indent)
                indent = self.incIndent(indent)
                destroy_ndo_code += '%s%s = (%s)0;\n' % (indent, cmd_info[param].name, cmd_info[param].type)
                indent = self.decIndent(indent)
destroy_ndo_code += '%s}\n' % (indent)
return ndo_array, destroy_ndo_code
#
# Clean up local declarations
def cleanUpLocalDeclarations(self, indent, prefix, name, len, index):
cleanup = ''
if len is not None:
cleanup = '%sif (local_%s%s) {\n' % (indent, prefix, name)
cleanup += '%s delete[] local_%s%s;\n' % (indent, prefix, name)
cleanup += "%s}\n" % (indent)
return cleanup
#
# Output UO code for a single NDO (ndo_count is NULL) or a counted list of NDOs
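    # For a counted top-level list, the emitted code looks roughly like this (illustrative sketch):
    #   local_pX = count > DISPATCH_MAX_STACK_ALLOCATIONS ? new VkT[count] : var_local_pX;
    #   for (uint32_t index0 = 0; index0 < count; ++index0) { local_pX[index0] = layer_data->Unwrap(pX[index0]); }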
def outputNDOs(self, ndo_type, ndo_name, ndo_count, prefix, index, indent, destroy_func, destroy_array, top_level):
decl_code = ''
pre_call_code = ''
post_call_code = ''
if ndo_count is not None:
if top_level == True:
decl_code += '%s%s var_local_%s%s[DISPATCH_MAX_STACK_ALLOCATIONS];\n' % (indent, ndo_type, prefix, ndo_name)
decl_code += '%s%s *local_%s%s = NULL;\n' % (indent, ndo_type, prefix, ndo_name)
pre_call_code += '%s if (%s%s) {\n' % (indent, prefix, ndo_name)
indent = self.incIndent(indent)
if top_level == True:
pre_call_code += '%s local_%s%s = %s > DISPATCH_MAX_STACK_ALLOCATIONS ? new %s[%s] : var_local_%s%s;\n' % (indent, prefix, ndo_name, ndo_count, ndo_type, ndo_count, prefix, ndo_name)
pre_call_code += '%s for (uint32_t %s = 0; %s < %s; ++%s) {\n' % (indent, index, index, ndo_count, index)
indent = self.incIndent(indent)
pre_call_code += '%s local_%s%s[%s] = layer_data->Unwrap(%s[%s]);\n' % (indent, prefix, ndo_name, index, ndo_name, index)
else:
pre_call_code += '%s for (uint32_t %s = 0; %s < %s; ++%s) {\n' % (indent, index, index, ndo_count, index)
indent = self.incIndent(indent)
pre_call_code += '%s %s%s[%s] = layer_data->Unwrap(%s%s[%s]);\n' % (indent, prefix, ndo_name, index, prefix, ndo_name, index)
indent = self.decIndent(indent)
pre_call_code += '%s }\n' % indent
indent = self.decIndent(indent)
pre_call_code += '%s }\n' % indent
if top_level == True:
post_call_code += '%sif (local_%s%s != var_local_%s%s)\n' % (indent, prefix, ndo_name, prefix, ndo_name)
indent = self.incIndent(indent)
post_call_code += '%sdelete[] local_%s;\n' % (indent, ndo_name)
else:
if top_level == True:
if (destroy_func == False) or (destroy_array == True):
pre_call_code += '%s %s = layer_data->Unwrap(%s);\n' % (indent, ndo_name, ndo_name)
            else:
                # Make a temp copy of this var with the 'local_' prefix removed. Note that str.strip() removes a
                # character set rather than a literal prefix, so this relies on the remaining name not starting with
                # any of the characters in 'local_'. It may be better to not pass in 'local_' as part of the string
                # and explicitly print it.
                fix = str(prefix).strip('local_')
                pre_call_code += '%s    if (%s%s) {\n' % (indent, fix, ndo_name)
                indent = self.incIndent(indent)
                pre_call_code += '%s    %s%s = layer_data->Unwrap(%s%s);\n' % (indent, prefix, ndo_name, fix, ndo_name)
                indent = self.decIndent(indent)
                pre_call_code += '%s    }\n' % indent
return decl_code, pre_call_code, post_call_code
#
# first_level_param indicates if elements are passed directly into the function else they're below a ptr/struct
# create_func means that this is API creates or allocates NDOs
# destroy_func indicates that this API destroys or frees NDOs
# destroy_array means that the destroy_func operated on an array of NDOs
def uniquify_members(self, members, indent, prefix, array_index, create_func, destroy_func, destroy_array, first_level_param):
decls = ''
pre_code = ''
post_code = ''
index = 'index%s' % str(array_index)
array_index += 1
# Process any NDOs in this structure and recurse for any sub-structs in this struct
for member in members:
process_pnext = self.StructWithExtensions(member.type)
# Handle NDOs
if self.handle_types.IsNonDispatchable(member.type):
count_name = member.len
if (count_name is not None):
if first_level_param == False:
count_name = '%s%s' % (prefix, member.len)
if (first_level_param == False) or (create_func == False) or (not '*' in member.cdecl):
(tmp_decl, tmp_pre, tmp_post) = self.outputNDOs(member.type, member.name, count_name, prefix, index, indent, destroy_func, destroy_array, first_level_param)
decls += tmp_decl
pre_code += tmp_pre
post_code += tmp_post
# Handle Structs that contain NDOs at some level
elif member.type in self.struct_member_dict:
# Structs at first level will have an NDO, OR, we need a safe_struct for the pnext chain
if self.struct_contains_ndo(member.type) == True or process_pnext:
struct_info = self.struct_member_dict[member.type]
if any(member.ispointer for member in struct_info):
safe_type = 'safe_' + member.type
else:
safe_type = member.type
# Struct Array
if member.len is not None:
# Update struct prefix
if first_level_param == True:
new_prefix = 'local_%s' % member.name
# Declare safe_VarType for struct
decls += '%s%s *%s = NULL;\n' % (indent, safe_type, new_prefix)
else:
new_prefix = '%s%s' % (prefix, member.name)
pre_code += '%s if (%s%s) {\n' % (indent, prefix, member.name)
indent = self.incIndent(indent)
if first_level_param == True:
pre_code += '%s %s = new %s[%s];\n' % (indent, new_prefix, safe_type, member.len)
pre_code += '%s for (uint32_t %s = 0; %s < %s%s; ++%s) {\n' % (indent, index, index, prefix, member.len, index)
indent = self.incIndent(indent)
if first_level_param == True:
if 'safe_' in safe_type:
pre_code += '%s %s[%s].initialize(&%s[%s]);\n' % (indent, new_prefix, index, member.name, index)
else:
pre_code += '%s %s[%s] = %s[%s];\n' % (indent, new_prefix, index, member.name, index)
if process_pnext:
pre_code += '%s WrapPnextChainHandles(layer_data, %s[%s].pNext);\n' % (indent, new_prefix, index)
local_prefix = '%s[%s].' % (new_prefix, index)
# Process sub-structs in this struct
(tmp_decl, tmp_pre, tmp_post) = self.uniquify_members(struct_info, indent, local_prefix, array_index, create_func, destroy_func, destroy_array, False)
decls += tmp_decl
pre_code += tmp_pre
post_code += tmp_post
indent = self.decIndent(indent)
pre_code += '%s }\n' % indent
indent = self.decIndent(indent)
pre_code += '%s }\n' % indent
if first_level_param == True:
post_code += self.cleanUpLocalDeclarations(indent, prefix, member.name, member.len, index)
# Single Struct
elif member.ispointer:
# Update struct prefix
if first_level_param == True:
new_prefix = 'local_%s->' % member.name
decls += '%s%s var_local_%s%s;\n' % (indent, safe_type, prefix, member.name)
decls += '%s%s *local_%s%s = NULL;\n' % (indent, safe_type, prefix, member.name)
else:
new_prefix = '%s%s->' % (prefix, member.name)
# Declare safe_VarType for struct
pre_code += '%s if (%s%s) {\n' % (indent, prefix, member.name)
indent = self.incIndent(indent)
if first_level_param == True:
pre_code += '%s local_%s%s = &var_local_%s%s;\n' % (indent, prefix, member.name, prefix, member.name)
if 'safe_' in safe_type:
pre_code += '%s local_%s%s->initialize(%s);\n' % (indent, prefix, member.name, member.name)
else:
pre_code += '%s *local_%s%s = *%s;\n' % (indent, prefix, member.name, member.name)
# Process sub-structs in this struct
(tmp_decl, tmp_pre, tmp_post) = self.uniquify_members(struct_info, indent, new_prefix, array_index, create_func, destroy_func, destroy_array, False)
decls += tmp_decl
pre_code += tmp_pre
post_code += tmp_post
if process_pnext:
pre_code += '%s WrapPnextChainHandles(layer_data, local_%s%s->pNext);\n' % (indent, prefix, member.name)
indent = self.decIndent(indent)
pre_code += '%s }\n' % indent
if first_level_param == True:
post_code += self.cleanUpLocalDeclarations(indent, prefix, member.name, member.len, index)
else:
# Update struct prefix
if first_level_param == True:
sys.exit(1)
else:
new_prefix = '%s%s.' % (prefix, member.name)
# Process sub-structs in this struct
(tmp_decl, tmp_pre, tmp_post) = self.uniquify_members(struct_info, indent, new_prefix, array_index, create_func, destroy_func, destroy_array, False)
decls += tmp_decl
pre_code += tmp_pre
post_code += tmp_post
if process_pnext:
pre_code += '%s WrapPnextChainHandles(layer_data, local_%s%s.pNext);\n' % (indent, prefix, member.name)
return decls, pre_code, post_code
#
# For a particular API, generate the non-dispatchable-object wrapping/unwrapping code
def generate_wrapping_code(self, cmd):
indent = ' '
proto = cmd.find('proto/name')
params = cmd.findall('param')
if proto.text is not None:
cmd_member_dict = dict(self.cmdMembers)
cmd_info = cmd_member_dict[proto.text]
# Handle ndo create/allocate operations
if cmd_info[0].iscreate:
create_ndo_code = self.generate_create_ndo_code(indent, proto, params, cmd_info)
else:
create_ndo_code = ''
# Handle ndo destroy/free operations
if cmd_info[0].isdestroy:
(destroy_array, destroy_ndo_code) = self.generate_destroy_ndo_code(indent, proto, cmd_info)
else:
destroy_array = False
destroy_ndo_code = ''
paramdecl = ''
param_pre_code = ''
param_post_code = ''
create_func = True if create_ndo_code else False
destroy_func = True if destroy_ndo_code else False
(paramdecl, param_pre_code, param_post_code) = self.uniquify_members(cmd_info, indent, '', 0, create_func, destroy_func, destroy_array, True)
param_post_code += create_ndo_code
if destroy_ndo_code:
if destroy_array == True:
param_post_code += destroy_ndo_code
else:
param_pre_code += destroy_ndo_code
if param_pre_code:
if (not destroy_func) or (destroy_array):
param_pre_code = '%s{\n%s%s}\n' % (' ', param_pre_code, indent)
return paramdecl, param_pre_code, param_post_code
#
# Capture command parameter info needed to wrap NDOs as well as handling some boilerplate code
def genCmd(self, cmdinfo, cmdname, alias):
# Add struct-member type information to command parameter information
OutputGenerator.genCmd(self, cmdinfo, cmdname, alias)
members = cmdinfo.elem.findall('.//param')
# Iterate over members once to get length parameters for arrays
lens = set()
for member in members:
len = self.getLen(member)
if len:
lens.add(len)
struct_member_dict = dict(self.structMembers)
# Generate member info
membersInfo = []
for member in members:
# Get type and name of member
info = self.getTypeNameTuple(member)
type = info[0]
name = info[1]
cdecl = self.makeCParamDecl(member, 0)
# Check for parameter name in lens set
iscount = True if name in lens else False
len = self.getLen(member)
isconst = True if 'const' in cdecl else False
ispointer = self.paramIsPointer(member)
# Mark param as local if it is an array of NDOs
islocal = False
if self.handle_types.IsNonDispatchable(type):
if (len is not None) and (isconst == True):
islocal = True
# Or if it's a struct that contains an NDO
elif type in struct_member_dict:
if self.struct_contains_ndo(type) == True:
islocal = True
isdestroy = True if True in [destroy_txt in cmdname for destroy_txt in ['Destroy', 'Free']] else False
iscreate = True if True in [create_txt in cmdname for create_txt in ['Create', 'Allocate', 'GetRandROutputDisplayEXT', 'RegisterDeviceEvent', 'RegisterDisplayEvent']] else False
extstructs = self.registry.validextensionstructs[type] if name == 'pNext' else None
membersInfo.append(self.CommandParam(type=type,
name=name,
ispointer=ispointer,
isconst=isconst,
iscount=iscount,
len=len,
extstructs=extstructs,
cdecl=cdecl,
islocal=islocal,
iscreate=iscreate,
isdestroy=isdestroy,
feature_protect=self.featureExtraProtect))
self.cmdMembers.append(self.CmdMemberData(name=cmdname, members=membersInfo))
self.cmd_info_data.append(self.CmdInfoData(name=cmdname, cmdinfo=cmdinfo))
self.cmd_feature_protect.append(self.CmdExtraProtect(name=cmdname, extra_protect=self.featureExtraProtect))
#
# Create prototype for dispatch header file
def GenDispatchFunctionPrototype(self, cmdinfo, ifdef_text):
decls = self.makeCDecls(cmdinfo.elem)
func_sig = decls[0][:-1]
func_sig = func_sig.replace("VKAPI_ATTR ", "")
func_sig = func_sig.replace("VKAPI_CALL ", "Dispatch")
func_sig += ';'
dispatch_prototype = ''
if ifdef_text is not None:
dispatch_prototype = '#ifdef %s\n' % ifdef_text
dispatch_prototype += func_sig
if ifdef_text is not None:
dispatch_prototype += '\n#endif // %s' % ifdef_text
return dispatch_prototype
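#
# Illustrative example (added comment, not from the original source): given a declaration such as
#   VKAPI_ATTR VkResult VKAPI_CALL vkQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo* pSubmits, VkFence fence);
# the code above strips the VKAPI_ATTR decoration, replaces the VKAPI_CALL calling-convention macro
# with a 'Dispatch' prefix on the function name, appends a ';', and wraps the result in the feature's
# #ifdef/#endif guard when one applies, so the prototype can be emitted into the dispatch header.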
#
# Create code to wrap NDOs as well as handling some boilerplate code
def WrapCommands(self):
cmd_member_dict = dict(self.cmdMembers)
cmd_info_dict = dict(self.cmd_info_data)
cmd_protect_dict = dict(self.cmd_feature_protect)
for api_call in self.cmdMembers:
cmdname = api_call.name
cmdinfo = cmd_info_dict[api_call.name]
feature_extra_protect = cmd_protect_dict[api_call.name]
# Add function prototype to header data
self.appendSection('header_file', self.GenDispatchFunctionPrototype(cmdinfo, feature_extra_protect))
if cmdname in self.no_autogen_list:
decls = self.makeCDecls(cmdinfo.elem)
self.appendSection('source_file', '')
self.appendSection('source_file', '// Skip %s dispatch, manually generated' % cmdname)
continue
# Generate NDO wrapping/unwrapping code for all parameters
(api_decls, api_pre, api_post) = self.generate_wrapping_code(cmdinfo.elem)
# If the API doesn't contain NDOs, we still need to make a down-chain call
down_chain_call_only = False
if not api_decls and not api_pre and not api_post:
down_chain_call_only = True
if (feature_extra_protect is not None):
self.appendSection('source_file', '')
self.appendSection('source_file', '#ifdef ' + feature_extra_protect)
decls = self.makeCDecls(cmdinfo.elem)
func_sig = decls[0][:-1]
func_sig = func_sig.replace("VKAPI_ATTR ", "")
func_sig = func_sig.replace("VKAPI_CALL ", "Dispatch")
self.appendSection('source_file', '')
self.appendSection('source_file', func_sig)
self.appendSection('source_file', '{')
# Setup common to call wrappers, first parameter is always dispatchable
dispatchable_type = cmdinfo.elem.find('param/type').text
dispatchable_name = cmdinfo.elem.find('param/name').text
# Gather the parameter items
params = cmdinfo.elem.findall('param/name')
# Pull out the text for each of the parameters, separate them by commas in a list
paramstext = ', '.join([str(param.text) for param in params])
wrapped_paramstext = paramstext
# If any of these parameters have been replaced by a local var, fix up the list
params = cmd_member_dict[cmdname]
for param in params:
if param.islocal == True or self.StructWithExtensions(param.type):
if param.ispointer == True:
wrapped_paramstext = wrapped_paramstext.replace(param.name, '(%s %s*)local_%s' % ('const', param.type, param.name))
else:
wrapped_paramstext = wrapped_paramstext.replace(param.name, '(%s %s)local_%s' % ('const', param.type, param.name))
# First, add check and down-chain call. Use correct dispatch table
dispatch_table_type = "device_dispatch_table"
if dispatchable_type in ["VkPhysicalDevice", "VkInstance"]:
dispatch_table_type = "instance_dispatch_table"
api_func = cmdinfo.elem.attrib.get('name').replace('vk','layer_data->%s.',1) % dispatch_table_type
# Call to get the layer_data pointer
self.appendSection('source_file', ' auto layer_data = GetLayerDataPtr(get_dispatch_key(%s), layer_data_map);' % dispatchable_name)
# Put all this together for the final down-chain call
if not down_chain_call_only:
unwrapped_dispatch_call = api_func + '(' + paramstext + ')'
self.appendSection('source_file', ' if (!wrap_handles) return %s;' % unwrapped_dispatch_call)
# Handle return values, if any
resulttype = cmdinfo.elem.find('proto/type')
if (resulttype is not None and resulttype.text == 'void'):
resulttype = None
if (resulttype is not None):
assignresult = resulttype.text + ' result = '
else:
assignresult = ''
# Pre-pend declarations and pre-api-call codegen
if api_decls:
self.appendSection('source_file', "\n".join(str(api_decls).rstrip().split("\n")))
if api_pre:
self.appendSection('source_file', "\n".join(str(api_pre).rstrip().split("\n")))
# Generate the wrapped dispatch call
self.appendSection('source_file', ' ' + assignresult + api_func + '(' + wrapped_paramstext + ');')
# And add the post-API-call codegen
if ('CreateGraphicsPipelines' in cmdname) or ('CreateComputePipelines' in cmdname) or ('CreateRayTracingPipelines' in cmdname):
copy_feedback_source = ' for (uint32_t i = 0; i < createInfoCount; ++i) {\n'
copy_feedback_source += ' if (pCreateInfos[i].pNext != VK_NULL_HANDLE) {\n'
copy_feedback_source += ' CopyCreatePipelineFeedbackData(local_pCreateInfos[i].pNext, pCreateInfos[i].pNext);\n'
copy_feedback_source += ' }\n'
copy_feedback_source += ' }\n'
self.appendSection('source_file', copy_feedback_source)
self.appendSection('source_file', "\n".join(str(api_post).rstrip().split("\n")))
# Handle the return result variable, if any
if (resulttype is not None):
self.appendSection('source_file', ' return result;')
self.appendSection('source_file', '}')
if (feature_extra_protect is not None):
self.appendSection('source_file', '#endif // '+ feature_extra_protect)
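#
# Overall shape of an emitted wrapper, assembled from the pieces above (illustrative sketch; 'Foo'
# and its parameters are placeholders, not a real Vulkan entry point):
#
#   VkResult DispatchFoo(VkDevice device, ...) {
#       auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
#       if (!wrap_handles) return layer_data->device_dispatch_table.Foo(device, ...);
#       // declarations and pre-call unwrapping of non-dispatchable handles
#       VkResult result = layer_data->device_dispatch_table.Foo(/* wrapped params */);
#       // post-call wrapping / cleanup
#       return result;
#   }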
| endlessm/chromium-browser | third_party/angle/third_party/vulkan-validation-layers/src/scripts/layer_chassis_dispatch_generator.py | Python | bsd-3-clause | 99,103 | 0.005711 |
from envi.archs.msp430.regs import *
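# Each check below appears to follow the vivisect emulator-test layout:
# (assembly text, initial CPU state, expected CPU state after execution), where 'code' holds the
# little-endian instruction bytes and 'data' any memory contents the test needs.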
checks = [
# SWPB
(
'SWPB r15',
{ 'regs': [(REG_R15, 0xaabb)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "8f10", 'data': "" },
{ 'regs': [(REG_R15, 0xbbaa)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "8f10", 'data': "" }
),
]
| joshuahoman/vivisect | envi/tests/msp430/iswpb.py | Python | apache-2.0 | 341 | 0.017595 |
import socket
import nlp
class NLPServer(object):
def __init__(self, ip, port):
self.sock = socket.socket()
self.sock.bind((ip, port))
self.processor = nlp.NLPProcessor()
print "Established Server"
def listen(self):
import thread
self.sock.listen(5)
print "Started listening at port."
while True:
c = self.sock.accept()
cli_sock, cli_addr = c
try:
print 'Got connection from', cli_addr
thread.start_new_thread(self.manageRequest, (cli_sock,))
except Exception, Argument:
print Argument
self.sock.close()
quit()
def manageRequest(self, cli_sock):
data = cli_sock.recv(8192)
result = self.processor.processQuestion(data)
cli_sock.send(str(result))
cli_sock.close()
# server = NLPServer('127.0.0.1', 3369)
import sys
server = NLPServer(str(sys.argv[1]), int(sys.argv[2]))
server.listen()
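# Illustrative client sketch (added, not part of the original file): a separate process could query
# the server like this, assuming it was started with e.g. `python nlpserver.py 127.0.0.1 3369`
# (the host/port mirror the commented-out example above and are assumptions, not fixed values):
#
#   import socket
#   cli = socket.socket()
#   cli.connect(('127.0.0.1', 3369))
#   cli.send('What is the capital of France?')
#   print(cli.recv(8192))
#   cli.close()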
| jeffw16/elephant | nlp/nlpserver.py | Python | mit | 849 | 0.042403 |
from __future__ import division, print_function
import numpy as np
from lmfit.models import VoigtModel
from scipy.signal import argrelmax
import matplotlib.pyplot as plt
def lamda_from_bragg(th, d, n):
return 2 * d * np.sin(th / 2.) / n
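# Worked example (added comment; the numbers are assumptions chosen for illustration): th is the full
# two-theta angle in radians, so Bragg's law lambda = 2*d*sin(theta)/n uses sin(th/2). With
# d = 2.0 Angstrom, th = np.deg2rad(30.0) and n = 1:
#   lamda_from_bragg(np.deg2rad(30.0), 2.0, 1) ~ 2 * 2.0 * sin(15 deg) ~ 1.035 Angstrom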
def find_peaks(chi, sides=6, intensity_threshold=0):
# Find all potential peaks
preliminary_peaks = argrelmax(chi, order=20)[0]
# peaks must have at least sides pixels of data to work with
preliminary_peaks2 = preliminary_peaks[
np.where(preliminary_peaks < len(chi) - sides)]
# make certain that a peak has a drop off which causes the peak height to
# be more than twice the height at sides pixels away
criteria = chi[preliminary_peaks2] >= 2 * chi[preliminary_peaks2 + sides]
criteria *= chi[preliminary_peaks2] >= 2 * chi[preliminary_peaks2 - sides]
criteria *= chi[preliminary_peaks2] >= intensity_threshold
peaks = preliminary_peaks2[np.where(criteria)]  # index the filtered list that criteria was computed over
left_idxs = peaks - sides
right_idxs = peaks + sides
peak_centers = peaks
left_idxs[left_idxs < 0] = 0
right_idxs[right_idxs > len(chi)] = len(chi)
return left_idxs, right_idxs, peak_centers
def get_wavelength_from_std_tth(x, y, d_spacings, ns, plot=False):
"""
Return the wavelength from a two theta scan of a standard
Parameters
----------
x: ndarray
the two theta coordinates
y: ndarray
the detector intensity
d_spacings: ndarray
the dspacings of the standard
ns: ndarray
the multiplicity of the reflection
plot: bool
If true plot some of the intermediate data
Returns
-------
float:
The average wavelength
float:
The standard deviation of the wavelength
"""
l, r, c = find_peaks(y)
lmfit_centers = []
for lidx, ridx, peak_center in zip(l, r, c):
mod = VoigtModel()
pars = mod.guess(y[lidx: ridx],
x=x[lidx: ridx])
out = mod.fit(y[lidx: ridx], pars,
x=x[lidx: ridx])
lmfit_centers.append(out.values['center'])
lmfit_centers = np.asarray(lmfit_centers)
if plot:
plt.plot(x, y)
plt.plot(x[c], y[c], 'ro')
plt.show()
wavelengths = []
l_peaks = lmfit_centers[lmfit_centers < 0.]
r_peaks = lmfit_centers[lmfit_centers > 0.]
for peak_set in [r_peaks, l_peaks[::-1]]:
for peak_center, d, n in zip(peak_set, d_spacings, ns):
tth = np.deg2rad(np.abs(peak_center))
wavelengths.append(lamda_from_bragg(tth, d, n))
return np.average(wavelengths), np.std(wavelengths)
from bluesky.callbacks import CollectThenCompute
class ComputeWavelength(CollectThenCompute):
"""
Example
-------
>>> cw = ComputeWavelength('tth_cal', 'some_detector', d_spacings, ns)
>>> RE(scan(...), cw)
"""
CONVERSION_FACTOR = 12.3984 # keV-Angstroms
def __init__(self, x_name, y_name, d_spacings, ns=None):
self._descriptors = []
self._events = []
self.x_name = x_name
self.y_name = y_name
self.d_spacings = d_spacings
self.wavelength = None
self.wavelength_std = None
if ns is None:
self.ns = np.ones(self.d_spacings.shape)
else:
self.ns = ns
@property
def energy(self):
if self.wavelength is None:
return None
else:
return self.CONVERSION_FACTOR / self.wavelength
def compute(self):
x = []
y = []
for event in self._events:
x.append(event['data'][self.x_name])
y.append(event['data'][self.y_name])
x = np.array(x)
y = np.array(y)
self.wavelength, self.wavelength_std = get_wavelength_from_std_tth(x, y, self.d_spacings, self.ns)
print('wavelength', self.wavelength, '+-', self.wavelength_std)
print('energy', self.energy)
"""
if __name__ == '__main__':
import os
calibration_file = os.path.join('../../data/LaB6_d.txt')
# step 0 load data
d_spacings = np.loadtxt(calibration_file)
for data_file in ['../../data/Lab6_67p8.chi', '../../data/Lab6_67p6.chi']:
a = np.loadtxt(data_file)
wavechange = []
b = np.linspace(.1, 3, 100)
for dx in b:
x = a[:, 0]
x = np.hstack((np.zeros(1), x))
x = np.hstack((-x[::-2], x))
y = a[:, 1]
y = np.hstack((np.zeros(1), y))
y = np.hstack((y[::-1], y))
x = x[:] + dx
y = y[:]
wavechange.append(get_wavelength_from_std_tth(x, y, d_spacings,
np.ones(d_spacings.shape),
)[0])
plt.plot(b, wavechange)
plt.show()
"""
| NSLS-II-XPD/ipython_ophyd | archived/profile_collection-dev/startup/42-energy-calib.py | Python | bsd-2-clause | 4,842 | 0.001239 |
""" This test need a set of pins which can be set as inputs and have no external
pull up or pull down connected.
"""
from machine import Pin
import os
mch = os.uname().machine
if 'LaunchPad' in mch:
pin_map = ['GP24', 'GP12', 'GP14', 'GP15', 'GP16', 'GP17', 'GP28', 'GP8', 'GP6', 'GP30', 'GP31', 'GP3', 'GP0', 'GP4', 'GP5']
max_af_idx = 15
elif 'WiPy' in mch:
pin_map = ['GP23', 'GP24', 'GP12', 'GP13', 'GP14', 'GP9', 'GP17', 'GP28', 'GP22', 'GP8', 'GP30', 'GP31', 'GP0', 'GP4', 'GP5']
max_af_idx = 15
else:
raise Exception('Board not supported!')
def test_noinit():
for p in pin_map:
pin = Pin(p)
pin.value()
def test_pin_read(pull):
# enable the pull resistor on all pins, then read the value
for p in pin_map:
pin = Pin(p, mode=Pin.IN, pull=pull)
for p in pin_map:
print(pin())
def test_pin_af():
for p in pin_map:
for af in Pin(p).alt_list():
if af[1] <= max_af_idx:
Pin(p, mode=Pin.ALT, alt=af[1])
Pin(p, mode=Pin.ALT_OPEN_DRAIN, alt=af[1])
# test un-initialized pins
test_noinit()
# test with pull-up and pull-down
test_pin_read(Pin.PULL_UP)
test_pin_read(Pin.PULL_DOWN)
# test all constructor combinations
pin = Pin(pin_map[0])
pin = Pin(pin_map[0], mode=Pin.IN)
pin = Pin(pin_map[0], mode=Pin.OUT)
pin = Pin(pin_map[0], mode=Pin.IN, pull=Pin.PULL_DOWN)
pin = Pin(pin_map[0], mode=Pin.IN, pull=Pin.PULL_UP)
pin = Pin(pin_map[0], mode=Pin.OPEN_DRAIN, pull=Pin.PULL_UP)
pin = Pin(pin_map[0], mode=Pin.OUT, pull=Pin.PULL_DOWN)
pin = Pin(pin_map[0], mode=Pin.OUT, pull=None)
pin = Pin(pin_map[0], mode=Pin.OUT, pull=Pin.PULL_UP)
pin = Pin(pin_map[0], mode=Pin.OUT, pull=Pin.PULL_UP, drive=pin.LOW_POWER)
pin = Pin(pin_map[0], mode=Pin.OUT, pull=Pin.PULL_UP, drive=pin.MED_POWER)
pin = Pin(pin_map[0], mode=Pin.OUT, pull=Pin.PULL_UP, drive=pin.HIGH_POWER)
pin = Pin(pin_map[0], mode=Pin.OUT, drive=pin.LOW_POWER)
pin = Pin(pin_map[0], Pin.OUT, Pin.PULL_DOWN)
pin = Pin(pin_map[0], Pin.ALT, Pin.PULL_UP)
pin = Pin(pin_map[0], Pin.ALT_OPEN_DRAIN, Pin.PULL_UP)
test_pin_af() # try the entire af range on all pins
# test pin init and printing
pin = Pin(pin_map[0])
pin.init(mode=Pin.IN)
print(pin)
pin.init(Pin.IN, Pin.PULL_DOWN)
print(pin)
pin.init(mode=Pin.OUT, pull=Pin.PULL_UP, drive=pin.LOW_POWER)
print(pin)
pin.init(mode=Pin.OUT, pull=Pin.PULL_UP, drive=pin.HIGH_POWER)
print(pin)
# test value in OUT mode
pin = Pin(pin_map[0], mode=Pin.OUT)
pin.value(0)
pin.toggle() # test toggle
print(pin())
pin.toggle() # test toggle again
print(pin())
# test different value settings
pin(1)
print(pin.value())
pin(0)
print(pin.value())
pin.value(1)
print(pin())
pin.value(0)
print(pin())
# test all getters and setters
pin = Pin(pin_map[0], mode=Pin.OUT)
# mode
print(pin.mode() == Pin.OUT)
pin.mode(Pin.IN)
print(pin.mode() == Pin.IN)
# pull
pin.pull(None)
print(pin.pull() == None)
pin.pull(Pin.PULL_DOWN)
print(pin.pull() == Pin.PULL_DOWN)
# drive
pin.drive(Pin.MED_POWER)
print(pin.drive() == Pin.MED_POWER)
pin.drive(Pin.HIGH_POWER)
print(pin.drive() == Pin.HIGH_POWER)
# id
print(pin.id() == pin_map[0])
# all the next ones MUST raise
try:
pin = Pin(pin_map[0], mode=Pin.OUT, pull=Pin.PULL_UP, drive=pin.IN) # incorrect drive value
except Exception:
print('Exception')
try:
pin = Pin(pin_map[0], mode=Pin.LOW_POWER, pull=Pin.PULL_UP) # incorrect mode value
except Exception:
print('Exception')
try:
pin = Pin(pin_map[0], mode=Pin.IN, pull=Pin.HIGH_POWER) # incorrect pull value
except Exception:
print('Exception')
try:
pin = Pin('A0', Pin.OUT, Pin.PULL_DOWN) # incorrect pin id
except Exception:
print('Exception')
try:
pin = Pin(pin_map[0], Pin.IN, Pin.PULL_UP, alt=0) # af specified in GPIO mode
except Exception:
print('Exception')
try:
pin = Pin(pin_map[0], Pin.OUT, Pin.PULL_UP, alt=7) # af specified in GPIO mode
except Exception:
print('Exception')
try:
pin = Pin(pin_map[0], Pin.ALT, Pin.PULL_UP, alt=0) # incorrect af
except Exception:
print('Exception')
try:
pin = Pin(pin_map[0], Pin.ALT_OPEN_DRAIN, Pin.PULL_UP, alt=-1) # incorrect af
except Exception:
print('Exception')
try:
pin = Pin(pin_map[0], Pin.ALT_OPEN_DRAIN, Pin.PULL_UP, alt=16) # incorrect af
except Exception:
print('Exception')
try:
pin.mode(Pin.PULL_UP) # incorrect pin mode
except Exception:
print('Exception')
try:
pin.pull(Pin.OUT) # incorrect pull
except Exception:
print('Exception')
try:
pin.drive(Pin.IN) # incorrect drive strength
except Exception:
print('Exception')
try:
pin.id('ABC') # id cannot be set
except Exception:
print('Exception')
| feilongfl/micropython | tests/wipy/pin.py | Python | mit | 4,685 | 0.007044 |
from __future__ import absolute_import
import os
from celery import Celery
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'sugarcub.settings')
from django.conf import settings # noqa
app = Celery('sugarcub')
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
@app.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request))
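# A worker for this app would typically be launched from the project root with something like
# `celery -A sugarcub worker -l info` (shown as the usual Celery/Django convention, not a command
# taken from this repository's own docs).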
| bronycub/sugarcub | sugarcub/celery.py | Python | gpl-3.0 | 576 | 0 |
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: kinesis_stream
short_description: Manage a Kinesis Stream.
description:
- Create or Delete a Kinesis Stream.
- Update the retention period of a Kinesis Stream.
- Update Tags on a Kinesis Stream.
- Enable/disable server side encryption on a Kinesis Stream.
version_added: "2.2"
requirements: [ boto3 ]
author: Allen Sanabria (@linuxdynasty)
options:
name:
description:
- The name of the Kinesis Stream you are managing.
required: true
type: str
shards:
description:
- The number of shards you want to have with this stream.
- This is required when I(state=present)
type: int
retention_period:
description:
- The length of time (in hours) data records are accessible after they are added to
the stream.
- The default retention period is 24 hours and can not be less than 24 hours.
- The maximum retention period is 168 hours.
- The retention period can be modified during any point in time.
type: int
state:
description:
- Create or Delete the Kinesis Stream.
default: present
choices: [ 'present', 'absent' ]
type: str
wait:
description:
- Wait for operation to complete before returning.
default: true
type: bool
wait_timeout:
description:
- How many seconds to wait for an operation to complete before timing out.
default: 300
type: int
tags:
description:
- "A dictionary of resource tags of the form: C({ tag1: value1, tag2: value2 })."
aliases: [ "resource_tags" ]
type: dict
encryption_state:
description:
- Enable or Disable encryption on the Kinesis Stream.
choices: [ 'enabled', 'disabled' ]
version_added: "2.5"
type: str
encryption_type:
description:
- The type of encryption.
- Defaults to C(KMS)
choices: ['KMS', 'NONE']
version_added: "2.5"
type: str
key_id:
description:
- The GUID or alias for the KMS key.
version_added: "2.5"
type: str
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Basic creation example:
- name: Set up Kinesis Stream with 10 shards and wait for the stream to become ACTIVE
kinesis_stream:
name: test-stream
shards: 10
wait: yes
wait_timeout: 600
register: test_stream
# Basic creation example with tags:
- name: Set up Kinesis Stream with 10 shards, tag the environment, and wait for the stream to become ACTIVE
kinesis_stream:
name: test-stream
shards: 10
tags:
Env: development
wait: yes
wait_timeout: 600
register: test_stream
# Basic creation example with tags and increase the retention period from the default 24 hours to 48 hours:
- name: Set up Kinesis Stream with 10 shards, tag the environment, increase the retention period and wait for the stream to become ACTIVE
kinesis_stream:
name: test-stream
retention_period: 48
shards: 10
tags:
Env: development
wait: yes
wait_timeout: 600
register: test_stream
# Basic delete example:
- name: Delete Kinesis Stream test-stream and wait for it to finish deleting.
kinesis_stream:
name: test-stream
state: absent
wait: yes
wait_timeout: 600
register: test_stream
# Basic enable encryption example:
- name: Encrypt Kinesis Stream test-stream.
kinesis_stream:
name: test-stream
state: present
encryption_state: enabled
encryption_type: KMS
key_id: alias/aws/kinesis
wait: yes
wait_timeout: 600
register: test_stream
# Basic disable encryption example:
- name: Stop encryption on Kinesis Stream test-stream.
kinesis_stream:
name: test-stream
state: present
encryption_state: disabled
encryption_type: KMS
key_id: alias/aws/kinesis
wait: yes
wait_timeout: 600
register: test_stream
'''
RETURN = '''
stream_name:
description: The name of the Kinesis Stream.
returned: when state == present.
type: str
sample: "test-stream"
stream_arn:
description: The amazon resource identifier
returned: when state == present.
type: str
sample: "arn:aws:kinesis:east-side:123456789:stream/test-stream"
stream_status:
description: The current state of the Kinesis Stream.
returned: when state == present.
type: str
sample: "ACTIVE"
retention_period_hours:
description: Number of hours messages will be kept for a Kinesis Stream.
returned: when state == present.
type: int
sample: 24
tags:
description: Dictionary containing all the tags associated with the Kinesis stream.
returned: when state == present.
type: dict
sample: {
"Name": "Splunk",
"Env": "development"
}
'''
import re
import datetime
import time
from functools import reduce
try:
import botocore.exceptions
except ImportError:
pass # Taken care of by ec2.HAS_BOTO3
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import HAS_BOTO3, boto3_conn, ec2_argument_spec, get_aws_connection_info
from ansible.module_utils._text import to_native
def convert_to_lower(data):
"""Convert all uppercase keys in dict with lowercase_
Args:
data (dict): Dictionary with keys that have upper cases in them
Example.. FooBar == foo_bar
if a val is of type datetime.datetime, it will be converted to
the ISO 8601
Basic Usage:
>>> test = {'FooBar': []}
>>> test = convert_to_lower(test)
{
'foo_bar': []
}
Returns:
Dictionary
"""
results = dict()
if isinstance(data, dict):
for key, val in data.items():
key = re.sub(r'(([A-Z]{1,3}){1})', r'_\1', key).lower()
if key[0] == '_':
key = key[1:]
if isinstance(val, datetime.datetime):
results[key] = val.isoformat()
elif isinstance(val, dict):
results[key] = convert_to_lower(val)
elif isinstance(val, list):
converted = list()
for item in val:
converted.append(convert_to_lower(item))
results[key] = converted
else:
results[key] = val
return results
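# Additional illustrative example (added comment; the input values are assumptions):
# >>> convert_to_lower({'StreamName': 'test-stream', 'StreamARN': 'arn:aws:kinesis:...:stream/test-stream'})
# {
#     'stream_name': 'test-stream',
#     'stream_arn': 'arn:aws:kinesis:...:stream/test-stream'
# }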
def make_tags_in_proper_format(tags):
"""Take a dictionary of tags and convert them into the AWS Tags format.
Args:
tags (list): The tags you want applied.
Basic Usage:
>>> tags = [{'Key': 'env', 'Value': 'development'}]
>>> make_tags_in_proper_format(tags)
{
"env": "development",
}
Returns:
Dict
"""
formatted_tags = dict()
for tag in tags:
formatted_tags[tag.get('Key')] = tag.get('Value')
return formatted_tags
def make_tags_in_aws_format(tags):
"""Take a dictionary of tags and convert them into the AWS Tags format.
Args:
tags (dict): The tags you want applied.
Basic Usage:
>>> tags = {'env': 'development', 'service': 'web'}
>>> make_tags_in_aws_format(tags)
[
{
"Value": "web",
"Key": "service"
},
{
"Value": "development",
"key": "env"
}
]
Returns:
List
"""
formatted_tags = list()
for key, val in tags.items():
formatted_tags.append({
'Key': key,
'Value': val
})
return formatted_tags
def get_tags(client, stream_name, check_mode=False):
"""Retrieve the tags for a Kinesis Stream.
Args:
client (botocore.client.EC2): Boto3 client.
stream_name (str): Name of the Kinesis stream.
Kwargs:
check_mode (bool): This will pass DryRun as one of the parameters to the aws api.
default=False
Basic Usage:
>>> client = boto3.client('kinesis')
>>> stream_name = 'test-stream'
>>> get_tags(client, stream_name)
Returns:
Tuple (bool, str, dict)
"""
err_msg = ''
success = False
params = {
'StreamName': stream_name,
}
results = dict()
try:
if not check_mode:
results = (
client.list_tags_for_stream(**params)['Tags']
)
else:
results = [
{
'Key': 'DryRunMode',
'Value': 'true'
},
]
success = True
except botocore.exceptions.ClientError as e:
err_msg = to_native(e)
return success, err_msg, results
def find_stream(client, stream_name, check_mode=False):
"""Retrieve a Kinesis Stream.
Args:
client (botocore.client.EC2): Boto3 client.
stream_name (str): Name of the Kinesis stream.
Kwargs:
check_mode (bool): This will pass DryRun as one of the parameters to the aws api.
default=False
Basic Usage:
>>> client = boto3.client('kinesis')
>>> stream_name = 'test-stream'
>>> find_stream(client, stream_name)
Returns:
Tuple (bool, str, dict)
"""
err_msg = ''
success = False
params = {
'StreamName': stream_name,
}
results = dict()
has_more_shards = True
shards = list()
try:
if not check_mode:
while has_more_shards:
results = (
client.describe_stream(**params)['StreamDescription']
)
shards.extend(results.pop('Shards'))
has_more_shards = results['HasMoreShards']
results['Shards'] = shards
num_closed_shards = len([s for s in shards if 'EndingSequenceNumber' in s['SequenceNumberRange']])
results['OpenShardsCount'] = len(shards) - num_closed_shards
results['ClosedShardsCount'] = num_closed_shards
results['ShardsCount'] = len(shards)
else:
results = {
'OpenShardsCount': 5,
'ClosedShardsCount': 0,
'ShardsCount': 5,
'HasMoreShards': True,
'RetentionPeriodHours': 24,
'StreamName': stream_name,
'StreamARN': 'arn:aws:kinesis:east-side:123456789:stream/{0}'.format(stream_name),
'StreamStatus': 'ACTIVE',
'EncryptionType': 'NONE'
}
success = True
except botocore.exceptions.ClientError as e:
err_msg = to_native(e)
return success, err_msg, results
def wait_for_status(client, stream_name, status, wait_timeout=300,
check_mode=False):
"""Wait for the status to change for a Kinesis Stream.
Args:
client (botocore.client.EC2): Boto3 client
stream_name (str): The name of the kinesis stream.
status (str): The status to wait for.
examples. status=ACTIVE, status=DELETING
Kwargs:
wait_timeout (int): Number of seconds to wait, until this timeout is reached.
check_mode (bool): This will pass DryRun as one of the parameters to the aws api.
default=False
Basic Usage:
>>> client = boto3.client('kinesis')
>>> stream_name = 'test-stream'
>>> wait_for_status(client, stream_name, 'ACTIVE', 300)
Returns:
Tuple (bool, str, dict)
"""
polling_increment_secs = 5
wait_timeout = time.time() + wait_timeout
status_achieved = False
stream = dict()
err_msg = ""
while wait_timeout > time.time():
try:
find_success, find_msg, stream = (
find_stream(client, stream_name, check_mode=check_mode)
)
if check_mode:
status_achieved = True
break
elif status != 'DELETING':
if find_success and stream:
if stream.get('StreamStatus') == status:
status_achieved = True
break
else:
if not find_success:
status_achieved = True
break
except botocore.exceptions.ClientError as e:
err_msg = to_native(e)
time.sleep(polling_increment_secs)
if not status_achieved:
err_msg = "Wait time out reached, while waiting for results"
else:
err_msg = "Status {0} achieved successfully".format(status)
return status_achieved, err_msg, stream
def tags_action(client, stream_name, tags, action='create', check_mode=False):
"""Create or delete multiple tags from a Kinesis Stream.
Args:
client (botocore.client.EC2): Boto3 client.
stream_name (str): The name of the Kinesis stream.
tags (dict): Dictionary of tags to create, or to delete by key.
examples.. {'env': 'development'}
Kwargs:
action (str): The action to perform.
valid actions == create and delete
default=create
check_mode (bool): This will pass DryRun as one of the parameters to the aws api.
default=False
Basic Usage:
>>> client = boto3.client('kinesis')
>>> stream_name = 'test-stream'
>>> tags = {'env': 'development'}
>>> tags_action(client, stream_name, tags, action='create')
[True, '']
Returns:
List (bool, str)
"""
success = False
err_msg = ""
params = {'StreamName': stream_name}
try:
if not check_mode:
if action == 'create':
params['Tags'] = tags
client.add_tags_to_stream(**params)
success = True
elif action == 'delete':
params['TagKeys'] = list(tags)
client.remove_tags_from_stream(**params)
success = True
else:
err_msg = 'Invalid action {0}'.format(action)
else:
if action == 'create':
success = True
elif action == 'delete':
success = True
else:
err_msg = 'Invalid action {0}'.format(action)
except botocore.exceptions.ClientError as e:
err_msg = to_native(e)
return success, err_msg
def recreate_tags_from_list(list_of_tags):
"""Recreate tags from a list of tuples into the Amazon Tag format.
Args:
list_of_tags (list): List of tuples.
Basic Usage:
>>> list_of_tags = [('Env', 'Development')]
>>> recreate_tags_from_list(list_of_tags)
[
{
"Value": "Development",
"Key": "Env"
}
]
Returns:
List
"""
tags = list()
i = 0
for i in range(len(list_of_tags)):
key_name = list_of_tags[i][0]
key_val = list_of_tags[i][1]
tags.append(
{
'Key': key_name,
'Value': key_val
}
)
return tags
def update_tags(client, stream_name, tags, check_mode=False):
"""Update tags for an amazon resource.
Args:
client (botocore.client.EC2): Boto3 client.
stream_name (str): The name of the Kinesis stream.
tags (dict): Dictionary of tags you want applied to the Kinesis stream.
Kwargs:
check_mode (bool): This will pass DryRun as one of the parameters to the aws api.
default=False
Basic Usage:
>>> client = boto3.client('kinesis')
>>> stream_name = 'test-stream'
>>> tags = {'env': 'development'}
>>> update_tags(client, stream_name, tags)
[True, '']
Return:
Tuple (bool, bool, str)
"""
success = False
changed = False
err_msg = ''
tag_success, tag_msg, current_tags = (
get_tags(client, stream_name, check_mode=check_mode)
)
if current_tags:
tags = make_tags_in_aws_format(tags)
current_tags_set = (
set(
reduce(
lambda x, y: x + y,
[make_tags_in_proper_format(current_tags).items()]
)
)
)
new_tags_set = (
set(
reduce(
lambda x, y: x + y,
[make_tags_in_proper_format(tags).items()]
)
)
)
tags_to_delete = list(current_tags_set.difference(new_tags_set))
tags_to_update = list(new_tags_set.difference(current_tags_set))
if tags_to_delete:
tags_to_delete = make_tags_in_proper_format(
recreate_tags_from_list(tags_to_delete)
)
delete_success, delete_msg = (
tags_action(
client, stream_name, tags_to_delete, action='delete',
check_mode=check_mode
)
)
if not delete_success:
return delete_success, changed, delete_msg
if tags_to_update:
tags = make_tags_in_proper_format(
recreate_tags_from_list(tags_to_update)
)
else:
return True, changed, 'Tags do not need to be updated'
if tags:
create_success, create_msg = (
tags_action(
client, stream_name, tags, action='create',
check_mode=check_mode
)
)
if create_success:
changed = True
return create_success, changed, create_msg
return success, changed, err_msg
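# Worked example of the tag diff above (added comment; values are assumptions): with current stream
# tags {'env': 'dev', 'svc': 'web'} and desired tags {'env': 'prod'}:
#   current_tags_set -> {('env', 'dev'), ('svc', 'web')}
#   new_tags_set -> {('env', 'prod')}
#   tags_to_delete -> both existing pairs, removed first via remove_tags_from_stream
#   tags_to_update -> [('env', 'prod')], then re-added via add_tags_to_stream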
def stream_action(client, stream_name, shard_count=1, action='create',
timeout=300, check_mode=False):
"""Create or Delete an Amazon Kinesis Stream.
Args:
client (botocore.client.EC2): Boto3 client.
stream_name (str): The name of the kinesis stream.
Kwargs:
shard_count (int): Number of shards this stream will use.
action (str): The action to perform.
valid actions == create and delete
default=create
check_mode (bool): This will pass DryRun as one of the parameters to the aws api.
default=False
Basic Usage:
>>> client = boto3.client('kinesis')
>>> stream_name = 'test-stream'
>>> shard_count = 20
>>> stream_action(client, stream_name, shard_count, action='create')
Returns:
List (bool, str)
"""
success = False
err_msg = ''
params = {
'StreamName': stream_name
}
try:
if not check_mode:
if action == 'create':
params['ShardCount'] = shard_count
client.create_stream(**params)
success = True
elif action == 'delete':
client.delete_stream(**params)
success = True
else:
err_msg = 'Invalid action {0}'.format(action)
else:
if action == 'create':
success = True
elif action == 'delete':
success = True
else:
err_msg = 'Invalid action {0}'.format(action)
except botocore.exceptions.ClientError as e:
err_msg = to_native(e)
return success, err_msg
def stream_encryption_action(client, stream_name, action='start_encryption', encryption_type='', key_id='',
timeout=300, check_mode=False):
"""Create, Encrypt or Delete an Amazon Kinesis Stream.
Args:
client (botocore.client.EC2): Boto3 client.
stream_name (str): The name of the kinesis stream.
Kwargs:
action (str): The action to perform.
valid actions == start_encryption and stop_encryption
default=start_encryption
encryption_type (str): NONE or KMS
key_id (str): The GUID or alias for the KMS key
check_mode (bool): This will pass DryRun as one of the parameters to the aws api.
default=False
Basic Usage:
>>> client = boto3.client('kinesis')
>>> stream_name = 'test-stream'
>>> stream_encryption_action(client, stream_name, action='start_encryption', encryption_type='KMS', key_id='alias/aws')
Returns:
List (bool, str)
"""
success = False
err_msg = ''
params = {
'StreamName': stream_name
}
try:
if not check_mode:
if action == 'start_encryption':
params['EncryptionType'] = encryption_type
params['KeyId'] = key_id
client.start_stream_encryption(**params)
success = True
elif action == 'stop_encryption':
params['EncryptionType'] = encryption_type
params['KeyId'] = key_id
client.stop_stream_encryption(**params)
success = True
else:
err_msg = 'Invalid encryption action {0}'.format(action)
else:
if action == 'start_encryption':
success = True
elif action == 'stop_encryption':
success = True
else:
err_msg = 'Invalid encryption action {0}'.format(action)
except botocore.exceptions.ClientError as e:
err_msg = to_native(e)
return success, err_msg
def retention_action(client, stream_name, retention_period=24,
action='increase', check_mode=False):
"""Increase or Decrease the retention of messages in the Kinesis stream.
Args:
client (botocore.client.EC2): Boto3 client.
stream_name (str): The name of the kinesis stream.
Kwargs:
retention_period (int): This is how long messages will be kept before
they are discarded. This can not be less than 24 hours.
action (str): The action to perform.
valid actions == increase and decrease
default=increase
check_mode (bool): This will pass DryRun as one of the parameters to the aws api.
default=False
Basic Usage:
>>> client = boto3.client('kinesis')
>>> stream_name = 'test-stream'
>>> retention_period = 48
>>> retention_action(client, stream_name, retention_period, action='increase')
Returns:
Tuple (bool, str)
"""
success = False
err_msg = ''
params = {
'StreamName': stream_name
}
try:
if not check_mode:
if action == 'increase':
params['RetentionPeriodHours'] = retention_period
client.increase_stream_retention_period(**params)
success = True
err_msg = (
'Retention Period increased successfully to {0}'.format(retention_period)
)
elif action == 'decrease':
params['RetentionPeriodHours'] = retention_period
client.decrease_stream_retention_period(**params)
success = True
err_msg = (
'Retention Period decreased successfully to {0}'.format(retention_period)
)
else:
err_msg = 'Invalid action {0}'.format(action)
else:
if action == 'increase':
success = True
elif action == 'decrease':
success = True
else:
err_msg = 'Invalid action {0}'.format(action)
except botocore.exceptions.ClientError as e:
err_msg = to_native(e)
return success, err_msg
def update_shard_count(client, stream_name, number_of_shards=1, check_mode=False):
"""Increase or Decrease the number of shards in the Kinesis stream.
Args:
client (botocore.client.EC2): Boto3 client.
stream_name (str): The name of the kinesis stream.
Kwargs:
number_of_shards (int): Number of shards this stream will use.
default=1
check_mode (bool): This will pass DryRun as one of the parameters to the aws api.
default=False
Basic Usage:
>>> client = boto3.client('kinesis')
>>> stream_name = 'test-stream'
>>> number_of_shards = 3
>>> update_shard_count(client, stream_name, number_of_shards)
Returns:
Tuple (bool, str)
"""
success = True
err_msg = ''
params = {
'StreamName': stream_name,
'ScalingType': 'UNIFORM_SCALING'
}
if not check_mode:
params['TargetShardCount'] = number_of_shards
try:
client.update_shard_count(**params)
except botocore.exceptions.ClientError as e:
return False, str(e)
return success, err_msg
def update(client, current_stream, stream_name, number_of_shards=1, retention_period=None,
tags=None, wait=False, wait_timeout=300, check_mode=False):
"""Update an Amazon Kinesis Stream.
Args:
client (botocore.client.EC2): Boto3 client.
stream_name (str): The name of the kinesis stream.
Kwargs:
number_of_shards (int): Number of shards this stream will use.
default=1
retention_period (int): This is how long messages will be kept before
they are discarded. This can not be less than 24 hours.
tags (dict): The tags you want applied.
wait (bool): Wait until Stream is ACTIVE.
default=False
wait_timeout (int): How long to wait until this operation is considered failed.
default=300
check_mode (bool): This will pass DryRun as one of the parameters to the aws api.
default=False
Basic Usage:
>>> client = boto3.client('kinesis')
>>> current_stream = {
'ShardCount': 3,
'HasMoreShards': True,
'RetentionPeriodHours': 24,
'StreamName': 'test-stream',
'StreamARN': 'arn:aws:kinesis:us-west-2:123456789:stream/test-stream',
'StreamStatus': "ACTIVE'
}
>>> stream_name = 'test-stream'
>>> retention_period = 48
>>> number_of_shards = 10
>>> update(client, current_stream, stream_name,
number_of_shards, retention_period )
Returns:
Tuple (bool, bool, str)
"""
success = True
changed = False
err_msg = ''
if retention_period:
if wait:
wait_success, wait_msg, current_stream = (
wait_for_status(
client, stream_name, 'ACTIVE', wait_timeout,
check_mode=check_mode
)
)
if not wait_success:
return wait_success, False, wait_msg
if current_stream.get('StreamStatus') == 'ACTIVE':
retention_changed = False
if retention_period > current_stream['RetentionPeriodHours']:
retention_changed, retention_msg = (
retention_action(
client, stream_name, retention_period, action='increase',
check_mode=check_mode
)
)
elif retention_period < current_stream['RetentionPeriodHours']:
retention_changed, retention_msg = (
retention_action(
client, stream_name, retention_period, action='decrease',
check_mode=check_mode
)
)
elif retention_period == current_stream['RetentionPeriodHours']:
retention_msg = (
'Retention {0} is the same as {1}'
.format(
retention_period,
current_stream['RetentionPeriodHours']
)
)
success = True
if retention_changed:
success = True
changed = True
err_msg = retention_msg
if changed and wait:
wait_success, wait_msg, current_stream = (
wait_for_status(
client, stream_name, 'ACTIVE', wait_timeout,
check_mode=check_mode
)
)
if not wait_success:
return wait_success, False, wait_msg
elif changed and not wait:
stream_found, stream_msg, current_stream = (
find_stream(client, stream_name, check_mode=check_mode)
)
if stream_found:
if current_stream['StreamStatus'] != 'ACTIVE':
err_msg = (
'Retention Period for {0} is in the process of updating'
.format(stream_name)
)
return success, changed, err_msg
else:
err_msg = (
'StreamStatus has to be ACTIVE in order to modify the retention period. Current status is {0}'
.format(current_stream.get('StreamStatus', 'UNKNOWN'))
)
return success, changed, err_msg
if current_stream['OpenShardsCount'] != number_of_shards:
success, err_msg = (
update_shard_count(client, stream_name, number_of_shards, check_mode=check_mode)
)
if not success:
return success, changed, err_msg
changed = True
if wait:
wait_success, wait_msg, current_stream = (
wait_for_status(
client, stream_name, 'ACTIVE', wait_timeout,
check_mode=check_mode
)
)
if not wait_success:
return wait_success, changed, wait_msg
else:
stream_found, stream_msg, current_stream = (
find_stream(client, stream_name, check_mode=check_mode)
)
if stream_found and current_stream['StreamStatus'] != 'ACTIVE':
err_msg = (
'Number of shards for {0} is in the process of updating'
.format(stream_name)
)
return success, changed, err_msg
if tags:
tag_success, tag_changed, err_msg = (
update_tags(client, stream_name, tags, check_mode=check_mode)
)
if wait:
success, err_msg, status_stream = (
wait_for_status(
client, stream_name, 'ACTIVE', wait_timeout,
check_mode=check_mode
)
)
if success and changed:
err_msg = 'Kinesis Stream {0} updated successfully.'.format(stream_name)
elif success and not changed:
err_msg = 'Kinesis Stream {0} did not change.'.format(stream_name)
return success, changed, err_msg
def create_stream(client, stream_name, number_of_shards=1, retention_period=None,
tags=None, wait=False, wait_timeout=300, check_mode=False):
"""Create an Amazon Kinesis Stream.
Args:
client (botocore.client.EC2): Boto3 client.
stream_name (str): The name of the kinesis stream.
Kwargs:
number_of_shards (int): Number of shards this stream will use.
default=1
retention_period (int): Can not be less than 24 hours
default=None
tags (dict): The tags you want applied.
default=None
wait (bool): Wait until Stream is ACTIVE.
default=False
wait_timeout (int): How long to wait until this operation is considered failed.
default=300
check_mode (bool): This will pass DryRun as one of the parameters to the aws api.
default=False
Basic Usage:
>>> client = boto3.client('kinesis')
>>> stream_name = 'test-stream'
>>> number_of_shards = 10
>>> tags = {'env': 'test'}
>>> create_stream(client, stream_name, number_of_shards, tags=tags)
Returns:
Tuple (bool, bool, str, dict)
"""
success = False
changed = False
err_msg = ''
results = dict()
stream_found, stream_msg, current_stream = (
find_stream(client, stream_name, check_mode=check_mode)
)
if stream_found and current_stream.get('StreamStatus') == 'DELETING' and wait:
wait_success, wait_msg, current_stream = (
wait_for_status(
client, stream_name, 'ACTIVE', wait_timeout,
check_mode=check_mode
)
)
if stream_found and current_stream.get('StreamStatus') != 'DELETING':
success, changed, err_msg = update(
client, current_stream, stream_name, number_of_shards,
retention_period, tags, wait, wait_timeout, check_mode=check_mode
)
else:
create_success, create_msg = (
stream_action(
client, stream_name, number_of_shards, action='create',
check_mode=check_mode
)
)
if not create_success:
changed = True
err_msg = 'Failed to create Kinesis stream: {0}'.format(create_msg)
return False, True, err_msg, {}
else:
changed = True
if wait:
wait_success, wait_msg, results = (
wait_for_status(
client, stream_name, 'ACTIVE', wait_timeout,
check_mode=check_mode
)
)
err_msg = (
'Kinesis Stream {0} is in the process of being created'
.format(stream_name)
)
if not wait_success:
return wait_success, True, wait_msg, results
else:
err_msg = (
'Kinesis Stream {0} created successfully'
.format(stream_name)
)
if tags:
changed, err_msg = (
tags_action(
client, stream_name, tags, action='create',
check_mode=check_mode
)
)
if changed:
success = True
if not success:
return success, changed, err_msg, results
stream_found, stream_msg, current_stream = (
find_stream(client, stream_name, check_mode=check_mode)
)
if retention_period and current_stream.get('StreamStatus') == 'ACTIVE':
changed, err_msg = (
retention_action(
client, stream_name, retention_period, action='increase',
check_mode=check_mode
)
)
if changed:
success = True
if not success:
return success, changed, err_msg, results
else:
err_msg = (
'StreamStatus has to be ACTIVE in order to modify the retention period. Current status is {0}'
.format(current_stream.get('StreamStatus', 'UNKNOWN'))
)
success = create_success
changed = True
if success:
stream_found, stream_msg, results = (
find_stream(client, stream_name, check_mode=check_mode)
)
tag_success, tag_msg, current_tags = (
get_tags(client, stream_name, check_mode=check_mode)
)
if current_tags and not check_mode:
current_tags = make_tags_in_proper_format(current_tags)
results['Tags'] = current_tags
elif check_mode and tags:
results['Tags'] = tags
else:
results['Tags'] = dict()
results = convert_to_lower(results)
return success, changed, err_msg, results
def delete_stream(client, stream_name, wait=False, wait_timeout=300,
check_mode=False):
"""Delete an Amazon Kinesis Stream.
Args:
client (botocore.client.EC2): Boto3 client.
stream_name (str): The name of the kinesis stream.
Kwargs:
wait (bool): Wait until Stream is ACTIVE.
default=False
wait_timeout (int): How long to wait until this operation is considered failed.
default=300
check_mode (bool): This will pass DryRun as one of the parameters to the aws api.
default=False
Basic Usage:
>>> client = boto3.client('kinesis')
>>> stream_name = 'test-stream'
>>> delete_stream(client, stream_name)
Returns:
Tuple (bool, bool, str, dict)
"""
success = False
changed = False
err_msg = ''
results = dict()
stream_found, stream_msg, current_stream = (
find_stream(client, stream_name, check_mode=check_mode)
)
if stream_found:
success, err_msg = (
stream_action(
client, stream_name, action='delete', check_mode=check_mode
)
)
if success:
changed = True
if wait:
success, err_msg, results = (
wait_for_status(
client, stream_name, 'DELETING', wait_timeout,
check_mode=check_mode
)
)
err_msg = 'Stream {0} deleted successfully'.format(stream_name)
if not success:
return success, True, err_msg, results
else:
err_msg = (
'Stream {0} is in the process of being deleted'
.format(stream_name)
)
else:
success = True
changed = False
err_msg = 'Stream {0} does not exist'.format(stream_name)
return success, changed, err_msg, results
def start_stream_encryption(client, stream_name, encryption_type='', key_id='',
wait=False, wait_timeout=300, check_mode=False):
"""Start encryption on an Amazon Kinesis Stream.
Args:
client (botocore.client.EC2): Boto3 client.
stream_name (str): The name of the kinesis stream.
Kwargs:
encryption_type (str): KMS or NONE
key_id (str): KMS key GUID or alias
wait (bool): Wait until Stream is ACTIVE.
default=False
wait_timeout (int): How long to wait until this operation is considered failed.
default=300
check_mode (bool): This will pass DryRun as one of the parameters to the aws api.
default=False
Basic Usage:
>>> client = boto3.client('kinesis')
>>> stream_name = 'test-stream'
>>> key_id = 'alias/aws'
>>> encryption_type = 'KMS'
>>> start_stream_encryption(client, stream_name,encryption_type,key_id)
Returns:
Tuple (bool, bool, str, dict)
"""
success = False
changed = False
err_msg = ''
params = {
'StreamName': stream_name
}
results = dict()
stream_found, stream_msg, current_stream = (
find_stream(client, stream_name, check_mode=check_mode)
)
if stream_found:
success, err_msg = (
stream_encryption_action(
client, stream_name, action='start_encryption', encryption_type=encryption_type, key_id=key_id, check_mode=check_mode
)
)
if success:
changed = True
if wait:
success, err_msg, results = (
wait_for_status(
client, stream_name, 'ACTIVE', wait_timeout,
check_mode=check_mode
)
)
err_msg = 'Kinesis Stream {0} encryption started successfully.'.format(stream_name)
if not success:
return success, True, err_msg, results
else:
err_msg = (
'Kinesis Stream {0} is in the process of starting encryption.'.format(stream_name)
)
else:
success = True
changed = False
err_msg = 'Kinesis Stream {0} does not exist'.format(stream_name)
return success, changed, err_msg, results
def stop_stream_encryption(client, stream_name, encryption_type='', key_id='',
wait=True, wait_timeout=300, check_mode=False):
"""Stop encryption on an Amazon Kinesis Stream.
Args:
client (botocore.client.EC2): Boto3 client.
stream_name (str): The name of the kinesis stream.
Kwargs:
encryption_type (str): KMS or NONE
key_id (str): KMS key GUID or alias
wait (bool): Wait until Stream is ACTIVE.
default=False
wait_timeout (int): How long to wait until this operation is considered failed.
default=300
check_mode (bool): This will pass DryRun as one of the parameters to the aws api.
default=False
Basic Usage:
>>> client = boto3.client('kinesis')
>>> stream_name = 'test-stream'
>>> stop_stream_encryption(client, stream_name, encryption_type, key_id)
Returns:
Tuple (bool, bool, str, dict)
"""
success = False
changed = False
err_msg = ''
params = {
'StreamName': stream_name
}
results = dict()
stream_found, stream_msg, current_stream = (
find_stream(client, stream_name, check_mode=check_mode)
)
if stream_found:
if current_stream.get('EncryptionType') == 'KMS':
success, err_msg = (
stream_encryption_action(
client, stream_name, action='stop_encryption', key_id=key_id, encryption_type=encryption_type, check_mode=check_mode
)
)
elif current_stream.get('EncryptionType') == 'NONE':
success = True
if success:
changed = True
if wait:
success, err_msg, results = (
wait_for_status(
client, stream_name, 'ACTIVE', wait_timeout,
check_mode=check_mode
)
)
err_msg = 'Kinesis Stream {0} encryption stopped successfully.'.format(stream_name)
if not success:
return success, True, err_msg, results
else:
err_msg = (
'Stream {0} is in the process of stopping encryption.'.format(stream_name)
)
else:
success = True
changed = False
err_msg = 'Stream {0} does not exist.'.format(stream_name)
return success, changed, err_msg, results
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(required=True),
shards=dict(default=None, required=False, type='int'),
retention_period=dict(default=None, required=False, type='int'),
tags=dict(default=None, required=False, type='dict', aliases=['resource_tags']),
wait=dict(default=True, required=False, type='bool'),
wait_timeout=dict(default=300, required=False, type='int'),
state=dict(default='present', choices=['present', 'absent']),
encryption_type=dict(required=False, choices=['NONE', 'KMS']),
key_id=dict(required=False, type='str'),
encryption_state=dict(required=False, choices=['enabled', 'disabled']),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
retention_period = module.params.get('retention_period')
stream_name = module.params.get('name')
shards = module.params.get('shards')
state = module.params.get('state')
tags = module.params.get('tags')
wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
encryption_type = module.params.get('encryption_type')
key_id = module.params.get('key_id')
encryption_state = module.params.get('encryption_state')
if state == 'present' and not shards:
module.fail_json(msg='Shards is required when state == present.')
if retention_period:
if retention_period < 24:
module.fail_json(msg='Retention period can not be less than 24 hours.')
if not HAS_BOTO3:
module.fail_json(msg='boto3 is required.')
check_mode = module.check_mode
try:
region, ec2_url, aws_connect_kwargs = (
get_aws_connection_info(module, boto3=True)
)
client = (
boto3_conn(
module, conn_type='client', resource='kinesis',
region=region, endpoint=ec2_url, **aws_connect_kwargs
)
)
except botocore.exceptions.ClientError as e:
err_msg = 'Boto3 Client Error - {0}'.format(to_native(e.msg))
module.fail_json(
success=False, changed=False, result={}, msg=err_msg
)
if state == 'present':
success, changed, err_msg, results = (
create_stream(
client, stream_name, shards, retention_period, tags,
wait, wait_timeout, check_mode
)
)
if encryption_state == 'enabled':
success, changed, err_msg, results = (
start_stream_encryption(
client, stream_name, encryption_type, key_id, wait, wait_timeout, check_mode
)
)
elif encryption_state == 'disabled':
success, changed, err_msg, results = (
stop_stream_encryption(
client, stream_name, encryption_type, key_id, wait, wait_timeout, check_mode
)
)
elif state == 'absent':
success, changed, err_msg, results = (
delete_stream(client, stream_name, wait, wait_timeout, check_mode)
)
if success:
module.exit_json(
success=success, changed=changed, msg=err_msg, **results
)
else:
module.fail_json(
success=success, changed=changed, msg=err_msg, result=results
)
if __name__ == '__main__':
main()
| kustodian/ansible | lib/ansible/modules/cloud/amazon/kinesis_stream.py | Python | gpl-3.0 | 46,551 | 0.001547 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-27 16:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shop', '0017_auto_20170327_1934'),
]
operations = [
migrations.AlterField(
model_name='tovar',
name='tovar_image',
field=models.ImageField(blank=True, upload_to='media/products/%Y/%m/%d/', verbose_name='Изображение товара'),
),
migrations.AlterField(
model_name='tovar_img',
name='tovar_image',
field=models.ImageField(blank=True, upload_to='media/products/%Y/%m/%d/', verbose_name='Изображение товара'),
),
migrations.AlterField(
model_name='tovar_img',
name='tovar_image1',
field=models.ImageField(blank=True, upload_to='media/products/%Y/%m/%d/', verbose_name='Изображение товара1'),
),
migrations.AlterField(
model_name='tovar_img',
name='tovar_image10',
field=models.ImageField(blank=True, upload_to='media/products/%Y/%m/%d/', verbose_name='Изображение товара10'),
),
migrations.AlterField(
model_name='tovar_img',
name='tovar_image11',
field=models.ImageField(blank=True, upload_to='media/products/%Y/%m/%d/', verbose_name='Изображение товара11'),
),
migrations.AlterField(
model_name='tovar_img',
name='tovar_image2',
field=models.ImageField(blank=True, upload_to='media/products/%Y/%m/%d/', verbose_name='Изображение товара2'),
),
migrations.AlterField(
model_name='tovar_img',
name='tovar_image3',
field=models.ImageField(blank=True, upload_to='media/products/%Y/%m/%d/', verbose_name='Изображение товара3'),
),
migrations.AlterField(
model_name='tovar_img',
name='tovar_image4',
field=models.ImageField(blank=True, upload_to='media/products/%Y/%m/%d/', verbose_name='Изображение товара4'),
),
migrations.AlterField(
model_name='tovar_img',
name='tovar_image5',
field=models.ImageField(blank=True, upload_to='media/products/%Y/%m/%d/', verbose_name='Изображение товара5'),
),
migrations.AlterField(
model_name='tovar_img',
name='tovar_image6',
field=models.ImageField(blank=True, upload_to='media/products/%Y/%m/%d/', verbose_name='Изображение товара6'),
),
migrations.AlterField(
model_name='tovar_img',
name='tovar_image7',
field=models.ImageField(blank=True, upload_to='media/products/%Y/%m/%d/', verbose_name='Изображение товара7'),
),
migrations.AlterField(
model_name='tovar_img',
name='tovar_image8',
field=models.ImageField(blank=True, upload_to='media/products/%Y/%m/%d/', verbose_name='Изображение товара8'),
),
migrations.AlterField(
model_name='tovar_img',
name='tovar_image9',
field=models.ImageField(blank=True, upload_to='media/products/%Y/%m/%d/', verbose_name='Изображение товара9'),
),
]
| IlyaDjurin/django-shop | shop/migrations/0018_auto_20170327_1937.py | Python | mit | 3,554 | 0.0039 |
"""
Copyright 2015 Brocade Communications Systems, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import time
import json
import sys
from clicrud.device.generic import generic
def read(queue, finq, ranonceq, **kwargs):
_cli_input = ('command', 'commands', 'listofcommands')
_command_list = []
_kwargs = {}
_kwargs = kwargs
_output_dict = {}
# _ranonce = False
for key in _kwargs:
if key in _cli_input:
if key == 'command':
_command_list.append(_kwargs.get(key))
if key == 'commands':
for key1 in _kwargs.get('commands'):
_command_list.append(key1)
if key == 'listofcommands':
try:
_command_file = open(_kwargs.get('listofcommands'), 'r')
_output = _command_file.readlines()
_command_file.close()
for line in _output:
line = line.translate(None, '\r\n')
_command_list.append(line)
except:
logging.error("Could not open 'listofcommands' file")
# Build transport
_transport = generic(**_kwargs)
if _transport.err:
finq.put('error')
_transport.close()
return
# Now we want to call each command and put the string output in a list
for index, command in enumerate(_command_list):
_output_dict[command] = _transport.read(command, return_type='string')
if _kwargs['setup']._splash is True:
sys.stdout.write("\r[%4s/%4s] Complete - " % (len(_command_list),
index+1) +
time.strftime("%d-%m-%Y") +
time.strftime("-%H:%M:%S"))
sys.stdout.flush()
# PEP8 Fix
# if _kwargs.has_key('delay'):
if "delay" in _kwargs:
time.sleep(_kwargs['delay'])
# Sets the ranonce bool if triggered once
# if not _ranonce:
# _ranonce = True
# ranonceq.put(True)
ranonceq.put(True)
queue.put(_output_dict)
# If we need to output to a file, let's do that.
# PEP8 Fix
# if _kwargs.has_key('fileoutput'):
if "fileoutput" in _kwargs:
# Create a filename on hostname+date
# Output the _output_dict to it in the right format
_filename = _transport.hostname
_filename += time.strftime("%d-%m-%Y-") + time.strftime("-%H-%M-%S")
try:
f = open(_filename, 'w')
if _kwargs.get('fileformat') == 'json':
f.write(json.dumps(_output_dict))
if _kwargs.get('fileformat') == 'string':
for command in _command_list:
f.write("COMMAND: " + command + "--------------------\r\n")
f.write(_output_dict.get(command) + "\r\n\r\n")
f.close()
except:
logging.error("Could not open/create file for output of commands")
finq.put('completed_run')
_transport.close()
# print _command_list
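# A minimal usage sketch (hypothetical transport kwargs and host details; the
# exact keyword arguments accepted by generic() are not shown in this file).
# read() expects three queues plus a 'setup' object exposing a _splash attribute:
#
#   from multiprocessing import Queue
#   from clicrud.crud import read
#   out_q, fin_q, ranonce_q = Queue(), Queue(), Queue()
#   read(out_q, fin_q, ranonce_q, setup=cli_setup,
#        commands=['show version', 'show clock'],
#        host='192.0.2.1', username='admin', password='secret')
#   results = out_q.get()    # {'show version': '<device output as a string>', ...}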
| DavidJohnGee/clicrud | clicrud/crud/__init__.py | Python | apache-2.0 | 3,616 | 0.000553 |
from __future__ import division, absolute_import, print_function
from flask import current_app, Blueprint, jsonify, url_for, request
from idb.helpers.cors import crossdomain
from .common import json_error, idbmodel, logger
this_version = Blueprint(__name__,__name__)
def format_list_item(t,uuid,etag,modified,version,parent):
links = {}
if t in current_app.config["PARENT_MAP"] and parent is not None:
links["".join(current_app.config["PARENT_MAP"][t][:-1])] = url_for(".item",t=current_app.config["PARENT_MAP"][t],u=parent,_external=True)
links["".join(t[:-1])] = url_for(".item",t=t,u=uuid,_external=True)
return {
"idigbio:uuid": uuid,
"idigbio:etag": etag,
"idigbio:dateModified": modified.isoformat(),
"idigbio:version": version,
"idigbio:links": links,
}
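# Illustrative output (hypothetical UUIDs, and assuming a PARENT_MAP entry such
# as {'records': 'recordsets'}): for a record with a known parent this returns
# roughly
#   {"idigbio:uuid": "<uuid>", "idigbio:etag": "<etag>",
#    "idigbio:dateModified": "2017-03-01T00:00:00", "idigbio:version": 2,
#    "idigbio:links": {"recordset": "<api root>/recordsets/<parent>",
#                      "record": "<api root>/records/<uuid>"}}
# where the link prefixes depend on how this blueprint is registered.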
def format_item(t,uuid,etag,modified,version,parent,data,siblings,ids):
r = format_list_item(t,uuid,etag,modified,version,parent)
del r["idigbio:links"]["".join(t[:-1])]
for l in r["idigbio:links"]:
r["idigbio:links"][l] = [r["idigbio:links"][l]]
l = {}
if siblings is not None:
for k in siblings:
l[k] = []
for i in siblings[k]:
l[k].append(url_for(".item",t=k,u=i,_external=True))
r["idigbio:data"] = data
r["idigbio:links"].update(l)
r["idigbio:recordIds"] = ids
return r
@this_version.route('/<string:t>/<uuid:u>/<string:st>', methods=['GET','OPTIONS'])
@crossdomain(origin="*")
def subitem(t,u,st):
if not (t in current_app.config["SUPPORTED_TYPES"] and st in current_app.config["SUPPORTED_TYPES"]):
return json_error(404)
limit = request.args.get("limit")
if limit is not None:
limit = int(limit)
else:
limit = 100
offset = request.args.get("offset")
if offset is not None:
offset = int(offset)
else:
offset = 0
r = {}
l = [
format_list_item(
st,
v["uuid"],
v["etag"],
v["modified"],
v["version"],
v["parent"],
) for v in idbmodel.get_children_list(str(u), "".join(st[:-1]),limit=limit,offset=offset)
]
r["idigbio:items"] = l
r["idigbio:itemCount"] = idbmodel.get_children_count(str(u), "".join(st[:-1]))
return jsonify(r)
@this_version.route('/<string:t>/<uuid:u>', methods=['GET','OPTIONS'])
@crossdomain(origin="*")
def item(t,u):
if t not in current_app.config["SUPPORTED_TYPES"]:
return json_error(404)
version = request.args.get("version")
v = idbmodel.get_item(str(u), version=version)
if v is not None:
if v["data"] is None:
return json_error(500)
if v["type"] + "s" == t:
r = format_item(
t,
v["uuid"],
v["etag"],
v["modified"],
v["version"],
v["parent"],
v["data"],
v["siblings"],
v["recordids"]
)
return jsonify(r)
else:
return json_error(404)
else:
return json_error(404)
@this_version.route('/<string:t>', methods=['GET','OPTIONS'])
@crossdomain(origin="*")
def list(t):
if t not in current_app.config["SUPPORTED_TYPES"]:
return json_error(404)
limit = request.args.get("limit")
if limit is not None:
limit = int(limit)
else:
limit = 100
offset = request.args.get("offset")
if offset is not None:
offset = int(offset)
else:
offset = 0
r = {}
l = [
format_list_item(
t,
v["uuid"],
v["etag"],
v["modified"],
v["version"],
v["parent"],
) for v in idbmodel.get_type_list("".join(t[:-1]),limit=limit,offset=offset)
]
r["idigbio:items"] = l
r["idigbio:itemCount"] = idbmodel.get_type_count("".join(t[:-1]))
return jsonify(r)
@this_version.route('/', methods=['GET','OPTIONS'])
@crossdomain(origin="*")
def index():
r = {}
for t in current_app.config["SUPPORTED_TYPES"]:
r[t] = url_for(".list",t=t,_external=True)
return jsonify(r)
| iDigBio/idb-backend | idb/data_api/v1.py | Python | gpl-3.0 | 4,235 | 0.012043 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Timeline visualization for TensorFlow using Chrome Trace Format."""
import collections
import copy
import json
import re
# The timeline target is usually imported as part of BUILD target
# "platform_test", which includes also includes the "platform"
# dependency. This is why the logging import here is okay.
from tensorflow.python.platform import build_info
from tensorflow.python.platform import tf_logging as logging
class AllocationMaximum(collections.namedtuple(
'AllocationMaximum', ('timestamp', 'num_bytes', 'tensors'))):
"""Stores the maximum allocation for a given allocator within the timelne.
Parameters:
timestamp: `tensorflow::Env::NowMicros()` when this maximum was reached.
num_bytes: the total memory used at this time.
tensors: the set of tensors allocated at this time.
"""
pass
class StepStatsAnalysis(collections.namedtuple(
'StepStatsAnalysis', ('chrome_trace', 'allocator_maximums'))):
"""Stores the step stats analysis output.
Parameters:
chrome_trace: A dict containing the chrome trace analysis.
allocator_maximums: A dict mapping allocator names to AllocationMaximum.
"""
pass
class _ChromeTraceFormatter(object):
"""A helper class for generating traces in Chrome Trace Format."""
def __init__(self, show_memory=False):
"""Constructs a new Chrome Trace formatter."""
self._show_memory = show_memory
self._events = []
self._metadata = []
def _create_event(self, ph, category, name, pid, tid, timestamp):
"""Creates a new Chrome Trace event.
For details of the file format, see:
https://github.com/catapult-project/catapult/blob/master/tracing/README.md
Args:
ph: The type of event - usually a single character.
category: The event category as a string.
name: The event name as a string.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
timestamp: The timestamp of this event as a long integer.
Returns:
A JSON compatible event object.
"""
event = {}
event['ph'] = ph
event['cat'] = category
event['name'] = name
event['pid'] = pid
event['tid'] = tid
event['ts'] = timestamp
return event
def emit_pid(self, name, pid):
"""Adds a process metadata event to the trace.
Args:
name: The process name as a string.
pid: Identifier of the process as an integer.
"""
event = {}
event['name'] = 'process_name'
event['ph'] = 'M'
event['pid'] = pid
event['args'] = {'name': name}
self._metadata.append(event)
def emit_tid(self, name, pid, tid):
"""Adds a thread metadata event to the trace.
Args:
name: The thread name as a string.
pid: Identifier of the process as an integer.
tid: Identifier of the thread as an integer.
"""
event = {}
event['name'] = 'thread_name'
event['ph'] = 'M'
event['pid'] = pid
event['tid'] = tid
event['args'] = {'name': name}
self._metadata.append(event)
def emit_region(self, timestamp, duration, pid, tid, category, name, args):
"""Adds a region event to the trace.
Args:
timestamp: The start timestamp of this region as a long integer.
duration: The duration of this region as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
category: The event category as a string.
name: The event name as a string.
args: A JSON-compatible dictionary of event arguments.
"""
event = self._create_event('X', category, name, pid, tid, timestamp)
event['dur'] = duration
event['args'] = args
self._events.append(event)
def emit_obj_create(self, category, name, timestamp, pid, tid, object_id):
"""Adds an object creation event to the trace.
Args:
category: The event category as a string.
name: The event name as a string.
timestamp: The timestamp of this event as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
object_id: Identifier of the object as an integer.
"""
event = self._create_event('N', category, name, pid, tid, timestamp)
event['id'] = object_id
self._events.append(event)
def emit_obj_delete(self, category, name, timestamp, pid, tid, object_id):
"""Adds an object deletion event to the trace.
Args:
category: The event category as a string.
name: The event name as a string.
timestamp: The timestamp of this event as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
object_id: Identifier of the object as an integer.
"""
event = self._create_event('D', category, name, pid, tid, timestamp)
event['id'] = object_id
self._events.append(event)
def emit_obj_snapshot(self, category, name, timestamp, pid, tid, object_id,
snapshot):
"""Adds an object snapshot event to the trace.
Args:
category: The event category as a string.
name: The event name as a string.
timestamp: The timestamp of this event as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
object_id: Identifier of the object as an integer.
snapshot: A JSON-compatible representation of the object.
"""
event = self._create_event('O', category, name, pid, tid, timestamp)
event['id'] = object_id
event['args'] = {'snapshot': snapshot}
self._events.append(event)
def emit_flow_start(self, name, timestamp, pid, tid, flow_id):
"""Adds a flow start event to the trace.
When matched with a flow end event (with the same 'flow_id') this will
cause the trace viewer to draw an arrow between the start and end events.
Args:
name: The event name as a string.
timestamp: The timestamp of this event as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
flow_id: Identifier of the flow as an integer.
"""
event = self._create_event('s', 'DataFlow', name, pid, tid, timestamp)
event['id'] = flow_id
self._events.append(event)
def emit_flow_end(self, name, timestamp, pid, tid, flow_id):
"""Adds a flow end event to the trace.
When matched with a flow start event (with the same 'flow_id') this will
cause the trace viewer to draw an arrow between the start and end events.
Args:
name: The event name as a string.
timestamp: The timestamp of this event as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
flow_id: Identifier of the flow as an integer.
"""
event = self._create_event('t', 'DataFlow', name, pid, tid, timestamp)
event['id'] = flow_id
self._events.append(event)
def emit_counter(self, category, name, pid, timestamp, counter, value):
"""Emits a record for a single counter.
Args:
category: The event category as a string.
name: The event name as a string.
pid: Identifier of the process generating this event as an integer.
timestamp: The timestamp of this event as a long integer.
counter: Name of the counter as a string.
value: Value of the counter as an integer.
"""
event = self._create_event('C', category, name, pid, 0, timestamp)
event['args'] = {counter: value}
self._events.append(event)
def emit_counters(self, category, name, pid, timestamp, counters):
"""Emits a counter record for the dictionary 'counters'.
Args:
category: The event category as a string.
name: The event name as a string.
pid: Identifier of the process generating this event as an integer.
timestamp: The timestamp of this event as a long integer.
counters: Dictionary of counter values.
"""
event = self._create_event('C', category, name, pid, 0, timestamp)
event['args'] = counters.copy()
self._events.append(event)
def format_to_string(self, pretty=False):
"""Formats the chrome trace to a string.
Args:
pretty: (Optional.) If True, produce human-readable JSON output.
Returns:
A JSON-formatted string in Chrome Trace format.
"""
trace = {}
trace['traceEvents'] = self._metadata + self._events
if pretty:
return json.dumps(trace, indent=4, separators=(',', ': '))
else:
return json.dumps(trace, separators=(',', ':'))
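# Illustrative output (abridged): format_to_string() serializes the collected
# metadata and events as Chrome Trace JSON, e.g.
#   {"traceEvents": [
#     {"name": "process_name", "ph": "M", "pid": 1, "args": {"name": "/gpu:0 Compute"}},
#     {"ph": "X", "cat": "Op", "name": "MatMul", "pid": 1, "tid": 0,
#      "ts": 1502229800000, "dur": 42, "args": {"name": "dense/MatMul", "op": "MatMul"}}
#   ]}
# Such a file can be loaded in Chrome via chrome://tracing.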
class _TensorTracker(object):
"""An internal class to track the lifetime of a Tensor."""
def __init__(self, name, object_id, timestamp, pid, allocator, num_bytes):
"""Creates an object to track tensor references.
This class is not thread safe and is intended only for internal use by
the 'Timeline' class in this file.
Args:
name: The name of the Tensor as a string.
object_id: Chrome Trace object identifier assigned for this Tensor.
timestamp: The creation timestamp of this event as a long integer.
pid: Process identifier of the associated device, as an integer.
allocator: Name of the allocator used to create the Tensor.
num_bytes: Number of bytes allocated (long integer).
Returns:
A 'TensorTracker' object.
"""
self._name = name
self._pid = pid
self._object_id = object_id
self._create_time = timestamp
self._allocator = allocator
self._num_bytes = num_bytes
self._ref_times = []
self._unref_times = []
@property
def name(self):
"""Name of this tensor."""
return self._name
@property
def pid(self):
"""ID of the process which created this tensor (an integer)."""
return self._pid
@property
def create_time(self):
"""Timestamp when this tensor was created (long integer)."""
return self._create_time
@property
def object_id(self):
"""Returns the object identifier of this tensor (integer)."""
return self._object_id
@property
def num_bytes(self):
"""Size of this tensor in bytes (long integer)."""
return self._num_bytes
@property
def allocator(self):
"""Name of the allocator used to create this tensor (string)."""
return self._allocator
@property
def last_unref(self):
"""Last unreference timestamp of this tensor (long integer)."""
return max(self._unref_times)
def add_ref(self, timestamp):
"""Adds a reference to this tensor with the specified timestamp.
Args:
timestamp: Timestamp of object reference as an integer.
"""
self._ref_times.append(timestamp)
def add_unref(self, timestamp):
"""Adds an unref to this tensor with the specified timestamp.
Args:
timestamp: Timestamp of object unreference as an integer.
"""
self._unref_times.append(timestamp)
class Timeline(object):
"""A class for visualizing execution timelines of TensorFlow steps."""
def __init__(self, step_stats, graph=None):
"""Constructs a new Timeline.
A 'Timeline' is used for visualizing the execution of a TensorFlow
computation. It shows the timings and concurrency of execution at
the granularity of TensorFlow Ops.
This class is not thread safe.
Args:
step_stats: The 'StepStats' proto recording execution times.
graph: (Optional) The 'Graph' that was executed.
"""
self._origin_step_stats = step_stats
self._step_stats = None
self._graph = graph
self._chrome_trace = _ChromeTraceFormatter()
self._next_pid = 0
self._device_pids = {} # device name -> pid for compute activity.
self._tensor_pids = {} # device name -> pid for tensors.
self._tensors = {} # tensor_name -> TensorTracker
self._next_flow_id = 0
self._flow_starts = {} # tensor_name -> (timestamp, pid, tid)
self._alloc_times = {} # tensor_name -> ( time, allocator, size )
self._allocator_maximums = {} # allocator name => maximum bytes long
def _alloc_pid(self):
"""Allocate a process Id."""
pid = self._next_pid
self._next_pid += 1
return pid
def _alloc_flow_id(self):
"""Allocate a flow Id."""
flow_id = self._next_flow_id
self._next_flow_id += 1
return flow_id
def _parse_op_label(self, label):
"""Parses the fields in a node timeline label."""
# Expects labels of the form: name = op(arg, arg, ...).
match = re.match(r'(.*) = (.*)\((.*)\)', label)
if match is None:
return 'unknown', 'unknown', []
nn, op, inputs = match.groups()
if not inputs:
inputs = []
else:
inputs = inputs.split(', ')
return nn, op, inputs
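# For example, the label 'add_1 = Add(x, y)' parses to ('add_1', 'Add', ['x', 'y']),
# and a label that does not match the pattern yields ('unknown', 'unknown', []).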
def _parse_kernel_label(self, label, node_name):
"""Parses the fields in a node timeline label."""
# Expects labels of the form: retval (arg) detail @@annotation
start = label.find('@@')
end = label.find('#')
if start >= 0 and end >= 0 and start + 2 < end:
node_name = label[start + 2:end]
# Node names should always have the form 'name:op'.
fields = node_name.split(':') + ['unknown']
name, op = fields[:2]
return name, op
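# For example (hypothetical GPU tracer label), 'void gemm(...) @@dense/MatMul:MatMul#id=1#'
# yields node_name 'dense/MatMul:MatMul', which splits into name='dense/MatMul'
# and op='MatMul'.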
def _assign_lanes(self):
"""Assigns non-overlapping lanes for the activities on each device."""
for device_stats in self._step_stats.dev_stats:
# TODO(pbar): Genuine thread IDs in NodeExecStats might be helpful.
lanes = [0]
for ns in device_stats.node_stats:
l = -1
for (i, lts) in enumerate(lanes):
if ns.all_start_micros > lts:
l = i
lanes[l] = ns.all_start_micros + ns.all_end_rel_micros
break
if l < 0:
l = len(lanes)
lanes.append(ns.all_start_micros + ns.all_end_rel_micros)
ns.thread_id = l
def _emit_op(self, nodestats, pid, is_gputrace):
"""Generates a Chrome Trace event to show Op execution.
Args:
nodestats: The 'NodeExecStats' proto recording op execution.
pid: The pid assigned for the device where this op ran.
is_gputrace: If True then this op came from the GPUTracer.
"""
node_name = nodestats.node_name
start = nodestats.all_start_micros
duration = nodestats.all_end_rel_micros
tid = nodestats.thread_id
inputs = []
if is_gputrace:
node_name, op = self._parse_kernel_label(nodestats.timeline_label,
node_name)
elif node_name == 'RecvTensor':
# RPC tracing does not use the standard timeline_label format.
op = 'RecvTensor'
else:
_, op, inputs = self._parse_op_label(nodestats.timeline_label)
args = {'name': node_name, 'op': op}
if build_info.build_info['is_rocm_build']:
args['kernel'] = nodestats.timeline_label.split('@@')[0]
for i, iname in enumerate(inputs):
args['input%d' % i] = iname
self._chrome_trace.emit_region(start, duration, pid, tid, 'Op', op, args)
def _emit_tensor_snapshot(self, tensor, timestamp, pid, tid, value):
"""Generate Chrome Trace snapshot event for a computed Tensor.
Args:
tensor: A 'TensorTracker' object.
timestamp: The timestamp of this snapshot as a long integer.
pid: The pid assigned for showing the device where this op ran.
tid: The tid of the thread computing the tensor snapshot.
value: A JSON-compliant snapshot of the object.
"""
desc = str(value.tensor_description).replace('"', '')
snapshot = {'tensor_description': desc}
self._chrome_trace.emit_obj_snapshot('Tensor', tensor.name, timestamp, pid,
tid, tensor.object_id, snapshot)
def _produce_tensor(self, name, timestamp, tensors_pid, allocator, num_bytes):
object_id = len(self._tensors)
tensor = _TensorTracker(name, object_id, timestamp, tensors_pid, allocator,
num_bytes)
self._tensors[name] = tensor
return tensor
def _is_gputrace_device(self, device_name):
"""Returns true if this device is part of the GPUTracer logging."""
return '/stream:' in device_name or '/memcpy' in device_name
def _allocate_pids(self):
"""Allocate fake process ids for each device in the StepStats."""
self._allocators_pid = self._alloc_pid()
self._chrome_trace.emit_pid('Allocators', self._allocators_pid)
# Add processes in the Chrome trace to show compute and data activity.
for dev_stats in self._step_stats.dev_stats:
device_pid = self._alloc_pid()
self._device_pids[dev_stats.device] = device_pid
tensors_pid = self._alloc_pid()
self._tensor_pids[dev_stats.device] = tensors_pid
self._chrome_trace.emit_pid(dev_stats.device + ' Compute', device_pid)
self._chrome_trace.emit_pid(dev_stats.device + ' Tensors', tensors_pid)
def _analyze_tensors(self, show_memory):
"""Analyze tensor references to track dataflow."""
for dev_stats in self._step_stats.dev_stats:
device_pid = self._device_pids[dev_stats.device]
tensors_pid = self._tensor_pids[dev_stats.device]
for node_stats in dev_stats.node_stats:
tid = node_stats.thread_id
node_name = node_stats.node_name
start_time = node_stats.all_start_micros
end_time = node_stats.all_start_micros + node_stats.all_end_rel_micros
for index, output in enumerate(node_stats.output):
if index:
output_name = '%s:%d' % (node_name, index)
else:
output_name = node_name
allocation = output.tensor_description.allocation_description
num_bytes = allocation.requested_bytes
allocator_name = allocation.allocator_name
tensor = self._produce_tensor(output_name, start_time, tensors_pid,
allocator_name, num_bytes)
tensor.add_ref(start_time)
tensor.add_unref(end_time)
self._flow_starts[output_name] = (end_time, device_pid, tid)
if show_memory:
self._chrome_trace.emit_obj_create('Tensor', output_name,
start_time, tensors_pid, tid,
tensor.object_id)
self._emit_tensor_snapshot(tensor, end_time - 1, tensors_pid, tid,
output)
def _show_compute(self, show_dataflow):
"""Visualize the computation activity."""
for dev_stats in self._step_stats.dev_stats:
device_name = dev_stats.device
device_pid = self._device_pids[device_name]
is_gputrace = self._is_gputrace_device(device_name)
for node_stats in dev_stats.node_stats:
tid = node_stats.thread_id
start_time = node_stats.all_start_micros
end_time = node_stats.all_start_micros + node_stats.all_end_rel_micros
self._emit_op(node_stats, device_pid, is_gputrace)
if is_gputrace or node_stats.node_name == 'RecvTensor':
continue
_, _, inputs = self._parse_op_label(node_stats.timeline_label)
for input_name in inputs:
if input_name not in self._tensors:
# This can happen when partitioning has inserted a Send/Recv.
# We remove the numeric suffix so that the dataflow appears to
# come from the original node. Ideally, the StepStats would
# contain logging for the Send and Recv nodes.
index = input_name.rfind('/_')
if index > 0:
input_name = input_name[:index]
if input_name in self._tensors:
tensor = self._tensors[input_name]
tensor.add_ref(start_time)
tensor.add_unref(end_time - 1)
if show_dataflow:
# We use a different flow ID for every graph edge.
create_time, create_pid, create_tid = self._flow_starts[
input_name]
# Don't add flows when producer and consumer ops are on the same
# pid/tid since the horizontal arrows clutter the visualization.
if create_pid != device_pid or create_tid != tid:
flow_id = self._alloc_flow_id()
self._chrome_trace.emit_flow_start(input_name, create_time,
create_pid, create_tid,
flow_id)
self._chrome_trace.emit_flow_end(input_name, start_time,
device_pid, tid, flow_id)
else:
logging.vlog(1, 'Can\'t find tensor %s - removed by CSE?',
input_name)
def _show_memory_counters(self):
"""Produce a counter series for each memory allocator."""
# Iterate over all tensor trackers to build a list of allocations and
# frees for each allocator. Then sort the lists and emit a cumulative
# counter series for each allocator.
allocations = {}
for name in self._tensors:
tensor = self._tensors[name]
self._chrome_trace.emit_obj_delete('Tensor', name, tensor.last_unref,
tensor.pid, 0, tensor.object_id)
allocator = tensor.allocator
if allocator not in allocations:
allocations[allocator] = []
num_bytes = tensor.num_bytes
allocations[allocator].append((tensor.create_time, num_bytes, name))
allocations[allocator].append((tensor.last_unref, -num_bytes, name))
alloc_maxes = {}
# Generate a counter series showing total allocations for each allocator.
for allocator in allocations:
alloc_list = allocations[allocator]
alloc_list.sort()
total_bytes = 0
alloc_tensor_set = set()
alloc_maxes[allocator] = AllocationMaximum(
timestamp=0, num_bytes=0, tensors=set())
for time, num_bytes, name in sorted(
alloc_list, key=lambda allocation: allocation[0]):
total_bytes += num_bytes
if num_bytes < 0:
alloc_tensor_set.discard(name)
else:
alloc_tensor_set.add(name)
if total_bytes > alloc_maxes[allocator].num_bytes:
alloc_maxes[allocator] = AllocationMaximum(
timestamp=time,
num_bytes=total_bytes,
tensors=copy.deepcopy(alloc_tensor_set))
self._chrome_trace.emit_counter('Memory', allocator,
self._allocators_pid, time, allocator,
total_bytes)
self._allocator_maximums = alloc_maxes
def _preprocess_op_time(self, op_time):
"""Update the start and end time of ops in step stats.
Args:
op_time: How the execution time of an op is shown in the timeline. Possible
values are "schedule", "gpu" and "all". "schedule" shows the op from the
time it is scheduled to the end of the scheduling; note that by the end of
its scheduling its async kernels may not have started yet, so the default
values from step_stats are shown. "gpu" shows the op with the execution
time of its kernels on the GPU. "all" shows the op from the start of its
scheduling to the end of its last kernel.
"""
if op_time == 'schedule':
self._step_stats = self._origin_step_stats
return
self._step_stats = copy.deepcopy(self._origin_step_stats)
# Separate job task and gpu tracer stream
stream_all_stats = []
job_stats = []
for stats in self._step_stats.dev_stats:
if '/stream:all' in stats.device:
stream_all_stats.append(stats)
elif '/job' in stats.device:
job_stats.append(stats)
# Record the start time of the first kernel and the end time of
# the last gpu kernel for all ops.
op_gpu_start = {}
op_gpu_end = {}
for stats in stream_all_stats:
for kernel in stats.node_stats:
name, _ = self._parse_kernel_label(kernel.timeline_label,
kernel.node_name)
start = kernel.all_start_micros
end = kernel.all_start_micros + kernel.all_end_rel_micros
if name in op_gpu_start:
op_gpu_start[name] = min(op_gpu_start[name], start)
op_gpu_end[name] = max(op_gpu_end[name], end)
else:
op_gpu_start[name] = start
op_gpu_end[name] = end
# Update the start and end time of each op according to the op_time
for stats in job_stats:
for op in stats.node_stats:
if op.node_name in op_gpu_start:
end = max(op_gpu_end[op.node_name],
op.all_start_micros + op.all_end_rel_micros)
if op_time == 'gpu':
op.all_start_micros = op_gpu_start[op.node_name]
op.all_end_rel_micros = end - op.all_start_micros
def analyze_step_stats(self,
show_dataflow=True,
show_memory=True,
op_time='schedule'):
"""Analyze the step stats and format it into Chrome Trace Format.
Args:
show_dataflow: (Optional.) If True, add flow events to the trace
connecting producers and consumers of tensors.
show_memory: (Optional.) If True, add object snapshot events to the trace
showing the sizes and lifetimes of tensors.
op_time: (Optional.) How the execution time of an op is shown in the
timeline. Possible values are "schedule", "gpu" and "all". "schedule" shows
the op from the time it is scheduled to the end of the scheduling; note
that by the end of its scheduling its async kernels may not have started
yet, so the default values from step_stats are shown. "gpu" shows the op
with the execution time of its kernels on the GPU. "all" shows the op from
the start of its scheduling to the end of its last kernel.
Returns:
A 'StepStatsAnalysis' object.
"""
self._preprocess_op_time(op_time)
self._allocate_pids()
self._assign_lanes()
self._analyze_tensors(show_memory)
self._show_compute(show_dataflow)
if show_memory:
self._show_memory_counters()
return StepStatsAnalysis(
chrome_trace=self._chrome_trace,
allocator_maximums=self._allocator_maximums)
def generate_chrome_trace_format(self,
show_dataflow=True,
show_memory=False,
op_time='schedule'):
"""Produces a trace in Chrome Trace Format.
Args:
show_dataflow: (Optional.) If True, add flow events to the trace
connecting producers and consumers of tensors.
show_memory: (Optional.) If True, add object snapshot events to the trace
showing the sizes and lifetimes of tensors.
op_time: (Optional.) How the execution time of an op is shown in the
timeline. Possible values are "schedule", "gpu" and "all". "schedule" shows
the op from the time it is scheduled to the end of the scheduling; note
that by the end of its scheduling its async kernels may not have started
yet, so the default values from step_stats are shown. "gpu" shows the op
with the execution time of its kernels on the GPU. "all" shows the op from
the start of its scheduling to the end of its last kernel.
Returns:
A JSON formatted string in Chrome Trace format.
"""
step_stats_analysis = self.analyze_step_stats(
show_dataflow=show_dataflow, show_memory=show_memory, op_time=op_time)
return step_stats_analysis.chrome_trace.format_to_string(pretty=True)
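# A typical usage sketch (TF1-style Session API, assuming `sess`, `fetches`,
# and `feed_dict` already exist):
#
#   import tensorflow as tf
#   from tensorflow.python.client import timeline
#   run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
#   run_metadata = tf.RunMetadata()
#   sess.run(fetches, feed_dict, options=run_options, run_metadata=run_metadata)
#   tl = timeline.Timeline(run_metadata.step_stats)
#   with open('timeline.json', 'w') as f:
#       f.write(tl.generate_chrome_trace_format())
#
# The resulting JSON file can be inspected in chrome://tracing.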
| tensorflow/tensorflow | tensorflow/python/client/timeline.py | Python | apache-2.0 | 28,612 | 0.005033 |
import blinker
from concurrency.fields import IntegerVersionField
from django.contrib.auth.models import User
from django.db import models
from django.db.models.signals import post_save
from django.dispatch.dispatcher import receiver
from game.apps.core.models.planet.models import Planet
from game.utils.models import ResourceContainer
from game.utils.polymorph import PolymorphicBase
from jsonfield import JSONField
import game.apps.core.signals
class Building(PolymorphicBase):
level = models.IntegerField(default=1)
data = JSONField(default={})
planet = models.ForeignKey(Planet, related_name="buildings")
version = IntegerVersionField()
user = models.ForeignKey(User, related_name="buildings")
def save(self, *args, **kwargs):
signal = blinker.signal(game.apps.core.signals.building % self.id)
signal.send(self, building=self)
super().save(*args, **kwargs)
class Meta:
app_label = 'core'
ordering = ('id', )
class Citadel(Building):
class Meta:
proxy = True
def process_turn(self):
warehouse = self.user.buildings.get(type='Warehouse')
warehouse.add_resource("Aluminium", 10)
warehouse.add_resource("Steel", 10)
warehouse.save()
class Warehouse(Building, ResourceContainer):
class Meta:
proxy = True
class Terminal(Building):
class Meta:
proxy = True
class Mine(Building):
class Meta:
proxy = True
#TODO use Django ready()
@receiver(post_save, sender=User, dispatch_uid="create_default_buildings")
def create_default_buildings(sender, **kwargs):
if kwargs['created']:
Citadel.objects.create(user=kwargs['instance'], planet_id=1) # TODO don't hard-code planet id
Warehouse.objects.create(user=kwargs['instance'], planet_id=1) # TODO don't hard-code planet id
def get_base(self):
#TODO cache
return self.buildings.get(type="Base")
User.base = property(get_base)
| piotrlewalski/birdstorm | game/apps/core/models/buildings.py | Python | mit | 2,036 | 0.002947 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'slidersd.ui'
#
# Created: Tue Mar 17 23:31:52 2015
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(392, 74)
self.verticalLayout = QtGui.QVBoxLayout(Form)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.label = QtGui.QLabel(Form)
self.label.setObjectName(_fromUtf8("label"))
self.horizontalLayout_2.addWidget(self.label)
self.value = QtGui.QLabel(Form)
self.value.setText(_fromUtf8(""))
self.value.setObjectName(_fromUtf8("value"))
self.horizontalLayout_2.addWidget(self.value)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setSizeConstraint(QtGui.QLayout.SetFixedSize)
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
spacerItem = QtGui.QSpacerItem(10, 20, QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.label_3 = QtGui.QLabel(Form)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.horizontalLayout.addWidget(self.label_3)
self.horizontalSlider = QtGui.QSlider(Form)
self.horizontalSlider.setOrientation(QtCore.Qt.Horizontal)
self.horizontalSlider.setObjectName(_fromUtf8("horizontalSlider"))
self.horizontalLayout.addWidget(self.horizontalSlider)
self.label_4 = QtGui.QLabel(Form)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.horizontalLayout.addWidget(self.label_4)
self.verticalLayout.addLayout(self.horizontalLayout)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(_translate("Form", "Form", None))
self.label.setText(_translate("Form", "a = ", None))
self.label_3.setText(_translate("Form", "-10", None))
self.label_4.setText(_translate("Form", "10", None))
| MSHallOpenSoft/plotter | Thesidetab/sliderder.py | Python | gpl-2.0 | 2,815 | 0.001421 |
"""
Platform for a Generic Modbus Thermostat.
This uses a setpoint and process
value within the controller, so both the current temperature register and the
target temperature register need to be configured.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/climate.modbus/
"""
import logging
import struct
import voluptuous as vol
from homeassistant.const import (
CONF_NAME, CONF_SLAVE, ATTR_TEMPERATURE)
from homeassistant.components.climate import (
ClimateDevice, PLATFORM_SCHEMA, SUPPORT_TARGET_TEMPERATURE)
from homeassistant.components import modbus
import homeassistant.helpers.config_validation as cv
DEPENDENCIES = ['modbus']
# Parameters not defined by homeassistant.const
CONF_TARGET_TEMP = 'target_temp_register'
CONF_CURRENT_TEMP = 'current_temp_register'
CONF_DATA_TYPE = 'data_type'
CONF_COUNT = 'data_count'
CONF_PRECISION = 'precision'
DATA_TYPE_INT = 'int'
DATA_TYPE_UINT = 'uint'
DATA_TYPE_FLOAT = 'float'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_SLAVE): cv.positive_int,
vol.Required(CONF_TARGET_TEMP): cv.positive_int,
vol.Required(CONF_CURRENT_TEMP): cv.positive_int,
vol.Optional(CONF_DATA_TYPE, default=DATA_TYPE_FLOAT):
vol.In([DATA_TYPE_INT, DATA_TYPE_UINT, DATA_TYPE_FLOAT]),
vol.Optional(CONF_COUNT, default=2): cv.positive_int,
vol.Optional(CONF_PRECISION, default=1): cv.positive_int
})
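# Example configuration.yaml entry matching the schema above (hypothetical
# register numbers; a modbus hub is assumed to be configured separately):
#
#   climate:
#     - platform: modbus
#       name: Living Room Thermostat
#       slave: 1
#       target_temp_register: 40001
#       current_temp_register: 40002
#       data_type: float
#       data_count: 2
#       precision: 1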
_LOGGER = logging.getLogger(__name__)
SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Modbus Thermostat Platform."""
name = config.get(CONF_NAME)
modbus_slave = config.get(CONF_SLAVE)
target_temp_register = config.get(CONF_TARGET_TEMP)
current_temp_register = config.get(CONF_CURRENT_TEMP)
data_type = config.get(CONF_DATA_TYPE)
count = config.get(CONF_COUNT)
precision = config.get(CONF_PRECISION)
add_entities([ModbusThermostat(name, modbus_slave,
target_temp_register, current_temp_register,
data_type, count, precision)], True)
class ModbusThermostat(ClimateDevice):
"""Representation of a Modbus Thermostat."""
def __init__(self, name, modbus_slave, target_temp_register,
current_temp_register, data_type, count, precision):
"""Initialize the unit."""
self._name = name
self._slave = modbus_slave
self._target_temperature_register = target_temp_register
self._current_temperature_register = current_temp_register
self._target_temperature = None
self._current_temperature = None
self._data_type = data_type
self._count = int(count)
self._precision = precision
self._structure = '>f'
data_types = {DATA_TYPE_INT: {1: 'h', 2: 'i', 4: 'q'},
DATA_TYPE_UINT: {1: 'H', 2: 'I', 4: 'Q'},
DATA_TYPE_FLOAT: {1: 'e', 2: 'f', 4: 'd'}}
self._structure = '>{}'.format(data_types[self._data_type]
[self._count])
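# Illustrative: with data_type='float' and data_count=2 the format becomes '>f',
# so two 16-bit registers read as [0x41A4, 0x0000] join to b'\x41\xa4\x00\x00'
# and unpack to 20.5 (see read_register below).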
@property
def supported_features(self):
"""Return the list of supported features."""
return SUPPORT_FLAGS
def update(self):
"""Update Target & Current Temperature."""
self._target_temperature = self.read_register(
self._target_temperature_register)
self._current_temperature = self.read_register(
self._current_temperature_register)
@property
def name(self):
"""Return the name of the climate device."""
return self._name
@property
def current_temperature(self):
"""Return the current temperature."""
return self._current_temperature
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self._target_temperature
def set_temperature(self, **kwargs):
"""Set new target temperature."""
target_temperature = kwargs.get(ATTR_TEMPERATURE)
if target_temperature is None:
return
byte_string = struct.pack(self._structure, target_temperature)
register_value = struct.unpack('>h', byte_string[0:2])[0]
try:
self.write_register(self._target_temperature_register,
register_value)
except AttributeError as ex:
_LOGGER.error(ex)
def read_register(self, register):
"""Read holding register using the modbus hub slave."""
try:
result = modbus.HUB.read_holding_registers(self._slave, register,
self._count)
except AttributeError as ex:
_LOGGER.error(ex)
byte_string = b''.join(
[x.to_bytes(2, byteorder='big') for x in result.registers])
val = struct.unpack(self._structure, byte_string)[0]
register_value = format(val, '.{}f'.format(self._precision))
return register_value
def write_register(self, register, value):
"""Write register using the modbus hub slave."""
modbus.HUB.write_registers(self._slave, register, [value, 0])
| PetePriority/home-assistant | homeassistant/components/modbus/climate.py | Python | apache-2.0 | 5,304 | 0 |
# -*- coding: utf-8 -*-
from greek_stemmer.closets.word_exceptions import exceptions
def test_word_exceptions():
assert isinstance(exceptions, dict)
| kpech21/Greek-Stemmer | tests/closets/test_word_exceptions.py | Python | lgpl-3.0 | 157 | 0 |
from pypif.obj import Property, Scalar
from .base import DFTParser, Value_if_true, InvalidIngesterException
import os
import re
from ase.io.vasp import read_vasp_out
from pypif.obj import Value, FileReference
from dftparse.vasp.outcar_parser import OutcarParser
from dftparse.vasp.eigenval_parser import EigenvalParser
class VaspParser(DFTParser):
'''
Parser for VASP calculations
'''
def __init__(self, files):
super(VaspParser, self).__init__(files)
self.settings = {}
parser = OutcarParser()
# Find the outcar file
def _find_file(name):
"""Find a filename that contains a certain string"""
name = name.upper()
my_file = None
for f in self._files:
if os.path.basename(f).upper().startswith(name):
if my_file is not None:
raise InvalidIngesterException('Found more than one {} file'.format(name))
my_file = f
return my_file
self.outcar = _find_file('OUTCAR')
if self.outcar is None:
raise InvalidIngesterException('OUTCAR not found!')
with open(self.outcar, "r") as fr:
for parsed_line in parser.parse(fr.readlines()):
for k, v in parsed_line.items():
if k in self.settings:
self.settings[k].append(v)
else:
self.settings[k] = [v]
# Find the DOSCAR, EIGENVAL, and INCAR files
# None of these are required so we do not throw exceptions
self.incar = _find_file('INCAR')
self.poscar = _find_file('POSCAR')
self.doscar = _find_file('DOSCAR')
self.eignval = _find_file('EIGENVAL')
def get_name(self): return "VASP"
def get_output_structure(self):
self.atoms = read_vasp_out(self.outcar)
return self.atoms
def get_outcar(self):
raw_path = self.outcar
if raw_path[0:2] == "./":
raw_path = raw_path[2:]
return Property(files=[FileReference(
relative_path=raw_path
)])
def get_incar(self):
if self.incar is None: return None
raw_path = self.incar
if raw_path[0:2] == "./":
raw_path = raw_path[2:]
return Value(files=[FileReference(
relative_path=raw_path
)])
def get_poscar(self):
if self.poscar is None: return None
raw_path = self.poscar
if raw_path[0:2] == "./":
raw_path = raw_path[2:]
return Value(files=[FileReference(
relative_path=raw_path
)])
def get_cutoff_energy(self):
# Open up the OUTCAR
with open(self.outcar, 'r') as fp:
# Look for ENCUT
for line in fp:
if "ENCUT" in line:
words = line.split()
return Value(scalars=[Scalar(value=float(words[2]))], units=words[3])
# Error handling: ENCUT not found
raise Exception('ENCUT not found')
@Value_if_true
def uses_SOC(self):
# Open up the OUTCAR
with open(self.outcar) as fp:
#look for LSORBIT
for line in fp:
if "LSORBIT" in line:
words = line.split()
return words[2] == 'T'
# Error handling: LSORBIT not found
raise Exception('LSORBIT not found')
@Value_if_true
def is_relaxed(self):
# Open up the OUTCAR
with open(self.outcar) as fp:
# Look for NSW
for line in fp:
if "NSW" in line:
words = line.split()
return int(words[2]) != 0
# Error handling: NSW not found
raise Exception('NSW not found')
def get_xc_functional(self):
# Open up the OUTCAR
with open(self.outcar) as fp:
# Look for TITEL
for line in fp:
if "TITEL" in line:
words = line.split()
return Value(scalars=[Scalar(value=words[2])])
def get_pp_name(self):
# Open up the OUTCAR
with open(self.outcar) as fp:
#initialize empty list to store pseudopotentials
pp = []
# Look for TITEL
for line in fp:
if "TITEL" in line:
words = line.split()
pp.append(words[3])
return Value(vectors=[[Scalar(value=x) for x in pp]])
def get_KPPRA(self):
# Open up the OUTCAR
with open(self.outcar) as fp:
#store the number of atoms and number of irreducible K-points
for line in fp:
if "number of ions NIONS =" in line:
words = line.split()
NI = int(words[11])
elif "k-points NKPTS =" in line:
words = line.split()
NIRK = float(words[3])
#check if the number of k-points was reduced by VASP if so, sum all the k-points weight
if "irreducible" in open(self.outcar).read():
fp.seek(0)
for line in fp:
#sum all the k-points weight
if "Coordinates Weight" in line:
NK=0; counter = 0
for line in fp:
if counter == NIRK:
break
NK += float(line.split()[3])
counter += 1
return Value(scalars=[Scalar(value=NI*NK)])
#if k-points were not reduced KPPRA equals the number of atoms * number of irreducible k-points
else:
return Value(scalars=[Scalar(value=NI*NIRK)])
def _is_converged(self):
# Follows the procedure used by qmpy, but without reading the whole file into memory
# Source: https://github.com/wolverton-research-group/qmpy/blob/master/qmpy/analysis/vasp/calculation.py
with open(self.outcar) as fp:
# Part 1: Determine the NELM
nelm = None
for line in fp:
if line.startswith(" NELM ="):
nelm = int(line.split()[2][:-1])
break
# If we don't find it, tell the user
if nelm is None:
raise Exception('NELM not found. Cannot tell if this result is converged')
# Now, loop through the file. What we want to know is whether the last ionic
# step of this file terminates because it converges or because we hit NELM
re_iter = re.compile(r'([0-9]+)\( *([0-9]+)\)')
converged = False
for line in fp:
# Count the ionic steps
if 'Iteration' in line:
ionic, electronic = map(int, re_iter.findall(line)[0])
# If the loop is finished, mark the number of electronic steps
if 'aborting loop' in line:
converged = electronic < nelm
return converged
def get_total_energy(self):
with open(self.outcar) as fp:
last_energy = None
for line in fp:
if line.startswith(' free energy TOTEN'):
last_energy = float(line.split()[4])
if last_energy is None:
return None
return Property(scalars=[Scalar(value=last_energy)], units='eV')
def get_version_number(self):
# Open up the OUTCAR
with open(self.outcar) as fp:
#look for vasp
for line in fp:
if "vasp" in line:
words = line.split()
return words[0].strip('vasp.')
# Error handling: vasp not found
raise Exception('vasp not found')
def get_U_settings(self):
#Open up the OUTCAR
with open(self.outcar) as fp:
#Check if U is used
if "LDAU" in open(self.outcar).read():
U_param = {}
atoms = []
#get the list of pseupotential used
for line in fp:
if "TITEL" in line:
atoms.append(line.split()[3])
#Get the U type used
if "LDAUTYPE" in line:
U_param['Type'] = int(line.split()[-1])
atoms.reverse()
fp.seek(0)
#Get the L value
U_param['Values'] = {}
for line in fp:
for atom, i in zip(atoms, range(len(atoms))):
if "LDAUL" in line:
U_param['Values'][atom] = {'L': int(line.split()[-1-i])}
fp.seek(0)
#Get the U value
for line in fp:
for atom, i in zip(atoms, range(len(atoms))):
if "LDAUU" in line:
U_param['Values'][atom]['U'] = float(line.split()[-1-i])
fp.seek(0)
#Get the J value
for line in fp:
for atom, i in zip(atoms, range(len(atoms))):
if "LDAUJ" in line:
U_param['Values'][atom]['J'] = float(line.split()[-1-i])
return Value(**U_param)
#if U is not used, return None
else:
return None
def get_vdW_settings(self):
#define the name of the vdW methods in function of their keyword
vdW_dict = {'BO':'optPBE-vdW', 'MK':'optB88-vdW', 'ML':'optB86b-vdW','RE':'vdW-DF','OR':'Klimes-Bowler-Michaelides'}
#Open up the OUTCAR
with open(self.outcar) as fp:
#Check if vdW is used
if "LUSE_VDW" in open(self.outcar).read():
#if vdW is used, get its keyword
for line in fp:
if "GGA =" in line:
words = line.split()
return Value(scalars=[Scalar(value=vdW_dict[words[2]])])
#if vdW is not used, return None
else:
return None
def get_pressure(self):
#define pressure unit dictionary (since when is kB = kbar? Come on VASP people)
pressure_dict = {'kB':'kbar'}
#Check if ISIF = 0 is used
if "ISIF = 0" in open(self.outcar).read():
#if ISIF = 0 is used, no pressure is reported, so return None
return None
#if ISIF is not 0 then extract pressure and units
else:
#scan file in reverse to have the final pressure
for line in reversed(open(self.outcar).readlines()):
if "external pressure" in line:
words = line.split()
return Property(scalars=[Scalar(value=float(words[3]))], units=pressure_dict[words[4]])
def get_stresses(self):
#Check if ISIF = 0 is used
if "ISIF = 0" in open(self.outcar).read():
return None
#Check if ISIF = 1 is used
elif "ISIF = 1" in open(self.outcar).read():
return None
else:
#scan file in reverse to have the final pressure
for line in open(self.outcar).readlines():
if "in kB" in line:
words = line.split()
XX = float(words[2]); YY = float(words[3]); ZZ = float(words[4]); XY= float(words[5]); YZ = float(words[6]); ZX = float(words[7])
matrix = [[XX,XY,ZX],[XY,YY,YZ],[ZX,YZ,ZZ]]
wrapped = [[Scalar(value=x) for x in y] for y in matrix]
return Property(matrices=[wrapped], units='kbar')
def get_forces(self):
self.atoms = read_vasp_out(self.outcar)
forces_raw = self.atoms.get_calculator().results['forces'].tolist()
forces_wrapped = [[Scalar(value=x) for x in y] for y in forces_raw]
positions_raw = self.atoms.positions.tolist()
positions_wrapped = [[Scalar(value=x) for x in y] for y in positions_raw]
return Property(
vectors=forces_wrapped,
conditions=Value(name="positions", vectors=positions_wrapped)
)
@staticmethod
def _get_bandgap_from_bands(energies, nelec):
"""Compute difference in conduction band min and valence band max"""
nelec = int(nelec)
valence = [x[nelec-1] for x in energies]
conduction = [x[nelec] for x in energies]
return max(min(conduction) - max(valence), 0.0)
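# Worked example: energies=[[-5.0, -1.2, 0.8], [-4.8, -1.0, 1.1]] with nelec=2
# gives valence max = -1.0 and conduction min = 0.8, so the band gap is 1.8 eV.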
@staticmethod
def _get_bandgap_eigenval(eigenval_fname, outcar_fname):
"""Get the bandgap from the EIGENVAL file"""
with open(outcar_fname, "r") as f:
parser = OutcarParser()
nelec = next(iter(filter(lambda x: "number of electrons" in x, parser.parse(f.readlines()))))["number of electrons"]
with open(eigenval_fname, "r") as f:
eigenval_info = list(EigenvalParser().parse(f.readlines()))
# spin_polarized = (2 == len(next(filter(lambda x: "kpoint" in x, eigenval_info))["occupancies"][0]))
# if spin_polarized:
all_energies = [zip(*x["energies"]) for x in eigenval_info if "energies" in x]
spin_energies = zip(*all_energies)
gaps = [VaspParser._get_bandgap_from_bands(x, nelec/2.0) for x in spin_energies]
return min(gaps)
@staticmethod
def _get_bandgap_doscar(filename):
"""Get the bandgap from the DOSCAR file"""
with open(filename) as fp:
for i in range(6):
l = fp.readline()
efermi = float(l.split()[3])
step1 = fp.readline().split()[0]
step2 = fp.readline().split()[0]
step_size = float(step2)-float(step1)
not_found = True
while not_found:
l = fp.readline().split()
e = float(l.pop(0))
dens = 0.0
for i in range(int(len(l)/2)):
dens += float(l[i])
if e < efermi and dens > 1e-3:
bot = e
elif e > efermi and dens > 1e-3:
top = e
not_found = False
if top - bot < step_size*2:
bandgap = 0.0
else:
bandgap = float(top - bot)
return bandgap
def get_band_gap(self):
"""Get the bandgap, either from the EIGENVAL or DOSCAR files"""
if self.outcar is not None and self.eignval is not None:
bandgap = VaspParser._get_bandgap_eigenval(self.eignval, self.outcar)
elif self.doscar is not None:
bandgap = VaspParser._get_bandgap_doscar(self.doscar)
else:
return None
return Property(scalars=[Scalar(value=round(bandgap, 3))], units='eV')
def get_dos(self):
if self.doscar is None:
return None
#open DOSCAR
with open(self.doscar) as fp:
for i in range(6):
l = fp.readline()
n_step = int(l.split()[2])
energy = []; dos = []
for i in range(n_step):
l = fp.readline().split()
e = float(l.pop(0))
energy.append(Scalar(value=e))
dens = 0
for j in range(int(len(l)/2)):
dens += float(l[j])
dos.append(Scalar(value=dens))
# Convert to property
return Property(scalars=dos, units='number of states per unit cell',
conditions=Value(name='energy', scalars=energy, units='eV'))
def get_total_magnetization(self):
if "total magnetization" not in self.settings:
return None
total_magnetization = self.settings["total magnetization"][-1]
return Property(scalars=[Scalar(value=total_magnetization)], units="Bohr")
def get_final_volume(self):
if "volume of cell" not in self.settings:
return None
final_volume = self.settings["volume of cell"][-1]
return Property(scalars=[Scalar(value=final_volume)], units="Angstrom^3/cell")
def get_initial_volume(self):
if "volume of cell" not in self.settings:
return None
initial_volume = self.settings["volume of cell"][0]
return Property(scalars=[Scalar(value=initial_volume)], units="Angstrom^3/cell")
| CitrineInformatics/pif-dft | dfttopif/parsers/vasp.py | Python | apache-2.0 | 16,836 | 0.00689 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Reading, writing, and describing memory.
"""
import gdb
import pwndbg.compat
import pwndbg.typeinfo
PAGE_SIZE = 0x1000
PAGE_MASK = ~(PAGE_SIZE-1)
MMAP_MIN_ADDR = 0x8000
def read(addr, count, partial=False):
result = ''
try:
result = gdb.selected_inferior().read_memory(addr, count)
except gdb.error as e:
if not partial:
raise
stop_addr = int(e.message.split()[-1], 0)
if stop_addr != addr:
return read(addr, stop_addr-addr)
# QEMU will return the start address as the failed
# read address. Try moving back a few pages at a time.
stop_addr = addr + count
# Move the stop address down to the previous page boundary
stop_addr &= PAGE_MASK
while stop_addr > addr:
result = read(addr, stop_addr-addr)
if result:
return result
# Move down by another page
stop_addr -= PAGE_SIZE
if pwndbg.compat.python3:
result = result.tobytes()
return bytearray(result)
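# Illustrative (hypothetical address): read(0x400000, 4) returns a bytearray of
# the four bytes at that address in the inferior; with partial=True a fault
# partway through the range returns only the readable prefix instead of raising.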
def readtype(gdb_type, addr):
return int(gdb.Value(addr).cast(gdb_type.pointer()).dereference())
def write(addr, data):
gdb.selected_inferior().write_memory(addr, data)
def peek(address):
try: return read(address, 1)
except: pass
return None
def poke(address):
c = peek(address)
if c is None: return False
try: write(address, c)
except: return False
return True
def byte(addr): return readtype(pwndbg.typeinfo.uchar, addr)
def uchar(addr): return readtype(pwndbg.typeinfo.uchar, addr)
def ushort(addr): return readtype(pwndbg.typeinfo.ushort, addr)
def uint(addr): return readtype(pwndbg.typeinfo.uint, addr)
def pvoid(addr): return readtype(pwndbg.typeinfo.pvoid, addr)
def u8(addr): return readtype(pwndbg.typeinfo.uint8, addr)
def u16(addr): return readtype(pwndbg.typeinfo.uint16, addr)
def u32(addr): return readtype(pwndbg.typeinfo.uint32, addr)
def u64(addr): return readtype(pwndbg.typeinfo.uint64, addr)
def u(addr, size):
return {
8: u8,
16: u16,
32: u32,
64: u64
}[size](addr)
def s8(addr): return readtype(pwndbg.typeinfo.int8, addr)
def s16(addr): return readtype(pwndbg.typeinfo.int16, addr)
def s32(addr): return readtype(pwndbg.typeinfo.int32, addr)
def s64(addr): return readtype(pwndbg.typeinfo.int64, addr)
def poi(type, addr): return gdb.Value(addr).cast(type.pointer()).dereference()
def round_down(address, align): return address & ~(align-1)
def round_up(address, align): return (address+(align-1))&(~(align-1))
align_down = round_down
align_up = round_up
def page_align(address): return round_down(address, PAGE_SIZE)
def page_size_align(address): return round_up(address, PAGE_SIZE)
def page_offset(address): return (address & (PAGE_SIZE-1))
assert round_down(0xdeadbeef, 0x1000) == 0xdeadb000
assert round_up(0xdeadbeef, 0x1000) == 0xdeadc000
def find_upper_boundary(addr, max_pages=1024):
addr = pwndbg.memory.page_align(int(addr))
try:
for i in range(max_pages):
pwndbg.memory.read(addr, 1)
import sys
sys.stdout.write(hex(addr) + '\n')
addr += pwndbg.memory.PAGE_SIZE
except gdb.MemoryError:
pass
return addr
def find_lower_boundary(addr, max_pages=1024):
addr = pwndbg.memory.page_align(int(addr))
try:
for i in range(max_pages):
pwndbg.memory.read(addr, 1)
addr -= pwndbg.memory.PAGE_SIZE
except gdb.MemoryError:
pass
return addr
class Page(object):
"""
Represents the address space and page permissions of at least
one page of memory.
"""
vaddr = 0 #: Starting virtual address
memsz = 0 #: Size of the address space, in bytes
flags = 0 #: Flags set by the ELF file, see PF_X, PF_R, PF_W
offset = 0 #: Offset into the original ELF file that the data is loaded from
objfile = '' #: Path to the ELF on disk
def __init__(self, start, size, flags, offset, objfile=''):
self.vaddr = start
self.memsz = size
self.flags = flags
self.offset = offset
self.objfile = objfile
# if self.rwx:
# self.flags = self.flags ^ 1
@property
def read(self):
return bool(self.flags & 4)
@property
def write(self):
return bool(self.flags & 2)
@property
def execute(self):
return bool(self.flags & 1)
@property
def rw(self):
return self.read and self.write
@property
def rwx(self):
return self.read and self.write and self.execute
@property
def permstr(self):
flags = self.flags
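        # e.g. flags == 5 (read | execute) renders as 'r-xp'; the trailing 'p'
        # mirrors the private-mapping column of /proc/<pid>/maps.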
return ''.join(['r' if flags & 4 else '-',
'w' if flags & 2 else '-',
'x' if flags & 1 else '-',
'p'])
def __str__(self):
width = 2 + 2*pwndbg.typeinfo.ptrsize
fmt_string = "%#{}x %#{}x %s %8x %-6x %s"
fmt_string = fmt_string.format(width, width)
return fmt_string % (self.vaddr,
self.vaddr+self.memsz,
self.permstr,
self.memsz,
self.offset,
self.objfile or '')
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self.__str__())
def __contains__(self, a):
return self.vaddr <= a < (self.vaddr + self.memsz)
def __eq__(self, other):
return self.vaddr == getattr(other, 'vaddr', other)
def __lt__(self, other):
return self.vaddr < getattr(other, 'vaddr', other)
def __hash__(self):
return hash((self.vaddr, self.memsz, self.flags, self.offset, self.objfile))
|
bj7/pwndbg
|
pwndbg/memory.py
|
Python
|
mit
| 5,918
| 0.010308
|
# anaconda: The Red Hat Linux Installation program
#
# Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
# Red Hat, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author(s): Brent Fox <bfox@redhat.com>
# Mike Fulbright <msf@redhat.com>
# Jakub Jelinek <jakub@redhat.com>
# Jeremy Katz <katzj@redhat.com>
# Chris Lumens <clumens@redhat.com>
# Paul Nasrat <pnasrat@redhat.com>
# Erik Troan <ewt@rpath.com>
# Matt Wilson <msw@rpath.com>
#
import os
import sys
import stat
from glob import glob
from tempfile import mkstemp
import threading
from pyanaconda.bootloader import get_bootloader
from pyanaconda import constants
from pyanaconda import iutil
from pyanaconda.iutil import open # pylint: disable=redefined-builtin
from pyanaconda import addons
import logging
log = logging.getLogger("anaconda")
stdoutLog = logging.getLogger("anaconda.stdout")
class Anaconda(object):
def __init__(self):
from pyanaconda import desktop
self._bootloader = None
self.canReIPL = False
self.desktop = desktop.Desktop()
self.dir = None
self.displayMode = None
self.gui_startup_failed = False
self.id = None
self._instClass = None
self._intf = None
self.isHeadless = False
self.ksdata = None
self.mediaDevice = None
self.methodstr = None
self.opts = None
self._payload = None
self.proxy = None
self.proxyUsername = None
self.proxyPassword = None
self.reIPLMessage = None
self.rescue_mount = True
self.rootParts = None
self.stage2 = None
self._storage = None
self.updateSrc = None
self.mehConfig = None
# *sigh* we still need to be able to write this out
self.xdriver = None
# Data for inhibiting the screensaver
self.dbus_session_connection = None
self.dbus_inhibit_id = None
# This is used to synchronize Gtk.main calls between the graphical
# interface and error dialogs. Whoever gets to their initialization code
        # first will lock gui_initialized
self.gui_initialized = threading.Lock()
@property
def bootloader(self):
if not self._bootloader:
self._bootloader = get_bootloader()
return self._bootloader
@property
def instClass(self):
if not self._instClass:
from pyanaconda.installclass import DefaultInstall
self._instClass = DefaultInstall()
return self._instClass
def _getInterface(self):
return self._intf
def _setInterface(self, v):
# "lambda cannot contain assignment"
self._intf = v
def _delInterface(self):
del self._intf
intf = property(_getInterface, _setInterface, _delInterface)
@property
def payload(self):
# Try to find the packaging payload class. First try the install
# class. If it doesn't give us one, fall back to the default.
if not self._payload:
klass = self.instClass.getBackend()
if not klass:
from pyanaconda.flags import flags
if self.ksdata.ostreesetup.seen:
from pyanaconda.packaging.rpmostreepayload import RPMOSTreePayload
klass = RPMOSTreePayload
elif flags.livecdInstall:
from pyanaconda.packaging.livepayload import LiveImagePayload
klass = LiveImagePayload
elif self.ksdata.method.method == "liveimg":
from pyanaconda.packaging.livepayload import LiveImageKSPayload
klass = LiveImageKSPayload
else:
from pyanaconda.packaging.dnfpayload import DNFPayload
klass = DNFPayload
self._payload = klass(self.ksdata)
return self._payload
@property
def protected(self):
specs = []
if os.path.exists("/run/initramfs/livedev") and \
stat.S_ISBLK(os.stat("/run/initramfs/livedev")[stat.ST_MODE]):
specs.append(os.readlink("/run/initramfs/livedev"))
if self.methodstr and self.methodstr.startswith("hd:"):
specs.append(self.methodstr[3:].split(":", 3)[0])
if self.stage2 and self.stage2.startswith("hd:"):
specs.append(self.stage2[3:].split(":", 3)[0])
# zRAM swap devices need to be protected
for zram_dev in glob("/dev/zram*"):
specs.append(zram_dev)
return specs
@property
def storage(self):
if not self._storage:
import blivet
import blivet.arch
import gi
gi.require_version("BlockDev", "1.0")
from gi.repository import BlockDev as blockdev
self._storage = blivet.Blivet(ksdata=self.ksdata)
if self.instClass.defaultFS:
self._storage.setDefaultFSType(self.instClass.defaultFS)
if blivet.arch.isS390():
# want to make sure s390 plugin is loaded
if "s390" not in blockdev.get_available_plugin_names():
plugin = blockdev.PluginSpec()
plugin.name = blockdev.Plugin.S390
plugin.so_name = None
blockdev.reinit([plugin], reload=False)
return self._storage
def dumpState(self):
from meh import ExceptionInfo
from meh.dump import ReverseExceptionDump
from inspect import stack as _stack
from traceback import format_stack
# Skip the frames for dumpState and the signal handler.
stack = _stack()[2:]
stack.reverse()
exn = ReverseExceptionDump(ExceptionInfo(None, None, stack),
self.mehConfig)
# gather up info on the running threads
threads = "\nThreads\n-------\n"
# Every call to sys._current_frames() returns a new dict, so it is not
# modified when threads are created or destroyed. Iterating over it is
# thread safe.
for thread_id, frame in sys._current_frames().items():
threads += "\nThread %s\n" % (thread_id,)
threads += "".join(format_stack(frame))
# dump to a unique file
(fd, filename) = mkstemp(prefix="anaconda-tb-", dir="/tmp")
dump_text = exn.traceback_and_object_dump(self)
dump_text += threads
dump_text_bytes = dump_text.encode("utf-8")
iutil.eintr_retry_call(os.write, fd, dump_text_bytes)
iutil.eintr_ignore(os.close, fd)
# append to a given file
with open("/tmp/anaconda-tb-all.log", "a+") as f:
f.write("--- traceback: %s ---\n" % filename)
f.write(dump_text + "\n")
def initInterface(self, addon_paths=None):
if self._intf:
raise RuntimeError("Second attempt to initialize the InstallInterface")
if self.displayMode == 'g':
from pyanaconda.ui.gui import GraphicalUserInterface
# Run the GUI in non-fullscreen mode, so live installs can still
# use the window manager
self._intf = GraphicalUserInterface(self.storage, self.payload,
self.instClass, gui_lock=self.gui_initialized,
fullscreen=False)
# needs to be refreshed now we know if gui or tui will take place
addon_paths = addons.collect_addon_paths(constants.ADDON_PATHS,
ui_subdir="gui")
elif self.displayMode in ['t', 'c']: # text and command line are the same
from pyanaconda.ui.tui import TextUserInterface
self._intf = TextUserInterface(self.storage, self.payload,
self.instClass)
# needs to be refreshed now we know if gui or tui will take place
addon_paths = addons.collect_addon_paths(constants.ADDON_PATHS,
ui_subdir="tui")
else:
raise RuntimeError("Unsupported displayMode: %s" % self.displayMode)
if addon_paths:
self._intf.update_paths(addon_paths)
def writeXdriver(self, root=None):
# this should go away at some point, but until it does, we
# need to keep it around.
if self.xdriver is None:
return
if root is None:
root = iutil.getSysroot()
if not os.path.isdir("%s/etc/X11" %(root,)):
os.makedirs("%s/etc/X11" %(root,), mode=0o755)
f = open("%s/etc/X11/xorg.conf" %(root,), 'w')
f.write('Section "Device"\n\tIdentifier "Videocard0"\n\tDriver "%s"\nEndSection\n' % self.xdriver)
f.close()
|
kparal/anaconda
|
pyanaconda/anaconda.py
|
Python
|
gpl-2.0
| 9,570
| 0.001463
|
"""
WSGI config for Footer project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
from django.core.wsgi import get_wsgi_application
if os.environ.get('DJANGO_SETTINGS_MODULE') == 'config.settings.production':
from raven.contrib.django.raven_compat.middleware.wsgi import Sentry
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
if os.environ.get('DJANGO_SETTINGS_MODULE') == 'config.settings.production':
application = Sentry(application)
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
mihow/footer
|
config/wsgi.py
|
Python
|
mit
| 1,706
| 0
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import ast
import os.path
import platform
import re
import sys
class Config(object):
  '''A Config contains a dictionary that specifies a build configuration.'''
# Valid values for target_os:
OS_ANDROID = 'android'
OS_CHROMEOS = 'chromeos'
OS_LINUX = 'linux'
OS_MAC = 'mac'
OS_WINDOWS = 'windows'
# Valid values for target_cpu:
ARCH_X86 = 'x86'
ARCH_X64 = 'x64'
ARCH_ARM = 'arm'
def __init__(self, build_dir=None, target_os=None, target_cpu=None,
is_debug=None, is_verbose=None, apk_name='MojoRunner.apk'):
'''Function arguments take precedence over GN args and default values.'''
assert target_os in (None, Config.OS_ANDROID, Config.OS_CHROMEOS,
Config.OS_LINUX, Config.OS_MAC, Config.OS_WINDOWS)
assert target_cpu in (None, Config.ARCH_X86, Config.ARCH_X64,
Config.ARCH_ARM)
assert is_debug in (None, True, False)
assert is_verbose in (None, True, False)
self.values = {
'build_dir': build_dir,
'target_os': self.GetHostOS(),
'target_cpu': self.GetHostCPU(),
'is_debug': True,
'is_verbose': True,
'dcheck_always_on': False,
'is_asan': False,
'apk_name': apk_name,
}
self._ParseGNArgs()
if target_os is not None:
self.values['target_os'] = target_os
if target_cpu is not None:
self.values['target_cpu'] = target_cpu
if is_debug is not None:
self.values['is_debug'] = is_debug
if is_verbose is not None:
self.values['is_verbose'] = is_verbose
@staticmethod
def GetHostOS():
if sys.platform == 'linux2':
return Config.OS_LINUX
if sys.platform == 'darwin':
return Config.OS_MAC
if sys.platform == 'win32':
return Config.OS_WINDOWS
raise NotImplementedError('Unsupported host OS')
@staticmethod
def GetHostCPU():
# Derived from //native_client/pynacl/platform.py
machine = platform.machine()
if machine in ('x86', 'x86-32', 'x86_32', 'x8632', 'i386', 'i686', 'ia32',
'32'):
return Config.ARCH_X86
if machine in ('x86-64', 'amd64', 'AMD64', 'x86_64', 'x8664', '64'):
return Config.ARCH_X64
if machine.startswith('arm'):
return Config.ARCH_ARM
raise Exception('Cannot identify CPU arch: %s' % machine)
def _ParseGNArgs(self):
'''Parse the gn config file from the build directory, if it exists.'''
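    # An args.gn file holds simple GN assignments, e.g. (illustrative):
    #   is_debug = false
    #   target_cpu = "arm"  # trailing comments are stripped by the regex below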
TRANSLATIONS = { 'true': 'True', 'false': 'False', }
if self.values['build_dir'] is None:
return
gn_file = os.path.join(self.values['build_dir'], 'args.gn')
if not os.path.isfile(gn_file):
return
with open(gn_file, 'r') as f:
for line in f:
line = re.sub('\s*#.*', '', line)
result = re.match('^\s*(\w+)\s*=\s*(.*)\s*$', line)
if result:
key = result.group(1)
value = result.group(2)
self.values[key] = ast.literal_eval(TRANSLATIONS.get(value, value))
# Getters for standard fields ------------------------------------------------
@property
def build_dir(self):
'''Build directory path.'''
return self.values['build_dir']
@property
def target_os(self):
'''OS of the build/test target.'''
return self.values['target_os']
@property
def target_cpu(self):
'''CPU arch of the build/test target.'''
return self.values['target_cpu']
@property
def is_debug(self):
'''Is Debug build?'''
return self.values['is_debug']
@property
def is_verbose(self):
'''Should print additional logging information?'''
return self.values['is_verbose']
@property
def dcheck_always_on(self):
'''DCHECK is fatal even in release builds'''
return self.values['dcheck_always_on']
@property
def is_asan(self):
'''Is ASAN build?'''
return self.values['is_asan']
@property
def apk_name(self):
'''Name of the APK file to run'''
return self.values['apk_name']
|
junhuac/MQUIC
|
src/mojo/tools/mopy/config.py
|
Python
|
mit
| 4,103
| 0.014136
|
import random
import datetime
import psycopg2
from collections import deque
from tornado.stack_context import wrap
from tornado.ioloop import IOLoop
from tornado.concurrent import return_future
class WrapCursor:
def __init__(self,db,cur):
self._db = db
self._cur = cur
self._oldcur = None
self._init_member()
def __iter__(self):
return self._cur
@return_future
def execute(self,sql,param = None,callback = None):
def _cb(err = None):
if err != None:
raise err
self.arraysize = self._cur.arraysize
self.itersize = self._cur.itersize
self.rowcount = self._cur.rowcount
self.rownumber = self._cur.rownumber
self.lastrowid = self._cur.lastrowid
self.query = self._cur.query
self.statusmessage = self._cur.statusmessage
callback()
self._db._execute(self._cur,sql,param,_cb)
@return_future
def begin(self,callback):
def _cur_cb(cur,err = None):
if err != None:
self._db._end_tran(cur)
raise err
self._db._execute(cur,'BEGIN;',callback =
lambda err : _exec_cb(cur,err))
def _exec_cb(cur,err = None):
if err != None:
self._db._end_tran(cur)
raise err
self._oldcur = self._cur
self._cur = cur
callback()
assert(self._oldcur == None)
self._db._begin_tran(_cur_cb)
@return_future
def commit(self,callback):
def _cb(err = None):
if err != None:
raise err
self._db._end_tran(self._cur)
self._cur = self._oldcur
self._oldcur = None
callback()
assert(self._oldcur != None)
self._db._execute(self._cur,'COMMIT;',callback = _cb)
@return_future
def rollback(self,callback):
def _cb(err = None):
if err != None:
raise err
self._db._end_tran(self._cur)
self._cur = self._oldcur
self._oldcur = None
callback()
assert(self._oldcur != None)
self._db._execute(self._cur,'ROLLBACK;',callback = _cb)
def _init_member(self):
self.fetchone = self._cur.fetchone
self.fetchmany = self._cur.fetchmany
self.fetchall = self._cur.fetchall
self.scroll = self._cur.scroll
self.cast = self._cur.cast
self.tzinfo_factory = self._cur.tzinfo_factory
self.arraysize = 0
self.itersize = 0
self.rowcount = 0
self.rownumber = 0
self.lastrowid = None
self.query = ''
self.statusmessage = ''
class AsyncPG:
def __init__(self,dbname,dbuser,dbpasswd,
dbschema = 'public',dbtz = '+0'):
self.INITCONN_SHARE = 4
self.INITCONN_FREE = 16
self.OPER_CURSOR = 0
self.OPER_EXECUTE = 1
self._ioloop = IOLoop.instance()
self._dbname = dbname
self._dbuser = dbuser
self._dbpasswd = dbpasswd
self._dbschema = dbschema
self._dbtz = dbtz
self._share_connpool = []
self._free_connpool = []
self._conn_fdmap = {}
class _InfDateAdapter:
def __init__(self,wrapped):
self.wrapped = wrapped
def getquoted(self):
if self.wrapped == datetime.datetime.max:
return b"'infinity'::date"
elif self.wrapped == datetime.datetime.min:
return b"'-infinity'::date"
else:
return psycopg2.extensions.TimestampFromPy(
self.wrapped).getquoted()
psycopg2.extensions.register_adapter(datetime.datetime,_InfDateAdapter)
for i in range(self.INITCONN_SHARE):
conn = self._create_conn()
self._share_connpool.append(conn)
self._ioloop.add_handler(conn[0],self._dispatch,IOLoop.ERROR)
conn[2] = True
self._ioloop.add_callback(self._dispatch,conn[0],0)
for i in range(self.INITCONN_FREE):
conn = self._create_conn()
self._free_connpool.append(conn)
self._ioloop.add_handler(conn[0],self._dispatch,IOLoop.ERROR)
conn[2] = True
self._ioloop.add_callback(self._dispatch,conn[0],0)
@return_future
def cursor(self,callback):
def _cb(cur,err = None):
if err != None:
raise err
callback(WrapCursor(self,cur))
self._cursor(callback = _cb)
def _cursor(self,conn = None,callback = None):
def _cb(err = None):
if err != None:
                callback(None,err)
                return
            callback(conn[4].cursor())
if conn == None:
conn = self._share_connpool[
random.randrange(len(self._share_connpool))]
conn[1].append((self.OPER_CURSOR,None,wrap(_cb)))
if conn[2] == False:
conn[2] = True
self._ioloop.add_callback(self._dispatch,conn[0],0)
def _execute(self,cur,sql,param = (),callback = None):
conn = self._conn_fdmap[cur.connection.fileno()]
conn[1].append((self.OPER_EXECUTE,(cur,sql,param),wrap(callback)))
if conn[2] == False:
conn[2] = True
self._ioloop.add_callback(self._dispatch,conn[0],0)
def _begin_tran(self,callback):
if len(self._free_connpool) == 0:
conn = self._create_conn()
self._ioloop.add_handler(conn[0],self._dispatch,IOLoop.ERROR)
else:
conn = self._free_connpool.pop()
self._cursor(conn,callback)
def _end_tran(self,cur):
conn = self._conn_fdmap[cur.connection.fileno()]
if len(self._free_connpool) < self.INITCONN_FREE:
self._free_connpool.append(conn)
else:
self._close_conn(conn)
def _create_conn(self):
dbconn = psycopg2.connect(database = self._dbname,
user = self._dbuser,
password = self._dbpasswd,
async = 1,
options = (
'-c search_path=%s '
'-c timezone=%s'
)%(self._dbschema,self._dbtz))
conn = [dbconn.fileno(),deque(),False,None,dbconn]
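        # conn layout: [0] fd, [1] queue of pending operations, [2] "dispatch
        # scheduled" flag, [3] callback awaiting the current poll to finish,
        # [4] the underlying psycopg2 connection object.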
self._conn_fdmap[conn[0]] = conn
return conn
def _close_conn(self,conn):
self._conn_fdmap.pop(conn[0],None)
self._ioloop.remove_handler(conn[0])
conn[4].close()
def _dispatch(self,fd,evt):
err = None
try:
conn = self._conn_fdmap[fd]
except KeyError:
self._ioloop.remove_handler(fd)
return
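        # Drive psycopg2's asynchronous protocol: poll() returns POLL_READ or
        # POLL_WRITE while the fd must wait for I/O, and POLL_OK once the
        # connection attempt or the pending query has completed.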
try:
stat = conn[4].poll()
except Exception as e:
err = e
if err != None or stat == psycopg2.extensions.POLL_OK:
self._ioloop.update_handler(fd,IOLoop.ERROR)
elif stat == psycopg2.extensions.POLL_READ:
self._ioloop.update_handler(fd,IOLoop.READ | IOLoop.ERROR)
return
elif stat == psycopg2.extensions.POLL_WRITE:
self._ioloop.update_handler(fd,IOLoop.WRITE | IOLoop.ERROR)
return
cb = conn[3]
if cb != None:
conn[3] = None
cb(err)
else:
try:
oper,data,cb = conn[1].popleft()
except IndexError:
conn[2] = False
return
try:
if oper == self.OPER_CURSOR:
conn[3] = cb
elif oper == self.OPER_EXECUTE:
cur,sql,param = data
cur.execute(sql,param)
conn[3] = cb
except Exception as e:
conn[3] = None
cb(e)
self._ioloop.add_callback(self._dispatch,fd,0)
|
pzread/sdup
|
pg.py
|
Python
|
mit
| 8,196
| 0.0194
|
import simplejson as json
import urllib
import urllib2
import time
server = ""
def GET(uri, params):
params = urllib.urlencode(params)
req = urllib2.Request(server + uri + "?" + params , headers={'Accept': 'application/json'})
return json.loads(urllib2.urlopen(req).read())
def POST(uri, params):
params = json.dumps(params)
req = urllib2.Request(server + uri, params, headers={'Content-Type': 'application/json',
'Accept': 'application/json'})
response = json.loads(urllib2.urlopen(req).read())
return response["id"]
def set_server_url(url):
global server
server = url
class Detector:
def __init__(self, name, url):
self.name = name
self.url = url
def get_id(self):
try:
return self.id
except AttributeError:
try:
detectors = GET("/detectors/", {'name': self.name})
self.id = detectors[0]['id']
except urllib2.HTTPError as e:
self.id = POST("/detectors/", {'name': self.name, 'url': self.url})
return self.id
def realize(self):
self.get_id()
class Metric:
def __init__(self, name, descr, detector):
self.name = name
self.descr = descr
self.detector = detector
def get_id(self):
try:
return self.id
except AttributeError:
uri = "/detectors/" + str(self.detector.get_id()) + "/metrics/"
try:
metrics = GET(uri, {'name': self.name})
return metrics[0]['id']
except urllib2.HTTPError as e:
return POST(uri, {'name': self.name, 'description': self.descr})
def realize(self):
self.get_id()
def post_alert(detector, metric, payload, emails="", date=time.strftime("%Y-%m-%d")):
try:
payload = json.dumps(payload)
uri = "/detectors/" + str(detector.get_id()) + "/metrics/" + str(metric.get_id()) + "/alerts/"
return POST(uri, {'description': payload, 'date': date, 'emails': emails})
except urllib2.HTTPError as e:
if e.code == 422:
print "Alert for detector: " + detector.name + ", metric: " + metric.name + ", has already been submitted!"
else:
raise e
if __name__ == "__main__":
set_server_url("http://localhost:8080")
detector = Detector("Histogram Regression Detector", "foobar")
metric = Metric("metric100", "foobar", detector)
post_alert(detector, metric, "foobar")
|
mozilla/iacomus-alerts
|
python/poster.py
|
Python
|
epl-1.0
| 2,563
| 0.006633
|
# -*- coding: utf-8 -*-
#
# (c) Copyright 2001-2009 Hewlett-Packard Development Company, L.P.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Authors: Don Welch
#
# Qt
from PyQt4.QtCore import *
from PyQt4.QtGui import *
class ReadOnlyRadioButton(QRadioButton):
def __init__(self, parent):
QRadioButton.__init__(self, parent)
self.setFocusPolicy(Qt.NoFocus)
self.clearFocus()
def mousePressEvent(self, e):
if e.button() == Qt.LeftButton:
return
QRadioButton.mousePressEvent(e)
def mouseReleaseEvent(self, e):
if e.button() == Qt.LeftButton:
return
QRadioButton.mouseReleaseEvent(e)
def mouseMoveEvent(self, e):
return
def keyPressEvent(self, e):
if e.key() not in (Qt.Key_Up, Qt.Key_Left, Qt.Key_Right,
Qt.Key_Down, Qt.Key_Escape):
return
QRadioButton.keyPressEvent(e)
def keyReleaseEvent(self, e):
return
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/share/hplip/ui4/readonlyradiobutton.py
|
Python
|
gpl-3.0
| 1,657
| 0.004225
|
from __future__ import print_function
from __future__ import division
import numpy as np
import sys
import argparse
import time
import re
import gzip
import os
import logging
from collections import defaultdict
from operator import itemgetter
__version__ = "1.0"
def main():
parser=argparse.ArgumentParser(description='vcf2fasta (diploid)',formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-r', '--ref', dest='ref_fasta', type=str, required=True, help='input reference file (fasta)')
parser.add_argument('-v', '--vcf', dest='vcf_file', type=str, required=True, help='input vcf file (vcf)')
parser.add_argument('-n', '--name', dest='name', type=str, required=True, help='sample name (column header)')
parser.add_argument('--verbose', dest='verbose', action='count', help='Increase verbosity (specify multiple times for more)')
parser.add_argument('--version', action='version', version='%(prog)s '+__version__)
args=parser.parse_args()
ref_fasta=args.ref_fasta
vcf_file=args.vcf_file
name=args.name
verbose=args.verbose
log_level = logging.WARNING
if verbose == 1:
log_level = logging.INFO
elif verbose >= 2:
log_level = logging.DEBUG
logging.basicConfig(level=log_level)
verboseprint = print if verbose else lambda *a, **k: None
# process VCF file
verboseprint("processing VCF file")
snps=defaultdict(list)
snp_count=0
last_chrom=None
chrs=dict()
# open vcf file
if vcf_file.endswith('.gz'):
vcf_fh=gzip.open(vcf_file,'r')
else:
vcf_fh=open(vcf_file,'r')
# iterate over vcf file
for linenum,line in enumerate(vcf_fh):
if line.startswith("##"): # skip headers
continue
line=line.rstrip("\n")
# get header line
if line.startswith("#"):
header=line.lstrip("#").split("\t");
header2index=dict([(h,i) for i,h in enumerate(header)])
# ensure FORMAT column is included
if "FORMAT" not in header2index:
print("FORMAT field not specified in VCF file!")
print(header2index)
sys.exit('error')
# ensure user-specified sample name column is included
if name not in header2index:
print(name,"field not specified in VCF file!")
print(header2index)
sys.exit('error')
continue
tmp=line.split("\t")
genotype=tmp[header2index[name]]
format=tmp[header2index["FORMAT"]].split(":")
index2field=dict([(f,i) for i,f in enumerate(format)])
# ensure GT field id included in FORMAT column
if "GT" not in index2field:
print("GT field not specified in FORMAT!")
print(index2field)
sys.exit('error')
genotype_list=genotype.split(":")
gt=genotype_list[index2field["GT"]]
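        # GT values look like "0/0" or "1|1"; split on '/' or '|' to get the two
        # alleles, which must be identical (only homozygous calls are accepted below).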
pattern = re.compile('[\|\/]')
(a,b) = pattern.split(gt)
if(a != b):
            sys.exit('error: non-homo SNP found @ line# '+str(linenum)+'\n')
c=a=b
c=int(c)
chrom=tmp[header2index["CHROM"]]
pos=int(tmp[header2index["POS"]])
ref=tmp[header2index["REF"]]
alt=tmp[header2index["ALT"]].split(",")
if(c == 0):
snps["chr"+chrom].append((pos,ref))
snp_count+=1
elif(c >= 1):
snps["chr"+chrom].append((pos,alt[c-1]))
snp_count+=1
if(chrom != last_chrom):
if(chrom not in chrs):
verboseprint("\tchr",chrom)
chrs[chrom]=1
last_chrom=chrom
vcf_fh.close()
verboseprint("found",snp_count,"snps")
# process VCF file
verboseprint("")
    # ensure all snps are sorted by position (sorted separately for each chromosome)
verboseprint("sorting by position")
for chr in snps: # ensure sorted by pos
snp_positions=snps[chr]
verboseprint("\t",chr," ... ",len(snp_positions)," snps",sep="")
sorted_snp_positions=sorted(snp_positions, key=itemgetter(0))
snps[chr]=sorted_snp_positions
verboseprint("")
# process REFERENCE file
verboseprint("processing REF file")
# get output name
ref_fasta_name=os.path.basename(ref_fasta)
ref_fasta_name=re.sub(".gz", "", ref_fasta_name)
ref_fasta_name=re.sub(".fasta", "", ref_fasta_name)
ref_fasta_name=re.sub(".fa", "", ref_fasta_name)
out_fh=open(ref_fasta_name+'__'+name+'.fa',"w")
placed_snps=0
total_placed_snps=0
current_snp=(None,None)
pos=1
last_chrom=None
tmp_pos_list=[(None,None)]
# open reference fasta file
if ref_fasta.endswith('.gz'):
ref_fh=gzip.open(ref_fasta,'r')
else:
ref_fh=open(ref_fasta,'r')
# iterate over fasta file
for linenum,line in enumerate(ref_fh):
line=line.rstrip("\n")
# search for > (contig name)
regexp = re.compile('>')
if regexp.search(line):
if line.startswith(">"):
chrom=line.lstrip(">")
pos=1
print(line,"-",name,file=out_fh,sep="")
continue
else: # random > found in line - issue with cat ?
sys.exit('error with fasta file'+'\n'+str(line))
if(chrom != last_chrom):
tmp_pos_list=[]
if(last_chrom != None):
verboseprint(" ... ",placed_snps," / ",possible_snps,sep="")
tmp_pos_list=[(None,None)]
possible_snps=0
if(chrom in snps):
tmp_pos_list=snps[chrom]
possible_snps=len(tmp_pos_list)
verboseprint("\t",chrom,sep="",end="")
current_snp=tmp_pos_list.pop(0)
total_placed_snps += placed_snps
placed_snps=0
tmp_len=len(line)
start=pos
end=pos+tmp_len-1
while((current_snp[0] < start) and (len(tmp_pos_list) > 0)):
print("ERROR: missed snp!",current_snp,"\t",start,"-",end,">",current_snp[0])
current_snp=tmp_pos_list.pop(0)
if((current_snp[0] == None) or (current_snp[0] > end)):
print(line,file=out_fh)
else:
char_list=list(line)
snp_offset=current_snp[0]-start
if((snp_offset < 0) or (snp_offset > len(char_list))): # check to ensure SNP overlaps interval
sys.exit('error '+str(current_snp)+' '+str(snp_offset)+' '+str(start)+'-'+str(end))
# replace snp in char arr
char_list[snp_offset]=current_snp[1]
placed_snps+=1
if(len(tmp_pos_list) == 0):
current_snp=(None,None)
# handle multiple SNPs per FASTA line (normally 50 chars /buffer/)
if(len(tmp_pos_list) > 0):
current_snp=tmp_pos_list.pop(0)
while((current_snp[0] <= end) and (len(tmp_pos_list) > 0)):
snp_offset=current_snp[0]-start
# replace snp in char arr
char_list[snp_offset]=current_snp[1]
placed_snps+=1
current_snp=tmp_pos_list.pop(0)
if((current_snp[0] <= end) and (len(tmp_pos_list) == 0)):
snp_offset=current_snp[0]-start
char_list[snp_offset]=current_snp[1]
placed_snps+=1
current_snp=(None,None)
# char list to string, and print
print(''.join(char_list),file=out_fh)
pos += tmp_len
last_chrom=chrom
ref_fh.close()
# handle last line
verboseprint(" ... ",last_chrom," ",placed_snps," / ",len(snps[last_chrom]),sep="")
# process REFERENCE file
if __name__=="__main__":
main()
|
blajoie/vcf2fasta
|
scripts/vcf2fasta.py
|
Python
|
apache-2.0
| 8,351
| 0.025626
|
import socket
import pytest
import mock
from pygelf import GelfTcpHandler, GelfUdpHandler, GelfHttpHandler, GelfTlsHandler, GelfHttpsHandler
from tests.helper import logger, get_unique_message, log_warning, log_exception
SYSLOG_LEVEL_ERROR = 3
SYSLOG_LEVEL_WARNING = 4
@pytest.fixture(params=[
GelfTcpHandler(host='127.0.0.1', port=12201),
GelfUdpHandler(host='127.0.0.1', port=12202),
GelfUdpHandler(host='127.0.0.1', port=12202, compress=False),
GelfHttpHandler(host='127.0.0.1', port=12203),
GelfHttpHandler(host='127.0.0.1', port=12203, compress=False),
GelfTlsHandler(host='127.0.0.1', port=12204),
GelfHttpsHandler(host='127.0.0.1', port=12205, validate=False),
GelfHttpsHandler(host='localhost', port=12205, validate=True, ca_certs='tests/config/cert.pem'),
GelfTlsHandler(host='127.0.0.1', port=12204, validate=True, ca_certs='tests/config/cert.pem'),
])
def handler(request):
return request.param
def test_simple_message(logger):
message = get_unique_message()
graylog_response = log_warning(logger, message)
assert graylog_response['message'] == message
assert graylog_response['level'] == SYSLOG_LEVEL_WARNING
assert 'full_message' not in graylog_response
assert 'file' not in graylog_response
assert 'module' not in graylog_response
assert 'func' not in graylog_response
assert 'logger_name' not in graylog_response
assert 'line' not in graylog_response
def test_formatted_message(logger):
message = get_unique_message()
template = message + '_%s_%s'
graylog_response = log_warning(logger, template, args=('hello', 'gelf'))
assert graylog_response['message'] == message + '_hello_gelf'
assert graylog_response['level'] == SYSLOG_LEVEL_WARNING
assert 'full_message' not in graylog_response
def test_full_message(logger):
message = get_unique_message()
try:
raise ValueError(message)
except ValueError as e:
graylog_response = log_exception(logger, message, e)
assert graylog_response['message'] == message
assert graylog_response['level'] == SYSLOG_LEVEL_ERROR
assert message in graylog_response['full_message']
assert 'Traceback (most recent call last)' in graylog_response['full_message']
assert 'ValueError: ' in graylog_response['full_message']
assert 'file' not in graylog_response
assert 'module' not in graylog_response
assert 'func' not in graylog_response
assert 'logger_name' not in graylog_response
assert 'line' not in graylog_response
def test_source(logger):
original_source = socket.gethostname()
with mock.patch('socket.gethostname', return_value='different_domain'):
message = get_unique_message()
graylog_response = log_warning(logger, message)
assert graylog_response['source'] == original_source
|
keeprocking/pygelf
|
tests/test_common_fields.py
|
Python
|
mit
| 2,884
| 0.001387
|
# -*- coding: utf-8 -*-
#
# Copyright 2013 - Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from wsme import types as wtypes
LOG = logging.getLogger(__name__)
API_STATUS = wtypes.Enum(str, 'SUPPORTED', 'CURRENT', 'DEPRECATED')
class Resource(wtypes.Base):
"""REST API Resource."""
@classmethod
def from_dict(cls, d):
# TODO: take care of nested resources
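        # Only keys matching declared attributes are copied, so unknown keys in
        # the incoming dict are silently ignored (e.g. from_dict({'name': 'x'})).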
obj = cls()
for key, val in d.items():
if hasattr(obj, key):
setattr(obj, key, val)
return obj
def __str__(self):
"""WSME based implementation of __str__."""
res = "%s [" % type(self).__name__
first = True
for attr in self._wsme_attributes:
if not first:
res += ', '
else:
first = False
res += "%s='%s'" % (attr.name, getattr(self, attr.name))
return res + "]"
|
nmakhotkin/mistral-extra
|
examples/webhooks/api/controllers/resource.py
|
Python
|
apache-2.0
| 1,470
| 0
|
# Copyright the Karmabot authors and contributors.
# All rights reserved. See AUTHORS.
#
# This file is part of 'karmabot' and is distributed under the BSD license.
# See LICENSE for more details.
# dedicated to LC
from json import JSONDecoder
from urllib import urlencode
from urllib2 import urlopen
from karmabot.core.client import thing
from karmabot.core.commands.sets import CommandSet
from karmabot.core.register import facet_registry
from karmabot.core.facets import Facet
import re
import htmlentitydefs
##
# Function Placed in public domain by Fredrik Lundh
# http://effbot.org/zone/copyright.htm
# http://effbot.org/zone/re-sub.htm#unescape-html
# Removes HTML or XML character references and entities from a text string.
#
# @param text The HTML (or XML) source text.
# @return The plain text, as a Unicode string, if necessary.
def unescape(text):
def fixup(m):
text = m.group(0)
if text[:2] == "&#":
# character reference
try:
if text[:3] == "&#x":
return unichr(int(text[3:-1], 16))
else:
return unichr(int(text[2:-1]))
except ValueError:
pass
else:
# named entity
try:
text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])
except KeyError:
pass
# leave as is
return text
return re.sub("&#?\w+;", fixup, text)
@facet_registry.register
class LmgtfyFacet(Facet):
name = "lmgtfy"
commands = thing.add_child(CommandSet(name))
@classmethod
def does_attach(cls, thing):
return thing.name == "lmgtfy"
@commands.add(u"lmgtfy {item}",
u"googles for a {item}")
def lmgtfy(self, context, item):
api_url = "http://ajax.googleapis.com/ajax/services/search/web?"
response = urlopen(api_url + urlencode(dict(v="1.0",
q=item)))
response = dict(JSONDecoder().decode(response.read()))
top_result = {}
if response.get('responseStatus') == 200:
results = response.get('responseData').get('results')
top_result = results.pop(0)
context.reply(", ".join([unescape(top_result.get('titleNoFormatting')),
top_result.get('unescapedUrl'),
]))
|
chromakode/karmabot
|
karmabot/extensions/lmgtfy.py
|
Python
|
bsd-3-clause
| 2,423
| 0.000413
|
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from numpy import asarray_chkfinite
from .misc import LinAlgError, _datacopied
from .lapack import get_lapack_funcs
from scipy._lib.six import callable
__all__ = ['qz']
_double_precision = ['i','l','d']
def _select_function(sort, typ):
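    # Build the eigenvalue-selection predicate handed to LAPACK *gges: complex
    # dtypes ('F'/'D') get a two-argument (alpha, beta) callable, real dtypes
    # ('f'/'d') a three-argument (alphar, alphai, beta) one.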
if typ in ['F','D']:
if callable(sort):
# assume the user knows what they're doing
sfunction = sort
elif sort == 'lhp':
sfunction = lambda x,y: (np.real(x/y) < 0.0)
elif sort == 'rhp':
sfunction = lambda x,y: (np.real(x/y) >= 0.0)
elif sort == 'iuc':
sfunction = lambda x,y: (abs(x/y) <= 1.0)
elif sort == 'ouc':
sfunction = lambda x,y: (abs(x/y) > 1.0)
else:
raise ValueError("sort parameter must be None, a callable, or "
"one of ('lhp','rhp','iuc','ouc')")
elif typ in ['f','d']:
if callable(sort):
# assume the user knows what they're doing
sfunction = sort
elif sort == 'lhp':
sfunction = lambda x,y,z: (np.real((x+y*1j)/z) < 0.0)
elif sort == 'rhp':
sfunction = lambda x,y,z: (np.real((x+y*1j)/z) >= 0.0)
elif sort == 'iuc':
sfunction = lambda x,y,z: (abs((x+y*1j)/z) <= 1.0)
elif sort == 'ouc':
sfunction = lambda x,y,z: (abs((x+y*1j)/z) > 1.0)
else:
raise ValueError("sort parameter must be None, a callable, or "
"one of ('lhp','rhp','iuc','ouc')")
else: # to avoid an error later
raise ValueError("dtype %s not understood" % typ)
return sfunction
def qz(A, B, output='real', lwork=None, sort=None, overwrite_a=False,
overwrite_b=False, check_finite=True):
"""
QZ decomposition for generalized eigenvalues of a pair of matrices.
The QZ, or generalized Schur, decomposition for a pair of N x N
nonsymmetric matrices (A,B) is::
(A,B) = (Q*AA*Z', Q*BB*Z')
where AA, BB is in generalized Schur form if BB is upper-triangular
with non-negative diagonal and AA is upper-triangular, or for real QZ
decomposition (``output='real'``) block upper triangular with 1x1
and 2x2 blocks. In this case, the 1x1 blocks correspond to real
generalized eigenvalues and 2x2 blocks are 'standardized' by making
the corresponding elements of BB have the form::
[ a 0 ]
[ 0 b ]
and the pair of corresponding 2x2 blocks in AA and BB will have a complex
conjugate pair of generalized eigenvalues. If (``output='complex'``) or
A and B are complex matrices, Z' denotes the conjugate-transpose of Z.
Q and Z are unitary matrices.
Parameters
----------
A : (N, N) array_like
2d array to decompose
B : (N, N) array_like
2d array to decompose
output : str {'real','complex'}
Construct the real or complex QZ decomposition for real matrices.
Default is 'real'.
lwork : int, optional
Work array size. If None or -1, it is automatically computed.
sort : {None, callable, 'lhp', 'rhp', 'iuc', 'ouc'}, optional
NOTE: THIS INPUT IS DISABLED FOR NOW, IT DOESN'T WORK WELL ON WINDOWS.
Specifies whether the upper eigenvalues should be sorted. A callable
        may be passed that, given an eigenvalue, returns a boolean denoting
whether the eigenvalue should be sorted to the top-left (True). For
real matrix pairs, the sort function takes three real arguments
(alphar, alphai, beta). The eigenvalue x = (alphar + alphai*1j)/beta.
For complex matrix pairs or output='complex', the sort function
takes two complex arguments (alpha, beta). The eigenvalue
x = (alpha/beta).
Alternatively, string parameters may be used:
- 'lhp' Left-hand plane (x.real < 0.0)
- 'rhp' Right-hand plane (x.real > 0.0)
- 'iuc' Inside the unit circle (x*x.conjugate() <= 1.0)
- 'ouc' Outside the unit circle (x*x.conjugate() > 1.0)
Defaults to None (no sorting).
check_finite : boolean
If true checks the elements of `A` and `B` are finite numbers. If
false does no checking and passes matrix through to
underlying algorithm.
Returns
-------
AA : (N, N) ndarray
Generalized Schur form of A.
BB : (N, N) ndarray
Generalized Schur form of B.
Q : (N, N) ndarray
The left Schur vectors.
Z : (N, N) ndarray
The right Schur vectors.
sdim : int, optional
If sorting was requested, a fifth return value will contain the
number of eigenvalues for which the sort condition was True.
Notes
-----
Q is transposed versus the equivalent function in Matlab.
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy import linalg
>>> np.random.seed(1234)
>>> A = np.arange(9).reshape((3, 3))
>>> B = np.random.randn(3, 3)
>>> AA, BB, Q, Z = linalg.qz(A, B)
>>> AA
array([[-13.40928183, -4.62471562, 1.09215523],
[ 0. , 0. , 1.22805978],
[ 0. , 0. , 0.31973817]])
>>> BB
array([[ 0.33362547, -1.37393632, 0.02179805],
[ 0. , 1.68144922, 0.74683866],
[ 0. , 0. , 0.9258294 ]])
>>> Q
array([[ 0.14134727, -0.97562773, 0.16784365],
[ 0.49835904, -0.07636948, -0.86360059],
[ 0.85537081, 0.20571399, 0.47541828]])
>>> Z
array([[-0.24900855, -0.51772687, 0.81850696],
[-0.79813178, 0.58842606, 0.12938478],
[-0.54861681, -0.6210585 , -0.55973739]])
"""
if sort is not None:
# Disabled due to segfaults on win32, see ticket 1717.
raise ValueError("The 'sort' input of qz() has to be None (will "
" change when this functionality is made more robust).")
if output not in ['real','complex','r','c']:
raise ValueError("argument must be 'real', or 'complex'")
if check_finite:
a1 = asarray_chkfinite(A)
b1 = asarray_chkfinite(B)
else:
a1 = np.asarray(A)
b1 = np.asarray(B)
a_m, a_n = a1.shape
b_m, b_n = b1.shape
if not (a_m == a_n == b_m == b_n):
raise ValueError("Array dimensions must be square and agree")
typa = a1.dtype.char
if output in ['complex', 'c'] and typa not in ['F','D']:
if typa in _double_precision:
a1 = a1.astype('D')
typa = 'D'
else:
a1 = a1.astype('F')
typa = 'F'
typb = b1.dtype.char
if output in ['complex', 'c'] and typb not in ['F','D']:
if typb in _double_precision:
b1 = b1.astype('D')
typb = 'D'
else:
b1 = b1.astype('F')
typb = 'F'
overwrite_a = overwrite_a or (_datacopied(a1,A))
overwrite_b = overwrite_b or (_datacopied(b1,B))
gges, = get_lapack_funcs(('gges',), (a1,b1))
if lwork is None or lwork == -1:
# get optimal work array size
result = gges(lambda x: None, a1, b1, lwork=-1)
lwork = result[-2][0].real.astype(np.int)
if sort is None:
sort_t = 0
sfunction = lambda x: None
else:
sort_t = 1
sfunction = _select_function(sort, typa)
result = gges(sfunction, a1, b1, lwork=lwork, overwrite_a=overwrite_a,
overwrite_b=overwrite_b, sort_t=sort_t)
info = result[-1]
if info < 0:
raise ValueError("Illegal value in argument %d of gges" % -info)
elif info > 0 and info <= a_n:
warnings.warn("The QZ iteration failed. (a,b) are not in Schur "
"form, but ALPHAR(j), ALPHAI(j), and BETA(j) should be correct "
"for J=%d,...,N" % info-1, UserWarning)
elif info == a_n+1:
raise LinAlgError("Something other than QZ iteration failed")
elif info == a_n+2:
raise LinAlgError("After reordering, roundoff changed values of some "
"complex eigenvalues so that leading eigenvalues in the "
"Generalized Schur form no longer satisfy sort=True. "
"This could also be caused due to scaling.")
elif info == a_n+3:
raise LinAlgError("Reordering failed in <s,d,c,z>tgsen")
# output for real
# AA, BB, sdim, alphar, alphai, beta, vsl, vsr, work, info
# output for complex
# AA, BB, sdim, alphai, beta, vsl, vsr, work, info
if sort_t == 0:
return result[0], result[1], result[-4], result[-3]
else:
return result[0], result[1], result[-4], result[-3], result[2]
|
witcxc/scipy
|
scipy/linalg/_decomp_qz.py
|
Python
|
bsd-3-clause
| 8,764
| 0.004792
|
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helpers for comparing version strings.
"""
import functools
import inspect
import logging
from oslo_config import cfg
import pkg_resources
import six
from nova.openstack.common._i18n import _
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
opts = [
cfg.BoolOpt('fatal_deprecations',
default=False,
help='Enables or disables fatal status of deprecations.'),
]
class deprecated(object):
"""A decorator to mark callables as deprecated.
This decorator logs a deprecation message when the callable it decorates is
used. The message will include the release where the callable was
deprecated, the release where it may be removed and possibly an optional
replacement.
Examples:
1. Specifying the required deprecated release
>>> @deprecated(as_of=deprecated.ICEHOUSE)
... def a(): pass
2. Specifying a replacement:
>>> @deprecated(as_of=deprecated.ICEHOUSE, in_favor_of='f()')
... def b(): pass
3. Specifying the release where the functionality may be removed:
>>> @deprecated(as_of=deprecated.ICEHOUSE, remove_in=+1)
... def c(): pass
4. Specifying the deprecated functionality will not be removed:
>>> @deprecated(as_of=deprecated.ICEHOUSE, remove_in=0)
... def d(): pass
5. Specifying a replacement, deprecated functionality will not be removed:
>>> @deprecated(as_of=deprecated.ICEHOUSE, in_favor_of='f()', remove_in=0)
... def e(): pass
"""
# NOTE(morganfainberg): Bexar is used for unit test purposes, it is
# expected we maintain a gap between Bexar and Folsom in this list.
BEXAR = 'B'
FOLSOM = 'F'
GRIZZLY = 'G'
HAVANA = 'H'
ICEHOUSE = 'I'
JUNO = 'J'
KILO = 'K'
_RELEASES = {
# NOTE(morganfainberg): Bexar is used for unit test purposes, it is
# expected we maintain a gap between Bexar and Folsom in this list.
'B': 'Bexar',
'F': 'Folsom',
'G': 'Grizzly',
'H': 'Havana',
'I': 'Icehouse',
'J': 'Juno',
'K': 'Kilo',
}
_deprecated_msg_with_alternative = _(
'%(what)s is deprecated as of %(as_of)s in favor of '
'%(in_favor_of)s and may be removed in %(remove_in)s.')
_deprecated_msg_no_alternative = _(
'%(what)s is deprecated as of %(as_of)s and may be '
'removed in %(remove_in)s. It will not be superseded.')
_deprecated_msg_with_alternative_no_removal = _(
'%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s.')
_deprecated_msg_with_no_alternative_no_removal = _(
'%(what)s is deprecated as of %(as_of)s. It will not be superseded.')
def __init__(self, as_of, in_favor_of=None, remove_in=2, what=None):
"""Initialize decorator
:param as_of: the release deprecating the callable. Constants
            are defined in this class for convenience.
:param in_favor_of: the replacement for the callable (optional)
:param remove_in: an integer specifying how many releases to wait
before removing (default: 2)
:param what: name of the thing being deprecated (default: the
callable's name)
"""
self.as_of = as_of
self.in_favor_of = in_favor_of
self.remove_in = remove_in
self.what = what
def __call__(self, func_or_cls):
if not self.what:
self.what = func_or_cls.__name__ + '()'
msg, details = self._build_message()
if inspect.isfunction(func_or_cls):
@six.wraps(func_or_cls)
def wrapped(*args, **kwargs):
report_deprecated_feature(LOG, msg, details)
return func_or_cls(*args, **kwargs)
return wrapped
elif inspect.isclass(func_or_cls):
orig_init = func_or_cls.__init__
# TODO(tsufiev): change `functools` module to `six` as
# soon as six 1.7.4 (with fix for passing `assigned`
# argument to underlying `functools.wraps`) is released
            # and added to the oslo-incubator requirements
@functools.wraps(orig_init, assigned=('__name__', '__doc__'))
def new_init(self, *args, **kwargs):
report_deprecated_feature(LOG, msg, details)
orig_init(self, *args, **kwargs)
func_or_cls.__init__ = new_init
return func_or_cls
else:
raise TypeError('deprecated can be used only with functions or '
'classes')
def _get_safe_to_remove_release(self, release):
# TODO(dstanek): this method will have to be reimplemented once
# when we get to the X release because once we get to the Y
# release, what is Y+2?
new_release = chr(ord(release) + self.remove_in)
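        # e.g. as_of=ICEHOUSE ('I') with remove_in=2 gives chr(ord('I') + 2) == 'K',
        # which _RELEASES maps to 'Kilo'.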
if new_release in self._RELEASES:
return self._RELEASES[new_release]
else:
return new_release
def _build_message(self):
details = dict(what=self.what,
as_of=self._RELEASES[self.as_of],
remove_in=self._get_safe_to_remove_release(self.as_of))
if self.in_favor_of:
details['in_favor_of'] = self.in_favor_of
if self.remove_in > 0:
msg = self._deprecated_msg_with_alternative
else:
# There are no plans to remove this function, but it is
# now deprecated.
msg = self._deprecated_msg_with_alternative_no_removal
else:
if self.remove_in > 0:
msg = self._deprecated_msg_no_alternative
else:
# There are no plans to remove this function, but it is
# now deprecated.
msg = self._deprecated_msg_with_no_alternative_no_removal
return msg, details
def is_compatible(requested_version, current_version, same_major=True):
"""Determine whether `requested_version` is satisfied by
`current_version`; in other words, `current_version` is >=
`requested_version`.
:param requested_version: version to check for compatibility
:param current_version: version to check against
:param same_major: if True, the major version must be identical between
`requested_version` and `current_version`. This is used when a
major-version difference indicates incompatibility between the two
versions. Since this is the common-case in practice, the default is
True.
:returns: True if compatible, False if not
"""
requested_parts = pkg_resources.parse_version(requested_version)
current_parts = pkg_resources.parse_version(current_version)
if same_major and (requested_parts[0] != current_parts[0]):
return False
return current_parts >= requested_parts
# Track the messages we have sent already. See
# report_deprecated_feature().
_deprecated_messages_sent = {}
def report_deprecated_feature(logger, msg, *args, **kwargs):
"""Call this function when a deprecated feature is used.
If the system is configured for fatal deprecations then the message
is logged at the 'critical' level and :class:`DeprecatedConfig` will
be raised.
Otherwise, the message will be logged (once) at the 'warn' level.
:raises: :class:`DeprecatedConfig` if the system is configured for
fatal deprecations.
"""
stdmsg = _("Deprecated: %s") % msg
CONF.register_opts(opts)
if CONF.fatal_deprecations:
logger.critical(stdmsg, *args, **kwargs)
raise DeprecatedConfig(msg=stdmsg)
# Using a list because a tuple with dict can't be stored in a set.
sent_args = _deprecated_messages_sent.setdefault(msg, list())
if args in sent_args:
# Already logged this message, so don't log it again.
return
sent_args.append(args)
logger.warn(stdmsg, *args, **kwargs)
class DeprecatedConfig(Exception):
message = _("Fatal call to deprecated config: %(msg)s")
def __init__(self, msg):
super(Exception, self).__init__(self.message % dict(msg=msg))
|
cloudbase/nova-virtualbox
|
nova/openstack/common/versionutils.py
|
Python
|
apache-2.0
| 8,784
| 0
|
from exchanges import helpers
from exchanges import kraken
from decimal import Decimal
### Kraken opportunities
#### ARBITRAGE OPPORTUNITY 1
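# Each opportunity checks a triangular cycle (e.g. LTC -> EUR -> XBT -> LTC)
# using current Kraken bid/ask quotes. The functions return 1 minus the round-trip
# multiplier, so a negative value suggests the cycle ends with more than it
# started with, before trading fees.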
def opportunity_1():
sellLTCbuyEUR = kraken.get_current_bid_LTCEUR()
sellEURbuyXBT = kraken.get_current_ask_XBTEUR()
sellXBTbuyLTC = kraken.get_current_ask_XBTLTC()
    opport = 1-((sellLTCbuyEUR/sellEURbuyXBT)*sellXBTbuyLTC)
return Decimal(opport)
def opportunity_2():
sellEURbuyLTC = kraken.get_current_ask_LTCEUR()
sellLTCbuyXBT = kraken.get_current_ask_XBTLTC()
sellXBTbuyEUR = kraken.get_current_bid_XBTEUR()
opport = 1-(((1/sellEURbuyLTC)/sellLTCbuyXBT)*sellXBTbuyEUR)
return Decimal(opport)
|
Humantrashcan/prices
|
exchanges/opportunity_kraken.py
|
Python
|
mit
| 658
| 0.024316
|
import os
from fs import enums
import unittest
class TestEnums(unittest.TestCase):
def test_enums(self):
self.assertEqual(enums.Seek.current, os.SEEK_CUR)
self.assertEqual(enums.Seek.end, os.SEEK_END)
self.assertEqual(enums.Seek.set, os.SEEK_SET)
self.assertEqual(enums.ResourceType.unknown, 0)
|
PyFilesystem/pyfilesystem2
|
tests/test_enums.py
|
Python
|
mit
| 335
| 0
|
#!/usr/bin/env python
from __future__ import print_function
# Use the srilm module
from srilm import *
# Initialize a trigram LM variable (1 = unigram, 2 = bigram and so on)
n = initLM(5)
# Read the LM file ("corpu.lm") into the LM variable
readLM(n, "corpu.lm")
# How many n-grams of different order are there ?
print("1. Number of n-grams:")
print(" There are {} unigrams in this LM".format(howManyNgrams(n, 1)))
print(" There are {} bigrams in this LM".format(howManyNgrams(n, 2)))
print(" There are {} trigrams in this LM".format(howManyNgrams(n, 3)))
print(" There are {} 4-grams in this LM".format(howManyNgrams(n, 4)))
print(" There are {} 5-grams in this LM".format(howManyNgrams(n, 5)))
print()
# Query the LM for some n-gram log probabilities.
# Note that a SRI language model uses backoff smoothing, so if an n-gram is
# not present in the LM, it will compute it using a smoothed lower-order
# n-gram distribution.
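# For example, if a queried trigram never occurred in training, its probability is
# estimated from the bigram (or unigram) distribution scaled by a backoff weight.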
print("2. N-gram log probabilities:")
p1 = getUnigramProb(n, 'Naturverbundenheit')
print(" p('weil') = {}".format(p1))
p2 = getBigramProb(n, 'of the')
print(" p('of the') = {}".format(p2))
p3 = getBigramProb(n, 'Niederlage Deutschlands')
print(" p('Niederlage Deutschlands') = {}".format(p3))
p4 = getTrigramProb(n, 'there are some')
print(" p('there are some') = {}".format(p4))
# generic n-gram probability function
p5 = getNgramProb(n, 'sachin tendulkar .PERIOD', 3)
print(" p('sachinr') = {}".format(p5))
p6 = getNgramProb(n, 'or whatever has yet to', 5)
print(" p('or whatever has yet to') = {}".format(p6))
print()
# Query the LM to get the final log probability for an entire sentence.
# Note that this is different from a n-gram probability because
# (1) For a sentence, SRILM appends <s> and </s> to its beginning
# and the end respectively
# (2) The log prob of a probability is the sum of all individual
# n-gram log probabilities
print("3. Sentence log probabilities and perplexities:")
sprob = getSentenceProb(n,'there are some good',4)
print(" p('there are some good') = {}".format(sprob))
# the perplexity
sppl = getSentencePpl(n,'there are some good', 4)
print(" ppl('there are some good') = {}".format(sppl))
print()
# number of OOVs in a sentence
print("4. OOvs:")
noov = numOOVs(n, 'there are some foobar', 4)
print(" nOOVs('there are some foobar') = {}".format(noov))
print()
# Query the LM to get the total log probability for the file named 'corpus'
print("5. Corpus log probabilties and perplexities:")
corpus = 'test.txt'
corpus_prob = getCorpusProb(n, corpus)
print(" Logprob for the file {} = {}".format(corpus, corpus_prob))
# Query the LM to get the perplexity for the file named 'corpus'
corpus_ppl = getCorpusPpl(n, corpus)
print(" Perplexity for the file {} = {}".format(corpus, corpus_ppl))
# Free LM variable
deleteLM(n)
|
sureshbvn/nlpProject
|
nGramModel/test.py
|
Python
|
mit
| 2,837
| 0.004582
|
from flask import Flask
from flask import request
from flask.ext.sqlalchemy import SQLAlchemy
import datetime
import uuid as uid
import sys
import requests
import urllib2
GOIP_SERVER_IP = '127.0.0.1' #'172.248.114.178'
TELEPHONY_SERVER_IP = '127.0.0.1:5000/sms/in'
sys.path.append('/home/csik/public_python/sms_server/deploy') #move
app = Flask(__name__)
from rootio.extensions import db
from config import SQLALCHEMY_DATABASE_URI
app.config['SQLALCHEMY_DATABASE_URI'] = SQLALCHEMY_DATABASE_URI
db = SQLAlchemy(app)
from rootio.telephony.models import PhoneNumber, Message
def debug(request):
if request.method == 'POST':
deets = request.form.items()
print >> sys.stderr, type(deets)
deets_method = 'POST'
else:
deets = request.args.items()
print >> sys.stderr, type(deets)
deets_method = 'GET'
s = ""
#print "({0}) parameters via {1}".format(len(deets)-1, deets_method)
for deet in deets:
s += str(deet)
print s
return deets
@app.route("/", methods=['GET', 'POST'])
def hello():
debug(request)
return "Hello World!"
@app.route("/init_goip", methods=['GET', 'POST'])
def init_goip():
    try:
        import send_sms_GOIP
        if not send_sms_GOIP.create_flags():
            raise Exception("Wrong machine")
    except:
        print "Unable to init GOIP -- are you sure you called the right machine?"
        return "Unable to init GOIP", 404
    return "GOIP initialized"  # assumed success response; the original returned nothing here
@app.route("/out", methods=['GET', 'POST'])
def sms_out():
"""
Handles outgoing message requests.
Currently only from GOIP8, should be generalized to any type of sending unit, called by station.
Expected args: line, to_number, message
"""
try:
import send_sms_GOIP
except:
print "Unable to init GOIP -- are you sure you called the right machine?"
return "Unable to init GOIP", 404
debug(request)
line = request.args.get('line')
to_number = request.args.get('to_number')
message = request.args.get('message')
if not line or not to_number or not message:
print "Insufficient number of arguments!"
return "False"
if not send_sms_GOIP.send(line,to_number,message):
print "Uh Oh, some kind of error in send_sms_GOIP"
return "False"
else:
return "Sent!"
@app.route("/in/", methods=['GET', 'POST'])
def sms_in():
"""
Handles incoming messages.
Currently getting incoming messages from GOIP8, routed to extension 1100 which triggers handle_chat.py
Expected args: Event-Date-Timestamp (Unix epoch), from, to, from_number, body
"""
debug(request)
uuid = uid.uuid5(uid.NAMESPACE_DNS, 'rootio.org')
edt = datetime.datetime.fromtimestamp(int(request.args.get('Event-Date-Timestamp'))/1000000) #.strftime('%Y-%m-%d %H:%M:%S')
fr = request.args.get('from') #This line should look up the station through its from address
to = request.args.get('to') #This will be the same for all related units -- again may make sense to have a representation of sending units
from_number = request.args.get('from_number') #look up a number now? Save a foreign key
body = request.args.get('body')
payload = { 'uuid': uuid,
'edt': edt,
'fr': fr,
'to': to,
'from_number': from_number,
'body': body,
}
    r = requests.get('http://' + TELEPHONY_SERVER_IP, params=payload)  # requests needs an explicit scheme
print r.text
return "looks alright " + str(uuid)
#return str(str(edt)+'\n'+fr+'->'+to+'\n'+from_number+'\n'+body+'\n'+uuid)
if __name__ == "__main__":
app.run(debug=True)
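    # Note: app.run() blocks, so the GOIP init request below is only issued
    # once the Flask server has stopped.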
r = requests.get('http://'+GOIP_SERVER_IP+'/init_goip')
|
rootio/rootio_telephony
|
sms_utils/sms_server.py
|
Python
|
agpl-3.0
| 3,873
| 0.018848
|
# Copyright (C) 2013-2020 Internet Systems Consortium.
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# Author: Wlodzimierz Wencel
config_file_set = {
#number : [named.conf, rndc.conf, fwd.db, rev.db ]
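    # Sets 31 and 32 below additionally carry second and third forward/reverse
    # zone pairs (fwd2.db/rev2.db, fwd3.db/rev3.db) for multi-zone setups.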
1: ["""
options {
directory "${data_path}"; // Working directory
listen-on-v6 port ${dns_port} { ${dns_addr}; };
allow-query-cache { none; }; // Do not allow access to cache
allow-update { any; }; // This is the default
allow-query { any; }; // This is the default
recursion no; // Do not provide recursive service
};
zone "1.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa" {
type master;
file "rev.db";
notify no;
allow-update { any; }; // This is the default
allow-query { any; }; // This is the default
};
zone "six.example.com" {
type master;
file "fwd.db";
notify no;
allow-update { any; }; // This is the default
allow-transfer { any; };
allow-query { any; }; // This is the default
};
#Use with the following in named.conf, adjusting the allow list as needed:
key "rndc-key" {
algorithm hmac-md5;
secret "+kOEcvxPTCPxzGqB5n5FeA==";
};
controls {
inet 127.0.0.1 port 53001
allow { 127.0.0.1; } keys { "rndc-key"; };
};
logging{
channel simple_log {
file "/tmp/dns.log";
severity debug 99;
print-time yes;
print-severity yes;
print-category yes;
};
category default{
simple_log;
};
category queries{
simple_log;
};
};
""", """
key "rndc-key" {
algorithm hmac-md5;
secret "+kOEcvxPTCPxzGqB5n5FeA==";
};
options {
default-key "rndc-key";
default-server 127.0.0.1;
default-port 953;
};
""", """$ORIGIN .
$TTL 86400 ; 1 day
six.example.com IN SOA dns6-1.six.example.com. mail.six.example.com. (
107 ; serial
3600 ; refresh (1 hour)
900 ; retry (15 minutes)
2592000 ; expire (4 weeks 2 days)
3600 ; minimum (1 hour)
)
NS dns6-1.six.example.com.
$ORIGIN six.example.com.
dns6-1 AAAA 2001:db8:1::1
nanny6 AAAA 2001:db8:1::10
""", """$ORIGIN .
$TTL 3600 ; 1 hour
1.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa IN SOA dns6-1.six.example.com. mail.six.example.com. (
102 ; serial
3600 ; refresh (1 hour)
900 ; retry (15 minutes)
604800 ; expire (1 week)
3600 ; minimum (1 hour)
)
NS dns6-1.six.example.com.
$ORIGIN 1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.1.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa.
0 PTR nanny6.six.exmaple.com.1.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa.
"""],
2: ["""
options {
directory "${data_path}"; // Working directory
listen-on-v6 port ${dns_port} { ${dns_addr}; };
allow-query-cache { none; }; // Do not allow access to cache
allow-update { any; }; // This is the default
allow-query { any; }; // This is the default
recursion no; // Do not provide recursive service
};
zone "1.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa" {
type master;
file "rev.db";
notify no;
allow-update { any; }; // This is the default
allow-query { any; }; // This is the default
};
zone "six.example.com" {
type master;
file "fwd.db";
notify no;
allow-update { any; }; // This is the default
allow-transfer { any; };
allow-query { any; }; // This is the default
};
#Use with the following in named.conf, adjusting the allow list as needed:
key "rndc-key" {
algorithm hmac-md5;
secret "+kOEcvxPTCPxzGqB5n5FeA==";
};
controls {
inet 127.0.0.1 port 53001
allow { 127.0.0.1; } keys { "rndc-key"; };
};
logging{
channel simple_log {
file "/tmp/dns.log";
severity debug 99;
print-time yes;
print-severity yes;
print-category yes;
};
category default{
simple_log;
};
category queries{
simple_log;
};
};
""", """
key "rndc-key" {
algorithm hmac-md5;
secret "+kOEcvxPTCPxzGqB5n5FeA==";
};
options {
default-key "rndc-key";
default-server 127.0.0.1;
default-port 953;
};
""", """$ORIGIN .
$TTL 86400 ; 1 day
six.example.com IN SOA dns6-1.six.example.com. mail.six.example.com. (
107 ; serial
3600 ; refresh (1 hour)
900 ; retry (15 minutes)
2592000 ; expire (4 weeks 2 days)
3600 ; minimum (1 hour)
)
NS dns6-1.six.example.com.
$ORIGIN six.example.com.
dns6-1 AAAA 2001:db8:1::1
""", """$ORIGIN .
$TTL 3600 ; 1 hour
1.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa IN SOA dns6-1.six.example.com. mail.six.example.com. (
102 ; serial
3600 ; refresh (1 hour)
900 ; retry (15 minutes)
604800 ; expire (1 week)
3600 ; minimum (1 hour)
)
NS dns6-1.six.example.com.
$ORIGIN 1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.1.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa.
0 PTR dns6-1.six.example.com.1.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa.
"""],
3: ["""
options {
directory "${data_path}"; // Working directory
listen-on-v6 port ${dns_port} { ${dns_addr}; };
allow-query-cache { none; }; // Do not allow access to cache
allow-update { any; }; // This is the default
allow-query { any; }; // This is the default
recursion no; // Do not provide recursive service
};
zone "1.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa" {
type master;
file "rev.db";
notify no;
allow-update { key forge.sha1.key; };
allow-query { any; };
};
zone "six.example.com" {
type master;
file "fwd.db";
notify no;
allow-update { key forge.sha1.key; };
allow-query { any; };
};
key "forge.sha1.key" {
algorithm hmac-sha1;
secret "PN4xKZ/jDobCMlo4rpr70w==";
};
#Use with the following in named.conf, adjusting the allow list as needed:
key "rndc-key" {
algorithm hmac-md5;
secret "+kOEcvxPTCPxzGqB5n5FeA==";
};
controls {
inet 127.0.0.1 port 53001
allow { 127.0.0.1; } keys { "rndc-key"; };
};
logging{
channel simple_log {
file "/tmp/dns.log";
severity debug 99;
print-time yes;
print-severity yes;
print-category yes;
};
category default{
simple_log;
};
category queries{
simple_log;
};
};
""", """
key "rndc-key" {
algorithm hmac-md5;
secret "+kOEcvxPTCPxzGqB5n5FeA==";
};
options {
default-key "rndc-key";
default-server 127.0.0.1;
default-port 953;
};
""", """$ORIGIN .
$TTL 86400 ; 1 day
six.example.com IN SOA dns6-1.six.example.com. mail.six.example.com. (
107 ; serial
3600 ; refresh (1 hour)
900 ; retry (15 minutes)
2592000 ; expire (4 weeks 2 days)
3600 ; minimum (1 hour)
)
NS dns6-1.six.example.com.
$ORIGIN six.example.com.
dns6-1 AAAA 2001:db8:1::1
nanny6 AAAA 2001:db8:1::10
""", """$ORIGIN .
$TTL 3600 ; 1 hour
1.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa IN SOA dns6-1.six.example.com. mail.six.example.com. (
102 ; serial
3600 ; refresh (1 hour)
900 ; retry (15 minutes)
604800 ; expire (1 week)
3600 ; minimum (1 hour)
)
NS dns6-1.six.example.com.
$ORIGIN 1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.1.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa.
0 PTR nanny6.six.exmaple.com.1.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa.
"""],
4: ["""
options {
directory "${data_path}"; // Working directory
listen-on-v6 port ${dns_port} { ${dns_addr}; };
allow-query-cache { none; }; // Do not allow access to cache
allow-update { any; }; // This is the default
allow-query { any; }; // This is the default
recursion no; // Do not provide recursive service
};
zone "1.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa" {
type master;
file "rev.db";
notify no;
allow-update { key forge.sha224.key; };
allow-query { any; };
};
zone "six.example.com" {
type master;
file "fwd.db";
notify no;
allow-update { key forge.sha224.key; };
allow-query { any; };
};
key "forge.sha224.key" {
algorithm hmac-sha224;
secret "TxAiO5TRKkFyHSCa4erQZQ==";
};
#Use with the following in named.conf, adjusting the allow list as needed:
key "rndc-key" {
algorithm hmac-md5;
secret "+kOEcvxPTCPxzGqB5n5FeA==";
};
controls {
inet 127.0.0.1 port 53001
allow { 127.0.0.1; } keys { "rndc-key"; };
};
logging{
channel simple_log {
file "/tmp/dns.log";
severity debug 99;
print-time yes;
print-severity yes;
print-category yes;
};
category default{
simple_log;
};
category queries{
simple_log;
};
};
""", """
key "rndc-key" {
algorithm hmac-md5;
secret "+kOEcvxPTCPxzGqB5n5FeA==";
};
options {
default-key "rndc-key";
default-server 127.0.0.1;
default-port 953;
};
""", """$ORIGIN .
$TTL 86400 ; 1 day
six.example.com IN SOA dns6-1.six.example.com. mail.six.example.com. (
107 ; serial
3600 ; refresh (1 hour)
900 ; retry (15 minutes)
2592000 ; expire (4 weeks 2 days)
3600 ; minimum (1 hour)
)
NS dns6-1.six.example.com.
$ORIGIN six.example.com.
dns6-1 AAAA 2001:db8:1::1
nanny6 AAAA 2001:db8:1::10
""", """$ORIGIN .
$TTL 3600 ; 1 hour
1.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa IN SOA dns6-1.six.example.com. mail.six.example.com. (
102 ; serial
3600 ; refresh (1 hour)
900 ; retry (15 minutes)
604800 ; expire (1 week)
3600 ; minimum (1 hour)
)
NS dns6-1.six.example.com.
$ORIGIN 1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.1.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa.
0 PTR nanny6.six.exmaple.com.1.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa.
"""],
5: ["""
options {
directory "${data_path}"; // Working directory
listen-on-v6 port ${dns_port} { ${dns_addr}; };
allow-query-cache { none; }; // Do not allow access to cache
allow-update { any; }; // This is the default
allow-query { any; }; // This is the default
recursion no; // Do not provide recursive service
};
zone "1.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa" {
type master;
file "rev.db";
notify no;
allow-update { key forge.sha256.key; };
allow-query { any; };
};
zone "six.example.com" {
type master;
file "fwd.db";
notify no;
allow-update { key forge.sha256.key; };
allow-query { any; };
};
key "forge.sha256.key" {
algorithm hmac-sha256;
secret "5AYMijv0rhZJyQqK/caV7g==";
};
#Use with the following in named.conf, adjusting the allow list as needed:
key "rndc-key" {
algorithm hmac-md5;
secret "+kOEcvxPTCPxzGqB5n5FeA==";
};
controls {
inet 127.0.0.1 port 53001
allow { 127.0.0.1; } keys { "rndc-key"; };
};
logging{
channel simple_log {
file "/tmp/dns.log";
severity debug 99;
print-time yes;
print-severity yes;
print-category yes;
};
category default{
simple_log;
};
category queries{
simple_log;
};
};
""", """
key "rndc-key" {
algorithm hmac-md5;
secret "+kOEcvxPTCPxzGqB5n5FeA==";
};
options {
default-key "rndc-key";
default-server 127.0.0.1;
default-port 953;
};
""", """$ORIGIN .
$TTL 86400 ; 1 day
six.example.com IN SOA dns6-1.six.example.com. mail.six.example.com. (
107 ; serial
3600 ; refresh (1 hour)
900 ; retry (15 minutes)
2592000 ; expire (4 weeks 2 days)
3600 ; minimum (1 hour)
)
NS dns6-1.six.example.com.
$ORIGIN six.example.com.
dns6-1 AAAA 2001:db8:1::1
nanny6 AAAA 2001:db8:1::10
""", """$ORIGIN .
$TTL 3600 ; 1 hour
1.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa IN SOA dns6-1.six.example.com. mail.six.example.com. (
102 ; serial
3600 ; refresh (1 hour)
900 ; retry (15 minutes)
604800 ; expire (1 week)
3600 ; minimum (1 hour)
)
NS dns6-1.six.example.com.
$ORIGIN 1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.1.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa.
0 PTR nanny6.six.exmaple.com.1.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa.
"""],
6: ["""
options {
directory "${data_path}"; // Working directory
listen-on-v6 port ${dns_port} { ${dns_addr}; };
allow-query-cache { none; }; // Do not allow access to cache
allow-update { any; }; // This is the default
allow-query { any; }; // This is the default
recursion no; // Do not provide recursive service
};
zone "1.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa" {
type master;
file "rev.db";
notify no;
allow-update { key forge.sha384.key; };
allow-query { any; };
};
zone "six.example.com" {
type master;
file "fwd.db";
notify no;
allow-update { key forge.sha384.key; };
allow-query { any; };
};
key "forge.sha384.key" {
algorithm hmac-sha384;
secret "21upyvp7zcG0S2PB4+kuQQ==";
};
#Use with the following in named.conf, adjusting the allow list as needed:
key "rndc-key" {
algorithm hmac-md5;
secret "+kOEcvxPTCPxzGqB5n5FeA==";
};
controls {
inet 127.0.0.1 port 53001
allow { 127.0.0.1; } keys { "rndc-key"; };
};
logging{
channel simple_log {
file "/tmp/dns.log";
severity debug 99;
print-time yes;
print-severity yes;
print-category yes;
};
category default{
simple_log;
};
category queries{
simple_log;
};
};
""", """
key "rndc-key" {
algorithm hmac-md5;
secret "+kOEcvxPTCPxzGqB5n5FeA==";
};
options {
default-key "rndc-key";
default-server 127.0.0.1;
default-port 953;
};
""", """$ORIGIN .
$TTL 86400 ; 1 day
six.example.com IN SOA dns6-1.six.example.com. mail.six.example.com. (
107 ; serial
3600 ; refresh (1 hour)
900 ; retry (15 minutes)
2592000 ; expire (4 weeks 2 days)
3600 ; minimum (1 hour)
)
NS dns6-1.six.example.com.
$ORIGIN six.example.com.
dns6-1 AAAA 2001:db8:1::1
nanny6 AAAA 2001:db8:1::10
""", """$ORIGIN .
$TTL 3600 ; 1 hour
1.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa IN SOA dns6-1.six.example.com. mail.six.example.com. (
102 ; serial
3600 ; refresh (1 hour)
900 ; retry (15 minutes)
604800 ; expire (1 week)
3600 ; minimum (1 hour)
)
NS dns6-1.six.example.com.
$ORIGIN 1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.1.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa.
0 PTR nanny6.six.exmaple.com.1.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa.
"""],
6: ["""
options {
directory "${data_path}"; // Working directory
listen-on-v6 port ${dns_port} { ${dns_addr}; };
allow-query-cache { none; }; // Do not allow access to cache
allow-update { any; }; // This is the default
allow-query { any; }; // This is the default
recursion no; // Do not provide recursive service
};
zone "1.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa" {
type master;
file "rev.db";
notify no;
allow-update { key forge.sha384.key; };
allow-query { any; };
};
zone "six.example.com" {
type master;
file "fwd.db";
notify no;
allow-update { key forge.sha384.key; };
allow-query { any; };
};
key "forge.sha384.key" {
algorithm hmac-sha384;
secret "21upyvp7zcG0S2PB4+kuQQ==";
};
#Use with the following in named.conf, adjusting the allow list as needed:
key "rndc-key" {
algorithm hmac-md5;
secret "+kOEcvxPTCPxzGqB5n5FeA==";
};
controls {
inet 127.0.0.1 port 53001
allow { 127.0.0.1; } keys { "rndc-key"; };
};
logging{
channel simple_log {
file "/tmp/dns.log";
severity debug 99;
print-time yes;
print-severity yes;
print-category yes;
};
category default{
simple_log;
};
category queries{
simple_log;
};
};
""", """
key "rndc-key" {
algorithm hmac-md5;
secret "+kOEcvxPTCPxzGqB5n5FeA==";
};
options {
default-key "rndc-key";
default-server 127.0.0.1;
default-port 953;
};
""", """$ORIGIN .
$TTL 86400 ; 1 day
six.example.com IN SOA dns6-1.six.example.com. mail.six.example.com. (
107 ; serial
3600 ; refresh (1 hour)
900 ; retry (15 minutes)
2592000 ; expire (4 weeks 2 days)
3600 ; minimum (1 hour)
)
NS dns6-1.six.example.com.
$ORIGIN six.example.com.
dns6-1 AAAA 2001:db8:1::1
nanny6 AAAA 2001:db8:1::10
""", """$ORIGIN .
$TTL 3600 ; 1 hour
1.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa IN SOA dns6-1.six.example.com. mail.six.example.com. (
102 ; serial
3600 ; refresh (1 hour)
900 ; retry (15 minutes)
604800 ; expire (1 week)
3600 ; minimum (1 hour)
)
NS dns6-1.six.example.com.
$ORIGIN 1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.1.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa.
0 PTR nanny6.six.exmaple.com.1.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa.
"""],
7: ["""
options {
directory "${data_path}"; // Working directory
listen-on-v6 port ${dns_port} { ${dns_addr}; };
allow-query-cache { none; }; // Do not allow access to cache
allow-update { any; }; // This is the default
allow-query { any; }; // This is the default
recursion no; // Do not provide recursive service
};
zone "1.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa" {
type master;
file "rev.db";
notify no;
allow-update { key forge.sha512.key; };
allow-query { any; };
};
zone "six.example.com" {
type master;
file "fwd.db";
notify no;
allow-update { key forge.sha512.key; };
allow-query { any; };
};
key "forge.sha512.key" {
algorithm hmac-sha512;
secret "jBng5D6QL4f8cfLUUwE7OQ==";
};
#Use with the following in named.conf, adjusting the allow list as needed:
key "rndc-key" {
algorithm hmac-md5;
secret "+kOEcvxPTCPxzGqB5n5FeA==";
};
controls {
inet 127.0.0.1 port 53001
allow { 127.0.0.1; } keys { "rndc-key"; };
};
logging{
channel simple_log {
file "/tmp/dns.log";
severity debug 99;
print-time yes;
print-severity yes;
print-category yes;
};
category default{
simple_log;
};
category queries{
simple_log;
};
};
""", """
key "rndc-key" {
algorithm hmac-md5;
secret "+kOEcvxPTCPxzGqB5n5FeA==";
};
options {
default-key "rndc-key";
default-server 127.0.0.1;
default-port 953;
};
""", """$ORIGIN .
$TTL 86400 ; 1 day
six.example.com IN SOA dns6-1.six.example.com. mail.six.example.com. (
107 ; serial
3600 ; refresh (1 hour)
900 ; retry (15 minutes)
2592000 ; expire (4 weeks 2 days)
3600 ; minimum (1 hour)
)
NS dns6-1.six.example.com.
$ORIGIN six.example.com.
dns6-1 AAAA 2001:db8:1::1
nanny6 AAAA 2001:db8:1::10
""", """$ORIGIN .
$TTL 3600 ; 1 hour
1.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa IN SOA dns6-1.six.example.com. mail.six.example.com. (
102 ; serial
3600 ; refresh (1 hour)
900 ; retry (15 minutes)
604800 ; expire (1 week)
3600 ; minimum (1 hour)
)
NS dns6-1.six.example.com.
$ORIGIN 1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.1.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa.
0 PTR nanny6.six.exmaple.com.1.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa.
"""],
8: ["""
options {
directory "${data_path}"; // Working directory
listen-on-v6 port ${dns_port} { ${dns_addr}; };
allow-query-cache { none; }; // Do not allow access to cache
allow-update { any; }; // This is the default
allow-query { any; }; // This is the default
recursion no; // Do not provide recursive service
};
zone "1.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa" {
type master;
file "rev.db";
notify no;
allow-update { key forge.md5.key; };
allow-query { any; };
};
zone "six.example.com" {
type master;
file "fwd.db";
notify no;
allow-update { key forge.md5.key; };
allow-query { any; };
};
key "forge.md5.key" {
algorithm hmac-md5;
secret "bX3Hs+fG/tThidQPuhK1mA==";
};
#Use with the following in named.conf, adjusting the allow list as needed:
key "rndc-key" {
algorithm hmac-md5;
secret "+kOEcvxPTCPxzGqB5n5FeA==";
};
controls {
inet 127.0.0.1 port 53001
allow { 127.0.0.1; } keys { "rndc-key"; };
};
logging{
channel simple_log {
file "/tmp/dns.log";
severity debug 99;
print-time yes;
print-severity yes;
print-category yes;
};
category default{
simple_log;
};
category queries{
simple_log;
};
};
""", """
key "rndc-key" {
algorithm hmac-md5;
secret "+kOEcvxPTCPxzGqB5n5FeA==";
};
options {
default-key "rndc-key";
default-server 127.0.0.1;
default-port 953;
};
""", """$ORIGIN .
$TTL 86400 ; 1 day
six.example.com IN SOA dns6-1.six.example.com. mail.six.example.com. (
107 ; serial
3600 ; refresh (1 hour)
900 ; retry (15 minutes)
2592000 ; expire (4 weeks 2 days)
3600 ; minimum (1 hour)
)
NS dns6-1.six.example.com.
$ORIGIN six.example.com.
dns6-1 AAAA 2001:db8:1::1
nanny6 AAAA 2001:db8:1::10
""", """$ORIGIN .
$TTL 3600 ; 1 hour
1.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa IN SOA dns6-1.six.example.com. mail.six.example.com. (
102 ; serial
3600 ; refresh (1 hour)
900 ; retry (15 minutes)
604800 ; expire (1 week)
3600 ; minimum (1 hour)
)
NS dns6-1.six.example.com.
$ORIGIN 1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.1.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa.
0 PTR nanny6.six.exmaple.com.1.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa.
"""],
9: ["""
options {
directory "${data_path}"; // Working directory
listen-on-v6 port ${dns_port} { ${dns_addr}; };
allow-query-cache { none; }; // Do not allow access to cache
allow-update { any; }; // This is the default
allow-query { any; }; // This is the default
recursion no; // Do not provide recursive service
};
zone "1.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa" {
type master;
file "rev.db";
notify no;
allow-update { key forge.sha512.key; };
allow-query { any; };
};
zone "six.example.com" {
type master;
file "fwd.db";
notify no;
allow-update { key forge.md5.key; };
allow-query { any; };
};
key "forge.sha512.key" {
algorithm hmac-sha512;
secret "jBng5D6QL4f8cfLUUwE7OQ==";
};
key "forge.md5.key" {
algorithm hmac-md5;
secret "bX3Hs+fG/tThidQPuhK1mA==";
};
#Use with the following in named.conf, adjusting the allow list as needed:
key "rndc-key" {
algorithm hmac-md5;
secret "+kOEcvxPTCPxzGqB5n5FeA==";
};
controls {
inet 127.0.0.1 port 53001
allow { 127.0.0.1; } keys { "rndc-key"; };
};
logging{
channel simple_log {
file "/tmp/dns.log";
severity debug 99;
print-time yes;
print-severity yes;
print-category yes;
};
category default{
simple_log;
};
category queries{
simple_log;
};
};
""", """
key "rndc-key" {
algorithm hmac-md5;
secret "+kOEcvxPTCPxzGqB5n5FeA==";
};
options {
default-key "rndc-key";
default-server 127.0.0.1;
default-port 953;
};
""", """$ORIGIN .
$TTL 86400 ; 1 day
six.example.com IN SOA dns6-1.six.example.com. mail.six.example.com. (
107 ; serial
3600 ; refresh (1 hour)
900 ; retry (15 minutes)
2592000 ; expire (4 weeks 2 days)
3600 ; minimum (1 hour)
)
NS dns6-1.six.example.com.
$ORIGIN six.example.com.
dns6-1 AAAA 2001:db8:1::1
nanny6 AAAA 2001:db8:1::10
""", """$ORIGIN .
$TTL 3600 ; 1 hour
1.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa IN SOA dns6-1.six.example.com. mail.six.example.com. (
102 ; serial
3600 ; refresh (1 hour)
900 ; retry (15 minutes)
604800 ; expire (1 week)
3600 ; minimum (1 hour)
)
NS dns6-1.six.example.com.
$ORIGIN 1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.1.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa.
0 PTR nanny6.six.exmaple.com.1.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa.
"""],
10: ["", "", "", ""],
11: ["", "", "", ""],
12: ["", "", "", ""],
13: ["", "", "", ""],
14: ["", "", "", ""],
15: ["", "", "", ""],
## v4 configs!
20: ["""
options {
directory "${data_path}"; // Working directory
listen-on port ${dns_port} { ${dns_addr}; };
allow-query-cache { none; }; // Do not allow access to cache
allow-update { any; }; // This is the default
allow-query { any; }; // This is the default
recursion no; // Do not provide recursive service
};
zone "50.168.192.in-addr.arpa." {
type master;
file "rev.db";
notify no;
allow-update { any; }; // This is the default
allow-query { any; }; // This is the default
};
zone "four.example.com" {
type master;
file "fwd.db";
notify no;
allow-update { any; }; // This is the default
allow-transfer { any; };
allow-query { any; }; // This is the default
};
#Use with the following in named.conf, adjusting the allow list as needed:
key "rndc-key" {
algorithm hmac-md5;
secret "+kOEcvxPTCPxzGqB5n5FeA==";
};
controls {
inet 127.0.0.1 port 53001 allow { 127.0.0.1; } keys { "rndc-key"; };
};
logging{
channel simple_log {
file "/tmp/dns.log";
severity debug 99;
print-time yes;
print-severity yes;
print-category yes;
};
category default{
simple_log;
};
category queries{
simple_log;
};
};
""", """
key "rndc-key" {
algorithm hmac-md5;
secret "+kOEcvxPTCPxzGqB5n5FeA==";
};
options {
default-key "rndc-key";
default-server 127.0.0.1;
default-port 953;
};
""", """$ORIGIN .
$TTL 86400 ; 1 day
four.example.com IN SOA dns.four.example.com. mail.four.example.com. (
106 ; serial
3600 ; refresh (1 hour)
900 ; retry (15 minutes)
2592000 ; expire (4 weeks 2 days)
3600 ; minimum (1 hour)
)
NS dns.four.example.com.
$ORIGIN four.example.com.
dns A 172.16.1.1
""", """$TTL 1h ; Default TTL
@ IN SOA dns1.four.example.com. hostmaster.example.com. (
100 ; serial
1h ; slave refresh interval
15m ; slave retry interval
1w ; slave copy expire time
1h ; NXDOMAIN cache time
)
NS dns1.four.example.com.
$ORIGIN 50.168.192.in-addr.arpa.
1 IN PTR dns1.four.example.com.
"""],
21: ["""
options {
directory "${data_path}"; // Working directory
listen-on port ${dns_port} { ${dns_addr}; };
allow-query-cache { none; }; // Do not allow access to cache
allow-update { any; }; // This is the default
allow-query { any; }; // This is the default
recursion no; // Do not provide recursive service
};
zone "50.168.192.in-addr.arpa." {
type master;
file "rev.db";
notify no;
allow-update { key forge.sha1.key; };
allow-query { any; }; // This is the default
};
zone "four.example.com" {
type master;
file "fwd.db";
notify no;
allow-update { key forge.sha1.key; };
allow-transfer { any; };
allow-query { any; }; // This is the default
};
key "forge.sha1.key" {
algorithm hmac-sha1;
secret "PN4xKZ/jDobCMlo4rpr70w==";
};
#Use with the following in named.conf, adjusting the allow list as needed:
key "rndc-key" {
algorithm hmac-md5;
secret "+kOEcvxPTCPxzGqB5n5FeA==";
};
controls {
inet 127.0.0.1 port 53001 allow { 127.0.0.1; } keys { "rndc-key"; };
};
logging{
channel simple_log {
file "/tmp/dns.log";
severity debug 99;
print-time yes;
print-severity yes;
print-category yes;
};
category default{
simple_log;
};
category queries{
simple_log;
};
};
""", """
key "rndc-key" {
algorithm hmac-md5;
secret "+kOEcvxPTCPxzGqB5n5FeA==";
};
options {
default-key "rndc-key";
default-server 127.0.0.1;
default-port 953;
};
""", """$ORIGIN .
$TTL 86400 ; 1 day
four.example.com IN SOA dns.four.example.com. mail.four.example.com. (
106 ; serial
3600 ; refresh (1 hour)
900 ; retry (15 minutes)
2592000 ; expire (4 weeks 2 days)
3600 ; minimum (1 hour)
)
NS dns.four.example.com.
$ORIGIN four.example.com.
dns A 172.16.1.1
""", """$TTL 1h ; Default TTL
@ IN SOA dns1.four.example.com. hostmaster.example.com. (
100 ; serial
1h ; slave refresh interval
15m ; slave retry interval
1w ; slave copy expire time
1h ; NXDOMAIN cache time
)
NS dns1.four.example.com.
$ORIGIN 50.168.192.in-addr.arpa.
1 IN PTR dns1.four.example.com.
"""],
22: ["""
options {
directory "${data_path}"; // Working directory
listen-on port ${dns_port} { ${dns_addr}; };
allow-query-cache { none; }; // Do not allow access to cache
allow-update { any; }; // This is the default
allow-query { any; }; // This is the default
recursion no; // Do not provide recursive service
};
zone "50.168.192.in-addr.arpa." {
type master;
file "rev.db";
notify no;
allow-update { key forge.sha224.key; };
allow-query { any; }; // This is the default
};
zone "four.example.com" {
type master;
file "fwd.db";
notify no;
allow-update { key forge.sha224.key; };
allow-transfer { any; };
allow-query { any; }; // This is the default
};
key "forge.sha224.key" {
algorithm hmac-sha224;
secret "TxAiO5TRKkFyHSCa4erQZQ==";
};
#Use with the following in named.conf, adjusting the allow list as needed:
key "rndc-key" {
algorithm hmac-md5;
secret "+kOEcvxPTCPxzGqB5n5FeA==";
};
controls {
inet 127.0.0.1 port 53001 allow { 127.0.0.1; } keys { "rndc-key"; };
};
logging{
channel simple_log {
file "/tmp/dns.log";
severity debug 99;
print-time yes;
print-severity yes;
print-category yes;
};
category default{
simple_log;
};
category queries{
simple_log;
};
};
""", """
key "rndc-key" {
algorithm hmac-md5;
secret "+kOEcvxPTCPxzGqB5n5FeA==";
};
options {
default-key "rndc-key";
default-server 127.0.0.1;
default-port 953;
};
""", """$ORIGIN .
$TTL 86400 ; 1 day
four.example.com IN SOA dns.four.example.com. mail.four.example.com. (
106 ; serial
3600 ; refresh (1 hour)
900 ; retry (15 minutes)
2592000 ; expire (4 weeks 2 days)
3600 ; minimum (1 hour)
)
NS dns.four.example.com.
$ORIGIN four.example.com.
dns A 172.16.1.1
""", """$TTL 1h ; Default TTL
@ IN SOA dns1.four.example.com. hostmaster.example.com. (
100 ; serial
1h ; slave refresh interval
15m ; slave retry interval
1w ; slave copy expire time
1h ; NXDOMAIN cache time
)
NS dns1.four.example.com.
$ORIGIN 50.168.192.in-addr.arpa.
1 IN PTR dns1.four.example.com.
"""],
23: ["""
options {
directory "${data_path}"; // Working directory
listen-on port ${dns_port} { ${dns_addr}; };
allow-query-cache { none; }; // Do not allow access to cache
allow-update { any; }; // This is the default
allow-query { any; }; // This is the default
recursion no; // Do not provide recursive service
};
zone "50.168.192.in-addr.arpa." {
type master;
file "rev.db";
notify no;
allow-update { key forge.sha256.key; };
allow-query { any; }; // This is the default
};
zone "four.example.com" {
type master;
file "fwd.db";
notify no;
allow-update { key forge.sha256.key; };
allow-transfer { any; };
allow-query { any; }; // This is the default
};
key "forge.sha256.key" {
algorithm hmac-sha256;
secret "5AYMijv0rhZJyQqK/caV7g==";
};
#Use with the following in named.conf, adjusting the allow list as needed:
key "rndc-key" {
algorithm hmac-md5;
secret "+kOEcvxPTCPxzGqB5n5FeA==";
};
controls {
inet 127.0.0.1 port 53001 allow { 127.0.0.1; } keys { "rndc-key"; };
};
logging{
channel simple_log {
file "/tmp/dns.log";
severity debug 99;
print-time yes;
print-severity yes;
print-category yes;
};
category default{
simple_log;
};
category queries{
simple_log;
};
};
""", """
key "rndc-key" {
algorithm hmac-md5;
secret "+kOEcvxPTCPxzGqB5n5FeA==";
};
options {
default-key "rndc-key";
default-server 127.0.0.1;
default-port 953;
};
""", """$ORIGIN .
$TTL 86400 ; 1 day
four.example.com IN SOA dns.four.example.com. mail.four.example.com. (
106 ; serial
3600 ; refresh (1 hour)
900 ; retry (15 minutes)
2592000 ; expire (4 weeks 2 days)
3600 ; minimum (1 hour)
)
NS dns.four.example.com.
$ORIGIN four.example.com.
dns A 172.16.1.1
""", """$TTL 1h ; Default TTL
@ IN SOA dns1.four.example.com. hostmaster.example.com. (
100 ; serial
1h ; slave refresh interval
15m ; slave retry interval
1w ; slave copy expire time
1h ; NXDOMAIN cache time
)
NS dns1.four.example.com.
$ORIGIN 50.168.192.in-addr.arpa.
1 IN PTR dns1.four.example.com.
"""],
24: ["""
options {
directory "${data_path}"; // Working directory
listen-on port ${dns_port} { ${dns_addr}; };
allow-query-cache { none; }; // Do not allow access to cache
allow-update { any; }; // This is the default
allow-query { any; }; // This is the default
recursion no; // Do not provide recursive service
};
zone "50.168.192.in-addr.arpa." {
type master;
file "rev.db";
notify no;
allow-update { key forge.sha384.key; };
allow-query { any; }; // This is the default
};
zone "four.example.com" {
type master;
file "fwd.db";
notify no;
allow-update { key forge.sha384.key; };
allow-transfer { any; };
allow-query { any; }; // This is the default
};
key "forge.sha384.key" {
algorithm hmac-sha384;
secret "21upyvp7zcG0S2PB4+kuQQ==";
};
#Use with the following in named.conf, adjusting the allow list as needed:
key "rndc-key" {
algorithm hmac-md5;
secret "+kOEcvxPTCPxzGqB5n5FeA==";
};
controls {
inet 127.0.0.1 port 53001 allow { 127.0.0.1; } keys { "rndc-key"; };
};
logging{
channel simple_log {
file "/tmp/dns.log";
severity debug 99;
print-time yes;
print-severity yes;
print-category yes;
};
category default{
simple_log;
};
category queries{
simple_log;
};
};
""", """
key "rndc-key" {
algorithm hmac-md5;
secret "+kOEcvxPTCPxzGqB5n5FeA==";
};
options {
default-key "rndc-key";
default-server 127.0.0.1;
default-port 953;
};
""", """$ORIGIN .
$TTL 86400 ; 1 day
four.example.com IN SOA dns.four.example.com. mail.four.example.com. (
106 ; serial
3600 ; refresh (1 hour)
900 ; retry (15 minutes)
2592000 ; expire (4 weeks 2 days)
3600 ; minimum (1 hour)
)
NS dns.four.example.com.
$ORIGIN four.example.com.
dns A 172.16.1.1
""", """$TTL 1h ; Default TTL
@ IN SOA dns1.four.example.com. hostmaster.example.com. (
100 ; serial
1h ; slave refresh interval
15m ; slave retry interval
1w ; slave copy expire time
1h ; NXDOMAIN cache time
)
NS dns1.four.example.com.
$ORIGIN 50.168.192.in-addr.arpa.
1 IN PTR dns1.four.example.com.
"""],
25: ["""
options {
directory "${data_path}"; // Working directory
listen-on port ${dns_port} { ${dns_addr}; };
allow-query-cache { none; }; // Do not allow access to cache
allow-update { any; }; // This is the default
allow-query { any; }; // This is the default
recursion no; // Do not provide recursive service
};
zone "50.168.192.in-addr.arpa." {
type master;
file "rev.db";
notify no;
allow-update { key forge.sha512.key; };
allow-query { any; }; // This is the default
};
zone "four.example.com" {
type master;
file "fwd.db";
notify no;
allow-update { key forge.sha512.key; };
allow-transfer { any; };
allow-query { any; }; // This is the default
};
key "forge.sha512.key" {
algorithm hmac-sha512;
secret "jBng5D6QL4f8cfLUUwE7OQ==";
};
#Use with the following in named.conf, adjusting the allow list as needed:
key "rndc-key" {
algorithm hmac-md5;
secret "+kOEcvxPTCPxzGqB5n5FeA==";
};
controls {
inet 127.0.0.1 port 53001 allow { 127.0.0.1; } keys { "rndc-key"; };
};
logging{
channel simple_log {
file "/tmp/dns.log";
severity debug 99;
print-time yes;
print-severity yes;
print-category yes;
};
category default{
simple_log;
};
category queries{
simple_log;
};
};
""", """
key "rndc-key" {
algorithm hmac-md5;
secret "+kOEcvxPTCPxzGqB5n5FeA==";
};
options {
default-key "rndc-key";
default-server 127.0.0.1;
default-port 953;
};
""", """$ORIGIN .
$TTL 86400 ; 1 day
four.example.com IN SOA dns.four.example.com. mail.four.example.com. (
106 ; serial
3600 ; refresh (1 hour)
900 ; retry (15 minutes)
2592000 ; expire (4 weeks 2 days)
3600 ; minimum (1 hour)
)
NS dns.four.example.com.
$ORIGIN four.example.com.
dns A 172.16.1.1
""", """$TTL 1h ; Default TTL
@ IN SOA dns1.four.example.com. hostmaster.example.com. (
100 ; serial
1h ; slave refresh interval
15m ; slave retry interval
1w ; slave copy expire time
1h ; NXDOMAIN cache time
)
NS dns1.four.example.com.
$ORIGIN 50.168.192.in-addr.arpa.
1 IN PTR dns1.four.example.com.
"""],
26: ["""
options {
directory "${data_path}"; // Working directory
listen-on port ${dns_port} { ${dns_addr}; };
allow-query-cache { none; }; // Do not allow access to cache
allow-update { any; }; // This is the default
allow-query { any; }; // This is the default
recursion no; // Do not provide recursive service
};
zone "50.168.192.in-addr.arpa." {
type master;
file "rev.db";
notify no;
allow-update { key forge.md5.key; };
allow-query { any; }; // This is the default
};
zone "four.example.com" {
type master;
file "fwd.db";
notify no;
allow-update { key forge.md5.key; };
allow-transfer { any; };
allow-query { any; }; // This is the default
};
key "forge.md5.key" {
algorithm hmac-md5;
secret "bX3Hs+fG/tThidQPuhK1mA==";
};
#Use with the following in named.conf, adjusting the allow list as needed:
key "rndc-key" {
algorithm hmac-md5;
secret "+kOEcvxPTCPxzGqB5n5FeA==";
};
controls {
inet 127.0.0.1 port 53001 allow { 127.0.0.1; } keys { "rndc-key"; };
};
logging{
channel simple_log {
file "/tmp/dns.log";
severity debug 99;
print-time yes;
print-severity yes;
print-category yes;
};
category default{
simple_log;
};
category queries{
simple_log;
};
};
""", """
key "rndc-key" {
algorithm hmac-md5;
secret "+kOEcvxPTCPxzGqB5n5FeA==";
};
options {
default-key "rndc-key";
default-server 127.0.0.1;
default-port 953;
};
""", """$ORIGIN .
$TTL 86400 ; 1 day
four.example.com IN SOA dns.four.example.com. mail.four.example.com. (
106 ; serial
3600 ; refresh (1 hour)
900 ; retry (15 minutes)
2592000 ; expire (4 weeks 2 days)
3600 ; minimum (1 hour)
)
NS dns.four.example.com.
$ORIGIN four.example.com.
dns A 172.16.1.1
""", """$TTL 1h ; Default TTL
@ IN SOA dns1.four.example.com. hostmaster.example.com. (
100 ; serial
1h ; slave refresh interval
15m ; slave retry interval
1w ; slave copy expire time
1h ; NXDOMAIN cache time
)
NS dns1.four.example.com.
$ORIGIN 50.168.192.in-addr.arpa.
1 IN PTR dns1.four.example.com.
"""],
27: ["""
options {
directory "${data_path}"; // Working directory
listen-on port ${dns_port} { ${dns_addr}; };
allow-query-cache { none; }; // Do not allow access to cache
allow-update { any; }; // This is the default
allow-query { any; }; // This is the default
recursion no; // Do not provide recursive service
};
zone "50.168.192.in-addr.arpa." {
type master;
file "rev.db";
notify no;
allow-update { key forge.sha512.key; };
allow-query { any; }; // This is the default
};
zone "four.example.com" {
type master;
file "fwd.db";
notify no;
allow-update { key forge.md5.key; };
allow-transfer { any; };
allow-query { any; }; // This is the default
};
key "forge.md5.key" {
algorithm hmac-md5;
secret "bX3Hs+fG/tThidQPuhK1mA==";
};
key "forge.sha512.key" {
algorithm hmac-sha512;
secret "jBng5D6QL4f8cfLUUwE7OQ==";
};
#Use with the following in named.conf, adjusting the allow list as needed:
key "rndc-key" {
algorithm hmac-md5;
secret "+kOEcvxPTCPxzGqB5n5FeA==";
};
controls {
inet 127.0.0.1 port 53001 allow { 127.0.0.1; } keys { "rndc-key"; };
};
logging{
channel simple_log {
file "/tmp/dns.log";
severity debug 99;
print-time yes;
print-severity yes;
print-category yes;
};
category default{
simple_log;
};
category queries{
simple_log;
};
};
""", """
key "rndc-key" {
algorithm hmac-md5;
secret "+kOEcvxPTCPxzGqB5n5FeA==";
};
options {
default-key "rndc-key";
default-server 127.0.0.1;
default-port 953;
};
""", """$ORIGIN .
$TTL 86400 ; 1 day
four.example.com IN SOA dns.four.example.com. mail.four.example.com. (
106 ; serial
3600 ; refresh (1 hour)
900 ; retry (15 minutes)
2592000 ; expire (4 weeks 2 days)
3600 ; minimum (1 hour)
)
NS dns.four.example.com.
$ORIGIN four.example.com.
dns A 172.16.1.1
""", """$TTL 1h ; Default TTL
@ IN SOA dns1.four.example.com. hostmaster.example.com. (
100 ; serial
1h ; slave refresh interval
15m ; slave retry interval
1w ; slave copy expire time
1h ; NXDOMAIN cache time
)
NS dns1.four.example.com.
$ORIGIN 50.168.192.in-addr.arpa.
1 IN PTR dns1.four.example.com.
"""], 31: ["""
options {
directory "${data_path}"; // Working directory
listen-on-v6 port ${dns_port} { ${dns_addr}; };
allow-query-cache { none; }; // Do not allow access to cache
allow-update { any; }; // This is the default
allow-query { any; }; // This is the default
recursion no; // Do not provide recursive service
};
zone "a.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa" {
type master;
file "rev.db";
notify no;
allow-update { any; }; // This is the default
allow-query { any; }; // This is the default
};
zone "b.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa" {
type master;
file "rev2.db";
notify no;
allow-update { any; }; // This is the default
allow-query { any; }; // This is the default
};
zone "c.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa" {
type master;
file "rev3.db";
notify no;
allow-update { any; }; // This is the default
allow-query { any; }; // This is the default
};
zone "six.example.com" {
type master;
file "fwd.db";
notify no;
allow-update { any; }; // This is the default
allow-transfer { any; };
allow-query { any; }; // This is the default
};
zone "abc.example.com" {
type master;
file "fwd2.db";
notify no;
allow-update { any; }; // This is the default
allow-transfer { any; };
allow-query { any; }; // This is the default
};
zone "xyz.example.com" {
type master;
file "fwd3.db";
notify no;
allow-update { any; }; // This is the default
allow-transfer { any; };
allow-query { any; }; // This is the default
};
#Use with the following in named.conf, adjusting the allow list as needed:
key "rndc-key" {
algorithm hmac-md5;
secret "+kOEcvxPTCPxzGqB5n5FeA==";
};
controls {
inet 127.0.0.1 port 53001
allow { 127.0.0.1; } keys { "rndc-key"; };
};
logging{
channel simple_log {
file "/tmp/dns.log";
severity debug 99;
print-time yes;
print-severity yes;
print-category yes;
};
category default{
simple_log;
};
category queries{
simple_log;
};
};
""", """
key "rndc-key" {
algorithm hmac-md5;
secret "+kOEcvxPTCPxzGqB5n5FeA==";
};
options {
default-key "rndc-key";
default-server 127.0.0.1;
default-port 953;
};
""", """$ORIGIN .
$TTL 86400 ; 1 day
six.example.com IN SOA dns6-1.six.example.com. mail.six.example.com. (
107 ; serial
3600 ; refresh (1 hour)
900 ; retry (15 minutes)
2592000 ; expire (4 weeks 2 days)
3600 ; minimum (1 hour)
)
NS dns6-1.six.example.com.
$ORIGIN six.example.com.
dns6-1 AAAA 2001:db8:a::1
nanny6 AAAA 2001:db8:a::10
""", """$ORIGIN .
$TTL 3600 ; 1 hour
a.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa IN SOA dns6-1.six.example.com. mail.six.example.com. (
102 ; serial
3600 ; refresh (1 hour)
900 ; retry (15 minutes)
604800 ; expire (1 week)
3600 ; minimum (1 hour)
)
NS dns6-1.six.example.com.
$ORIGIN 1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.a.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa.
0 PTR nanny6.six.exmaple.com.a.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa.
""", """$ORIGIN .
$TTL 86400 ; 1 day
abc.example.com IN SOA dns6-1.abc.example.com. mail.abc.example.com. (
107 ; serial
3600 ; refresh (1 hour)
900 ; retry (15 minutes)
2592000 ; expire (4 weeks 2 days)
3600 ; minimum (1 hour)
)
NS dns6-1.abc.example.com.
$ORIGIN abc.example.com.
dns6-1 AAAA 2001:db8:b::1
nanny6 AAAA 2001:db8:b::10
""", """$ORIGIN .
$TTL 3600 ; 1 hour
b.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa IN SOA dns6-1.abc.example.com. mail.abc.example.com. (
102 ; serial
3600 ; refresh (1 hour)
900 ; retry (15 minutes)
604800 ; expire (1 week)
3600 ; minimum (1 hour)
)
NS dns6-1.abc.example.com.
$ORIGIN 1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.b.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa.
0 PTR nanny6.abc.exmaple.com.b.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa.
""", """$ORIGIN .
$TTL 86400 ; 1 day
xyz.example.com IN SOA dns6-1.xyz.example.com. mail.xyz.example.com. (
107 ; serial
3600 ; refresh (1 hour)
900 ; retry (15 minutes)
2592000 ; expire (4 weeks 2 days)
3600 ; minimum (1 hour)
)
NS dns6-1.xyz.example.com.
$ORIGIN xyz.example.com.
dns6-1 AAAA 2001:db8:c::1
nanny6 AAAA 2001:db8:c::10
""", """$ORIGIN .
$TTL 3600 ; 1 hour
c.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa IN SOA dns6-1.xyz.example.com. mail.xyz.example.com. (
102 ; serial
3600 ; refresh (1 hour)
900 ; retry (15 minutes)
604800 ; expire (1 week)
3600 ; minimum (1 hour)
)
NS dns6-1.xyz.example.com.
$ORIGIN 1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.c.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa.
0 PTR nanny6.xyz.exmaple.com.c.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa.
"""], 32: ["""
options {
directory "${data_path}"; // Working directory
listen-on port ${dns_port} { ${dns_addr}; };
allow-query-cache { none; }; // Do not allow access to cache
allow-update { any; }; // This is the default
allow-query { any; }; // This is the default
recursion no; // Do not provide recursive service
};
zone "50.168.192.in-addr.arpa." {
type master;
file "rev.db";
notify no;
allow-update { any; }; // This is the default
allow-query { any; }; // This is the default
};
zone "51.168.192.in-addr.arpa." {
type master;
file "rev2.db";
notify no;
allow-update { any; }; // This is the default
allow-query { any; }; // This is the default
};
zone "52.168.192.in-addr.arpa." {
type master;
file "rev3.db";
notify no;
allow-update { any; }; // This is the default
allow-query { any; }; // This is the default
};
zone "four.example.com" {
type master;
file "fwd.db";
notify no;
allow-update { any; }; // This is the default
allow-transfer { any; };
allow-query { any; }; // This is the default
};
zone "five.example.com" {
type master;
file "fwd2.db";
notify no;
allow-update { any; }; // This is the default
allow-transfer { any; };
allow-query { any; }; // This is the default
};
zone "three.example.com" {
type master;
file "fwd3.db";
notify no;
allow-update { any; }; // This is the default
allow-transfer { any; };
allow-query { any; }; // This is the default
};
#Use with the following in named.conf, adjusting the allow list as needed:
key "rndc-key" {
algorithm hmac-md5;
secret "+kOEcvxPTCPxzGqB5n5FeA==";
};
controls {
inet 127.0.0.1 port 53001 allow { 127.0.0.1; } keys { "rndc-key"; };
};
logging{
channel simple_log {
file "/tmp/dns.log";
severity debug 99;
print-time yes;
print-severity yes;
print-category yes;
};
category default{
simple_log;
};
category queries{
simple_log;
};
};
""", """
key "rndc-key" {
algorithm hmac-md5;
secret "+kOEcvxPTCPxzGqB5n5FeA==";
};
options {
default-key "rndc-key";
default-server 127.0.0.1;
default-port 953;
};
""", """$ORIGIN .
$TTL 86400 ; 1 day
four.example.com IN SOA dns.four.example.com. mail.four.example.com. (
106 ; serial
3600 ; refresh (1 hour)
900 ; retry (15 minutes)
2592000 ; expire (4 weeks 2 days)
3600 ; minimum (1 hour)
)
NS dns.four.example.com.
$ORIGIN four.example.com.
dns A 172.16.1.1
""", """$TTL 1h ; Default TTL
@ IN SOA dns1.four.example.com. hostmaster.example.com. (
100 ; serial
1h ; slave refresh interval
15m ; slave retry interval
1w ; slave copy expire time
1h ; NXDOMAIN cache time
)
NS dns1.four.example.com.
$ORIGIN 50.168.192.in-addr.arpa.
1 IN PTR dns1.four.example.com.
""", """$ORIGIN .
$TTL 86400 ; 1 day
five.example.com IN SOA dns.five.example.com. mail.five.example.com. (
106 ; serial
3600 ; refresh (1 hour)
900 ; retry (15 minutes)
2592000 ; expire (4 weeks 2 days)
3600 ; minimum (1 hour)
)
NS dns.five.example.com.
$ORIGIN five.example.com.
dns A 192.168.51.1
""", """$TTL 1h ; Default TTL
@ IN SOA dns1.five.example.com. hostmaster.example.com. (
100 ; serial
1h ; slave refresh interval
15m ; slave retry interval
1w ; slave copy expire time
1h ; NXDOMAIN cache time
)
NS dns1.five.example.com.
$ORIGIN 51.168.192.in-addr.arpa.
1 IN PTR dns1.five.example.com.
""", """$ORIGIN .
$TTL 86400 ; 1 day
three.example.com IN SOA dns.three.example.com. mail.three.example.com. (
106 ; serial
3600 ; refresh (1 hour)
900 ; retry (15 minutes)
2592000 ; expire (4 weeks 2 days)
3600 ; minimum (1 hour)
)
NS dns.three.example.com.
$ORIGIN three.example.com.
dns A 192.168.52.1
""", """$TTL 1h ; Default TTL
@ IN SOA dns1.three.example.com. hostmaster.three.example.com. (
100 ; serial
1h ; slave refresh interval
15m ; slave retry interval
1w ; slave copy expire time
1h ; NXDOMAIN cache time
)
NS dns1.three.example.com.
$ORIGIN 52.168.192.in-addr.arpa.
1 IN PTR dns1.three.example.com.
"""]}
keys = '''/* $Id: bind.keys,v 1.7 2011/01/03 23:45:07 each Exp $ */
# The bind.keys file is used to override the built-in DNSSEC trust anchors
# which are included as part of BIND 9. As of the current release, the only
# trust anchors it contains are those for the DNS root zone ("."), and for
# the ISC DNSSEC Lookaside Validation zone ("dlv.isc.org"). Trust anchors
# for any other zones MUST be configured elsewhere; if they are configured
# here, they will not be recognized or used by named.
#
# The built-in trust anchors are provided for convenience of configuration.
# They are not activated within named.conf unless specifically switched on.
# To use the built-in root key, set "dnssec-validation auto;" in
# named.conf options. To use the built-in DLV key, set
# "dnssec-lookaside auto;". Without these options being set,
# the keys in this file are ignored.
#
# This file is NOT expected to be user-configured.
#
# These keys are current as of January 2011. If any key fails to
# initialize correctly, it may have expired. In that event you should
# replace this file with a current version. The latest version of
# bind.keys can always be obtained from ISC at https://www.isc.org/bind-keys.
managed-keys {
# ISC DLV: See https://www.isc.org/solutions/dlv for details.
# NOTE: This key is activated by setting "dnssec-lookaside auto;"
# in named.conf.
dlv.isc.org. initial-key 257 3 5 "BEAAAAPHMu/5onzrEE7z1egmhg/WPO0+juoZrW3euWEn4MxDCE1+lLy2
brhQv5rN32RKtMzX6Mj70jdzeND4XknW58dnJNPCxn8+jAGl2FZLK8t+
1uq4W+nnA3qO2+DL+k6BD4mewMLbIYFwe0PG73Te9fZ2kJb56dhgMde5
ymX4BI/oQ+cAK50/xvJv00Frf8kw6ucMTwFlgPe+jnGxPPEmHAte/URk
Y62ZfkLoBAADLHQ9IrS2tryAe7mbBZVcOwIeU/Rw/mRx/vwwMCTgNboM
QKtUdvNXDrYJDSHZws3xiRXF1Rf+al9UmZfSav/4NWLKjHzpT59k/VSt
TDN0YUuWrBNh";
# ROOT KEY: See https://data.iana.org/root-anchors/root-anchors.xml
# for current trust anchor information.
# NOTE: This key is activated by setting "dnssec-validation auto;"
# in named.conf.
. initial-key 257 3 8 "AwEAAagAIKlVZrpC6Ia7gEzahOR+9W29euxhJhVVLOyQbSEW0O8gcCjF
FVQUTf6v58fLjwBd0YI0EzrAcQqBGCzh/RStIoO8g0NfnfL2MTJRkxoX
bfDaUeVPQuYEhg37NZWAJQ9VnMVDxP/VHL496M/QZxkjf5/Efucp2gaD
X6RS6CXpoY68LsvPVjR0ZSwzz1apAzvN9dlzEheX7ICJBBtuA6G3LQpz
W5hOA2hzCTMjJPJ8LbqF6dsV6DoBQzgul0sGIcGOYl7OyQdXfZ57relS
Qageu+ipAdTTJ25AsRTAoub8ONGcLmqrAmRLKBP1dfwhYB4N7knNnulq
QxA+Uk1ihz0=";
};'''
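# --- Illustrative usage (not part of the original Forge helpers) ---
# The named.conf templates above use ${data_path}, ${dns_port} and ${dns_addr}
# placeholders, which the test harness is assumed to fill in before writing the
# files to disk. A minimal sketch of that substitution step using only the
# standard library; the helper name render_config is hypothetical.
def render_config(set_number, data_path, dns_addr, dns_port):
    """Return the config/zone file texts of one set with placeholders filled in."""
    from string import Template
    return [Template(text).safe_substitute(data_path=data_path,
                                           dns_addr=dns_addr,
                                           dns_port=dns_port)
            for text in config_file_set[set_number]]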
|
isc-projects/forge
|
tests/softwaresupport/bind9_server/bind_configs.py
|
Python
|
isc
| 54,716
| 0.009668
|
import sys
import os
import torch
import time
from engine import TetrisEngine
from dqn_agent import DQN, ReplayMemory, Transition
from torch.autograd import Variable
use_cuda = torch.cuda.is_available()
FloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if use_cuda else torch.LongTensor
width, height = 10, 20 # standard tetris friends rules
engine = TetrisEngine(width, height)
def load_model(filename):
model = DQN()
if use_cuda:
model.cuda()
checkpoint = torch.load(filename)
model.load_state_dict(checkpoint['state_dict'])
return model
def run(model):
state = FloatTensor(engine.clear()[None,None,:,:])
score = 0
while True:
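        # Variable(..., volatile=True) is the pre-0.4 PyTorch idiom for disabling
        # gradient tracking during inference (newer code would use torch.no_grad()).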
action = model(Variable(state,
volatile=True).type(FloatTensor)).data.max(1)[1].view(1,1).type(LongTensor)
print( model(Variable(state,
volatile=True).type(FloatTensor)).data)
state, reward, done = engine.step(action[0,0])
state = FloatTensor(state[None,None,:,:])
# Accumulate reward
score += int(reward)
print(engine)
print(action)
time.sleep(.1)
if done:
print('score {0}'.format(score))
break
if len(sys.argv) <= 1:
print('specify a filename to load the model')
sys.exit(1)
if __name__ == '__main__':
filename = sys.argv[1]
if os.path.isfile(filename):
print("=> loading model '{}'".format(filename))
model = load_model(filename).eval()
run(model)
else:
print("=> no file found at '{}'".format(filename))
|
jaybutera/tetrisRL
|
run_model.py
|
Python
|
mit
| 1,627
| 0.009834
|
import pandas as pd
import logging
import settings
import os
from scikits.audiolab import oggwrite, play, oggread
from scipy.fftpack import dct
from itertools import chain
import numpy as np
import math
log = logging.getLogger(__name__)
def read_sound(fpath, limit=settings.MUSIC_TIME_LIMIT):
try:
data, fs, enc = oggread(fpath)
upto = fs * limit
except IOError:
log.error("Could not read file at {0}".format(fpath))
raise IOError
if data.shape[0] < upto:
log.error("Music file at {0} not long enough.".format(fpath))
raise ValueError
try:
if len(data.shape) == 1 or data.shape[1] != 2:
data = np.vstack([data, data]).T
except Exception:
log.error("Invalid dimension count for file at {0}. Do you have left and right channel audio?".format(fpath))
raise ValueError
data = data[0:upto, :]
return data, fs, enc
def calc_slope(x, y):
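    # Note: this returns the ratio of total absolute deviations (y_dev / x_dev),
    # a rough, sign-less slope proxy rather than a least-squares slope.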
x_mean = np.mean(x)
y_mean = np.mean(y)
x_dev = np.sum(np.abs(np.subtract(x, x_mean)))
y_dev = np.sum(np.abs(np.subtract(y, y_mean)))
slope = (x_dev * y_dev) / (x_dev * x_dev)
return slope
def get_indicators(vec):
mean = np.mean(vec)
slope = calc_slope(np.arange(len(vec)), vec)
std = np.std(vec)
return mean, slope, std
def calc_u(vec):
fft = np.fft.fft(vec)
return np.sum(np.multiply(fft, vec)) / np.sum(vec)
def calc_mfcc(fft):
ps = np.abs(fft) ** 2
fs = np.dot(ps, mel_filter(ps.shape[0]))
ls = np.log(fs)
ds = dct(ls, type=2)
return ds
def mel_filter(blockSize):
numBands = 13
maxMel = int(freqToMel(24000))
minMel = int(freqToMel(10))
filterMatrix = np.zeros((numBands, blockSize))
melRange = np.array(xrange(numBands + 2))
melCenterFilters = melRange * (maxMel - minMel) / (numBands + 1) + minMel
aux = np.log(1 + 1000.0 / 700.0) / 1000.0
aux = (np.exp(melCenterFilters * aux) - 1) / 22050
aux = 0.5 + 700 * blockSize * aux
    aux = np.floor(aux)  # Round down
centerIndex = np.array(aux, int) # Get int values
for i in xrange(numBands):
start, center, end = centerIndex[i:(i + 3)]
k1 = np.float32(center - start)
k2 = np.float32(end - center)
up = (np.array(xrange(start, center)) - start) / k1
down = (end - np.array(xrange(center, end))) / k2
filterMatrix[i][start:center] = up
try:
filterMatrix[i][center:end] = down
except ValueError:
pass
return filterMatrix.transpose()
def freqToMel(freq):
return 1127.01048 * math.log(1 + freq / 700.0)
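# Sanity check: freqToMel(1000.0) evaluates to roughly 1000 mel, by construction
# of the 1127 * ln(1 + f/700) scale.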
def melToFreq(freq):
return 700 * (math.exp(freq / 1127.01048 - 1))
def calc_features(vec, freq):
# bin count
bc = settings.MUSIC_TIME_LIMIT
bincount = list(range(bc))
# framesize
fsize = 512
#mean
m = np.mean(vec)
#spectral flux
sf = np.mean(vec - np.roll(vec, fsize))
mx = np.max(vec)
mi = np.min(vec)
sdev = np.std(vec)
binwidth = len(vec) / bc
bins = []
for i in xrange(0, bc):
bins.append(vec[(i * binwidth):(binwidth * i + binwidth)])
peaks = [np.max(i) for i in bins]
mins = [np.min(i) for i in bins]
amin, smin, stmin = get_indicators(mins)
apeak, speak, stpeak = get_indicators(peaks)
#fft = np.fft.fft(vec)
bin_fft = []
for i in xrange(0, bc):
bin_fft.append(np.fft.fft(vec[(i * binwidth):(binwidth * i + binwidth)]))
mel = [list(calc_mfcc(j)) for (i, j) in enumerate(bin_fft) if i % 3 == 0]
mels = list(chain.from_iterable(mel))
cepstrums = [np.fft.ifft(np.log(np.abs(i))) for i in bin_fft]
inter = [get_indicators(i) for i in cepstrums]
acep, scep, stcep = get_indicators([i[0] for i in inter])
aacep, sscep, stsscep = get_indicators([i[1] for i in inter])
zero_crossings = np.where(np.diff(np.sign(vec)))[0]
zcc = len(zero_crossings)
zccn = zcc / freq
u = [calc_u(i) for i in bins]
spread = np.sqrt(u[-1] - u[0] ** 2)
skewness = (u[0] ** 3 - 3 * u[0] * u[5] + u[-1]) / spread ** 3
#Spectral slope
#ss = calc_slope(np.arange(len(fft)),fft)
avss = [calc_slope(np.arange(len(i)), i) for i in bin_fft]
savss = calc_slope(bincount, avss)
mavss = np.mean(avss)
features = [m, sf, mx, mi, sdev, amin, smin, stmin, apeak, speak, stpeak, acep, scep, stcep, aacep, sscep, stsscep,
zcc, zccn, spread, skewness, savss, mavss] + mels + [i[0] for (j, i) in enumerate(inter) if j % 5 == 0]
for i in xrange(0, len(features)):
try:
features[i] = features[i].real
except Exception:
pass
return features
def extract_features(sample, freq):
left = calc_features(sample[:, 0], freq)
right = calc_features(sample[:, 1], freq)
return left + right
def process_song(vec, f):
try:
features = extract_features(vec, f)
except Exception:
        log.error("Cannot generate features (sample rate {0})".format(f))
return None
return features
def generate_features(filepath):
frame = None
data, fs, enc = read_sound(filepath)
features = process_song(data, fs)
frame = pd.Series(features)
frame['fs'] = fs
frame['enc'] = enc
frame['fname'] = filepath
return frame
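# A minimal usage sketch for generate_features (the path below is hypothetical;
# it assumes the audio reader, the settings module and pandas are configured as
# in the rest of this project):
#
#     row = generate_features("/path/to/song.ogg")
#     # row is a pandas Series holding the numeric features plus the
#     # 'fs', 'enc' and 'fname' bookkeeping fields used by the training frame.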
def generate_train_features():
if not os.path.isfile(settings.TRAIN_FEATURE_PATH):
d = []
encs = []
fss = []
fnames = []
for i, p in enumerate(os.listdir(settings.OGG_DIR)):
if not p.endswith(".ogg"):
continue
log.debug("On file {0}".format(p))
filepath = os.path.join(settings.OGG_DIR, p)
try:
data, fs, enc = read_sound(filepath)
except Exception:
continue
try:
features = process_song(data, fs)
except Exception:
log.error("Could not get features for file {0}".format(p))
continue
d.append(features)
fss.append(fs)
encs.append(enc)
fnames.append(p)
frame = pd.DataFrame(d)
frame['fs'] = fss
frame['enc'] = encs
frame['fname'] = fnames
frame.to_csv(settings.TRAIN_FEATURE_PATH)
else:
frame = pd.read_csv(settings.TRAIN_FEATURE_PATH)
frame = frame.iloc[:, 1:]
return frame
|
VikParuchuri/evolve-music2
|
extract_features.py
|
Python
|
mit
| 6,422
| 0.001557
|
import time
from indy import anoncreds, wallet
import json
import logging
from indy import pool
from src.utils import run_coroutine, PROTOCOL_VERSION
logger = logging.getLogger(__name__)
async def demo():
logger.info("Anoncreds sample -> started")
issuer = {
'did': 'NcYxiDXkpYi6ov5FcYDi1e',
'wallet_config': json.dumps({'id': 'issuer_wallet'}),
'wallet_credentials': json.dumps({'key': 'issuer_wallet_key'})
}
prover = {
'did': 'VsKV7grR1BUE29mG2Fm2kX',
'wallet_config': json.dumps({"id": "prover_wallet"}),
        'wallet_credentials': json.dumps({"key": "prover_wallet_key"})
}
verifier = {}
store = {}
# Set protocol version 2 to work with Indy Node 1.4
await pool.set_protocol_version(PROTOCOL_VERSION)
# 1. Create Issuer Wallet and Get Wallet Handle
await wallet.create_wallet(issuer['wallet_config'], issuer['wallet_credentials'])
issuer['wallet'] = await wallet.open_wallet(issuer['wallet_config'], issuer['wallet_credentials'])
# 2. Create Prover Wallet and Get Wallet Handle
await wallet.create_wallet(prover['wallet_config'], prover['wallet_credentials'])
prover['wallet'] = await wallet.open_wallet(prover['wallet_config'], prover['wallet_credentials'])
# 3. Issuer create Credential Schema
schema = {
'name': 'gvt',
'version': '1.0',
'attributes': '["age", "sex", "height", "name"]'
}
issuer['schema_id'], issuer['schema'] = await anoncreds.issuer_create_schema(issuer['did'], schema['name'],
schema['version'],
schema['attributes'])
store[issuer['schema_id']] = issuer['schema']
# 4. Issuer create Credential Definition for Schema
cred_def = {
'tag': 'cred_def_tag',
'type': 'CL',
'config': json.dumps({"support_revocation": False})
}
issuer['cred_def_id'], issuer['cred_def'] = await anoncreds.issuer_create_and_store_credential_def(
issuer['wallet'], issuer['did'], issuer['schema'], cred_def['tag'], cred_def['type'], cred_def['config'])
store[issuer['cred_def_id']] = issuer['cred_def']
# 5. Prover create Master Secret
prover['master_secret_id'] = await anoncreds.prover_create_master_secret(prover['wallet'], None)
# 6. Issuer create Credential Offer
issuer['cred_offer'] = await anoncreds.issuer_create_credential_offer(issuer['wallet'], issuer['cred_def_id'])
prover['cred_offer'] = issuer['cred_offer']
cred_offer = json.loads(prover['cred_offer'])
prover['cred_def_id'] = cred_offer['cred_def_id']
prover['schema_id'] = cred_offer['schema_id']
prover['cred_def'] = store[prover['cred_def_id']]
prover['schema'] = store[prover['schema_id']]
# 7. Prover create Credential Request
prover['cred_req'], prover['cred_req_metadata'] = \
await anoncreds.prover_create_credential_req(prover['wallet'], prover['did'], prover['cred_offer'],
prover['cred_def'], prover['master_secret_id'])
# 8. Issuer create Credential
prover['cred_values'] = json.dumps({
"sex": {"raw": "male", "encoded": "5944657099558967239210949258394887428692050081607692519917050011144233"},
"name": {"raw": "Alex", "encoded": "1139481716457488690172217916278103335"},
"height": {"raw": "175", "encoded": "175"},
"age": {"raw": "28", "encoded": "28"}
})
issuer['cred_values'] = prover['cred_values']
issuer['cred_req'] = prover['cred_req']
(cred_json, _, _) = await anoncreds.issuer_create_credential(issuer['wallet'], issuer['cred_offer'],
issuer['cred_req'], issuer['cred_values'], None, None)
prover['cred'] = cred_json
# 9. Prover store Credential
await anoncreds.prover_store_credential(prover['wallet'], None, prover['cred_req_metadata'], prover['cred'],
prover['cred_def'], None)
# 10. Prover gets Credentials for Proof Request
verifier['proof_req'] = json.dumps({
'nonce': '123432421212',
'name': 'proof_req_1',
'version': '0.1',
'requested_attributes': {
'attr1_referent': {'name': 'name'}
},
'requested_predicates': {
'predicate1_referent': {'name': 'age', 'p_type': '>=', 'p_value': 18}
}
})
prover['proof_req'] = verifier['proof_req']
# Prover gets Credentials for attr1_referent
prover['cred_search_handle'] = \
await anoncreds.prover_search_credentials_for_proof_req(prover['wallet'], prover['proof_req'], None)
creds_for_attr1 = await anoncreds.prover_fetch_credentials_for_proof_req(prover['cred_search_handle'],
'attr1_referent', 10)
prover['cred_for_attr1'] = json.loads(creds_for_attr1)[0]['cred_info']
# Prover gets Credentials for predicate1_referent
creds_for_predicate1 = await anoncreds.prover_fetch_credentials_for_proof_req(prover['cred_search_handle'],
'predicate1_referent', 10)
prover['cred_for_predicate1'] = json.loads(creds_for_predicate1)[0]['cred_info']
await anoncreds.prover_close_credentials_search_for_proof_req(prover['cred_search_handle'])
# 11. Prover create Proof for Proof Request
prover['requested_creds'] = json.dumps({
'self_attested_attributes': {},
'requested_attributes': {'attr1_referent': {'cred_id': prover['cred_for_attr1']['referent'], 'revealed': True}},
'requested_predicates': {'predicate1_referent': {'cred_id': prover['cred_for_predicate1']['referent']}}
})
schemas_json = json.dumps({prover['schema_id']: json.loads(prover['schema'])})
cred_defs_json = json.dumps({prover['cred_def_id']: json.loads(prover['cred_def'])})
revoc_states_json = json.dumps({})
prover['proof'] = await anoncreds.prover_create_proof(prover['wallet'], prover['proof_req'],
prover['requested_creds'],
prover['master_secret_id'], schemas_json, cred_defs_json,
revoc_states_json)
verifier['proof'] = prover['proof']
# 12. Verifier verify proof
proof = json.loads(verifier['proof'])
assert 'Alex' == proof['requested_proof']['revealed_attrs']['attr1_referent']['raw']
identifier = proof['identifiers'][0]
verifier['cred_def_id'] = identifier['cred_def_id']
verifier['schema_id'] = identifier['schema_id']
verifier['cred_def'] = store[verifier['cred_def_id']]
verifier['schema'] = store[verifier['schema_id']]
schemas_json = json.dumps({verifier['schema_id']: json.loads(verifier['schema'])})
cred_defs_json = json.dumps({verifier['cred_def_id']: json.loads(verifier['cred_def'])})
revoc_ref_defs_json = "{}"
revoc_regs_json = "{}"
assert await anoncreds.verifier_verify_proof(verifier['proof_req'], verifier['proof'], schemas_json, cred_defs_json,
revoc_ref_defs_json, revoc_regs_json)
# 13. Close and delete Issuer wallet
await wallet.close_wallet(issuer['wallet'])
await wallet.delete_wallet(issuer['wallet_config'], issuer['wallet_credentials'])
# 14. Close and delete Prover wallet
await wallet.close_wallet(prover['wallet'])
await wallet.delete_wallet(prover['wallet_config'], prover['wallet_credentials'])
logger.info("Anoncreds sample -> completed")
if __name__ == '__main__':
run_coroutine(demo)
time.sleep(1) # FIXME waiting for libindy thread complete
|
srottem/indy-sdk
|
samples/python/src/anoncreds.py
|
Python
|
apache-2.0
| 7,929
| 0.005171
|
import gevent
import socket
from pprint import pformat
from vnc_api.vnc_api import *
from cfgm_common.vnc_kombu import VncKombuClient
from config_db import *
from cfgm_common.dependency_tracker import DependencyTracker
from reaction_map import REACTION_MAP
import svc_monitor
class RabbitConnection(object):
_REACTION_MAP = REACTION_MAP
def __init__(self, logger, args=None):
self._args = args
self.logger = logger
def _connect_rabbit(self):
rabbit_server = self._args.rabbit_server
rabbit_port = self._args.rabbit_port
rabbit_user = self._args.rabbit_user
rabbit_password = self._args.rabbit_password
rabbit_vhost = self._args.rabbit_vhost
rabbit_ha_mode = self._args.rabbit_ha_mode
self._db_resync_done = gevent.event.Event()
q_name = 'svc_mon.%s' % (socket.gethostname())
self._vnc_kombu = VncKombuClient(rabbit_server, rabbit_port,
rabbit_user, rabbit_password,
rabbit_vhost, rabbit_ha_mode,
q_name, self._vnc_subscribe_callback,
self.logger.log)
def _vnc_subscribe_callback(self, oper_info):
self._db_resync_done.wait()
try:
self._vnc_subscribe_actions(oper_info)
except Exception:
svc_monitor.cgitb_error_log(self)
def _vnc_subscribe_actions(self, oper_info):
msg = "Notification Message: %s" % (pformat(oper_info))
self.logger.log_debug(msg)
obj_type = oper_info['type'].replace('-', '_')
obj_class = DBBaseSM.get_obj_type_map().get(obj_type)
if obj_class is None:
return
if oper_info['oper'] == 'CREATE':
obj_dict = oper_info['obj_dict']
obj_id = oper_info['uuid']
obj = obj_class.locate(obj_id)
dependency_tracker = DependencyTracker(
DBBaseSM.get_obj_type_map(), self._REACTION_MAP)
dependency_tracker.evaluate(obj_type, obj)
elif oper_info['oper'] == 'UPDATE':
obj_id = oper_info['uuid']
obj = obj_class.get(obj_id)
old_dt = None
if obj is not None:
old_dt = DependencyTracker(
DBBaseSM.get_obj_type_map(), self._REACTION_MAP)
old_dt.evaluate(obj_type, obj)
else:
obj = obj_class.locate(obj_id)
obj.update()
dependency_tracker = DependencyTracker(
DBBaseSM.get_obj_type_map(), self._REACTION_MAP)
dependency_tracker.evaluate(obj_type, obj)
if old_dt:
for resource, ids in old_dt.resources.items():
if resource not in dependency_tracker.resources:
dependency_tracker.resources[resource] = ids
else:
dependency_tracker.resources[resource] = list(
set(dependency_tracker.resources[resource]) |
set(ids))
elif oper_info['oper'] == 'DELETE':
obj_id = oper_info['uuid']
obj = obj_class.get(obj_id)
if obj is None:
return
dependency_tracker = DependencyTracker(
DBBaseSM.get_obj_type_map(), self._REACTION_MAP)
dependency_tracker.evaluate(obj_type, obj)
obj_class.delete(obj_id)
else:
# unknown operation
self.logger.log_error('Unknown operation %s' % oper_info['oper'])
return
if obj is None:
self.logger.log_error('Error while accessing %s uuid %s' % (
obj_type, obj_id))
return
for res_type, res_id_list in dependency_tracker.resources.items():
if not res_id_list:
continue
cls = DBBaseSM.get_obj_type_map().get(res_type)
if cls is None:
continue
for res_id in res_id_list:
res_obj = cls.get(res_id)
if res_obj is not None:
res_obj.evaluate()
|
sajuptpm/contrail-controller
|
src/config/svc-monitor/svc_monitor/rabbit.py
|
Python
|
apache-2.0
| 4,079
| 0.001226
|
"""
Basic iscsi support for a Linux host with the help of the commands
iscsiadm and tgtadm.
This includes basic operations such as logging in and getting the device
name by target name. It supports both accessing a real iscsi target and
emulating an iscsi target on localhost and then accessing it.
"""
import re
import os
import logging
from avocado.core import exceptions
from avocado.utils import data_factory
from avocado.utils import process
from avocado.utils import path
from . import utils_selinux
from . import utils_net
from . import data_dir
ISCSI_CONFIG_FILE = "/etc/iscsi/initiatorname.iscsi"
def iscsi_get_sessions():
"""
Get the iscsi sessions activated
"""
cmd = "iscsiadm --mode session"
output = process.system_output(cmd, ignore_status=True)
sessions = []
if "No active sessions" not in output:
for session in output.splitlines():
ip_addr = session.split()[2].split(',')[0]
target = session.split()[3]
sessions.append((ip_addr, target))
return sessions
def iscsi_get_nodes():
"""
Get the iscsi nodes
"""
cmd = "iscsiadm --mode node"
output = process.system_output(cmd, ignore_status=True)
pattern = r"(\d+\.\d+\.\d+\.\d+|\[.+\]):\d+,\d+\s+([\w\.\-:\d]+)"
nodes = []
if "No records found" not in output:
nodes = re.findall(pattern, output)
return nodes
def iscsi_login(target_name, portal):
"""
Login to a target with the target name
:param target_name: Name of the target
    :param portal: Hostname/IP of the iscsi server
"""
cmd = "iscsiadm --mode node --login --targetname %s" % target_name
cmd += " --portal %s" % portal
output = process.system_output(cmd)
target_login = ""
if "successful" in output:
target_login = target_name
return target_login
def iscsi_node_del(target_name=None):
"""
    Delete a target node record. If the target name is not set, delete
    all target node records.
    :param target_name: Name of the target.
"""
node_list = iscsi_get_nodes()
cmd = ''
if target_name:
for node_tup in node_list:
if target_name in node_tup:
cmd = "iscsiadm -m node -o delete -T %s " % target_name
cmd += "--portal %s" % node_tup[0]
process.system(cmd, ignore_status=True)
break
if not cmd:
logging.error("The target '%s' for delete is not in target node"
" record", target_name)
else:
for node_tup in node_list:
cmd = "iscsiadm -m node -o delete -T %s " % node_tup[1]
cmd += "--portal %s" % node_tup[0]
process.system(cmd, ignore_status=True)
def iscsi_logout(target_name=None):
"""
Logout from a target. If the target name is not set then logout all
targets.
    :param target_name: Name of the target.
"""
if target_name:
cmd = "iscsiadm --mode node --logout -T %s" % target_name
else:
cmd = "iscsiadm --mode node --logout all"
output = process.system_output(cmd)
target_logout = ""
if "successful" in output:
target_logout = target_name
return target_logout
def iscsi_discover(portal_ip):
"""
Query from iscsi server for available targets
:param portal_ip: Ip for iscsi server
"""
cmd = "iscsiadm -m discovery -t sendtargets -p %s" % portal_ip
output = process.system_output(cmd, ignore_status=True)
session = ""
if "Invalid" in output:
logging.debug(output)
else:
session = output
return session
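# A minimal sketch of how the module-level helpers above compose (the portal
# address and target name are hypothetical; iscsiadm must be installed and the
# portal reachable for any of this to work):
#
#     targets = iscsi_discover("192.168.0.10")           # sendtargets discovery
#     iscsi_login("iqn.2015-05.com.example:disk", "192.168.0.10")
#     print iscsi_get_sessions()                         # [(ip, target), ...]
#     iscsi_logout("iqn.2015-05.com.example:disk")
#     iscsi_node_del("iqn.2015-05.com.example:disk")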
class _IscsiComm(object):
"""
    Provide the common initialization shared by the TGT and LIO backends
"""
def __init__(self, params, root_dir):
"""
common __init__ function used to initialize iSCSI service
:param params: parameters dict for iSCSI
:param root_dir: path for image
"""
self.target = params.get("target")
self.export_flag = False
self.luns = None
self.restart_tgtd = 'yes' == params.get("restart_tgtd", "no")
if params.get("portal_ip"):
self.portal_ip = params.get("portal_ip")
else:
self.portal_ip = "127.0.0.1"
if params.get("iscsi_thread_id"):
self.id = params.get("iscsi_thread_id")
else:
self.id = data_factory.generate_random_string(4)
self.initiator = params.get("initiator")
# CHAP AUTHENTICATION
self.chap_flag = False
self.chap_user = params.get("chap_user")
self.chap_passwd = params.get("chap_passwd")
if self.chap_user and self.chap_passwd:
self.chap_flag = True
if params.get("emulated_image"):
self.initiator = None
emulated_image = params.get("emulated_image")
self.emulated_image = os.path.join(root_dir, emulated_image)
self.device = "device.%s" % os.path.basename(self.emulated_image)
self.emulated_id = ""
self.emulated_size = params.get("image_size")
self.unit = self.emulated_size[-1].upper()
self.emulated_size = self.emulated_size[:-1]
# maps K,M,G,T => (count, bs)
emulated_size = {'K': (1, 1),
'M': (1, 1024),
'G': (1024, 1024),
'T': (1024, 1048576),
}
            if self.unit in emulated_size:
block_size = emulated_size[self.unit][1]
size = int(self.emulated_size) * emulated_size[self.unit][0]
self.emulated_expect_size = block_size * size
self.create_cmd = ("dd if=/dev/zero of=%s count=%s bs=%sK"
% (self.emulated_image, size, block_size))
else:
self.device = None
def logged_in(self):
"""
        Check if the session is logged in or not.
"""
sessions = iscsi_get_sessions()
login = False
if self.target in map(lambda x: x[1], sessions):
login = True
return login
def portal_visible(self):
"""
Check if the portal can be found or not.
"""
return bool(re.findall("%s$" % self.target,
iscsi_discover(self.portal_ip), re.M))
def set_initiatorName(self, id, name):
"""
back up and set up the InitiatorName
"""
if os.path.isfile("%s" % ISCSI_CONFIG_FILE):
logging.debug("Try to update iscsi initiatorname")
cmd = "mv %s %s-%s" % (ISCSI_CONFIG_FILE, ISCSI_CONFIG_FILE, id)
process.system(cmd)
fd = open(ISCSI_CONFIG_FILE, 'w')
fd.write("InitiatorName=%s" % name)
fd.close()
process.system("service iscsid restart")
def login(self):
"""
Login session for both real iscsi device and emulated iscsi.
Include env check and setup.
"""
login_flag = False
if self.portal_visible():
login_flag = True
elif self.initiator:
self.set_initiatorName(id=self.id, name=self.initiator)
if self.portal_visible():
login_flag = True
elif self.emulated_image:
self.export_target()
# If both iSCSI server and iSCSI client are on localhost.
# It's necessary to set up the InitiatorName.
if "127.0.0.1" in self.portal_ip:
self.set_initiatorName(id=self.id, name=self.target)
if self.portal_visible():
login_flag = True
if login_flag:
iscsi_login(self.target, self.portal_ip)
def get_device_name(self):
"""
Get device name from the target name.
"""
cmd = "iscsiadm -m session -P 3"
device_name = ""
if self.logged_in():
output = process.system_output(cmd)
pattern = r"Target:\s+%s.*?disk\s(\w+)\s+\S+\srunning" % self.target
device_name = re.findall(pattern, output, re.S)
try:
device_name = "/dev/%s" % device_name[0]
except IndexError:
logging.error(
"Can not find target '%s' after login.", self.target)
else:
logging.error("Session is not logged in yet.")
return device_name
def set_chap_auth_initiator(self):
"""
Set CHAP authentication for initiator.
"""
name_dict = {'node.session.auth.authmethod': 'CHAP'}
name_dict['node.session.auth.username'] = self.chap_user
name_dict['node.session.auth.password'] = self.chap_passwd
for name in name_dict.keys():
cmd = "iscsiadm --mode node --targetname %s " % self.target
cmd += "--op update --name %s --value %s" % (name, name_dict[name])
try:
process.system(cmd)
except process.CmdError:
logging.error("Fail to set CHAP authentication for initiator")
def logout(self):
"""
Logout from target.
"""
if self.logged_in():
iscsi_logout(self.target)
def cleanup(self):
"""
Clean up env after iscsi used.
"""
self.logout()
iscsi_node_del(self.target)
if os.path.isfile("%s-%s" % (ISCSI_CONFIG_FILE, self.id)):
cmd = "mv %s-%s %s" % (ISCSI_CONFIG_FILE, self.id, ISCSI_CONFIG_FILE)
process.system(cmd)
cmd = "service iscsid restart"
process.system(cmd)
if self.export_flag:
self.delete_target()
class IscsiTGT(_IscsiComm):
"""
iscsi support TGT backend used in RHEL6.
"""
def __init__(self, params, root_dir):
"""
initialize TGT backend for iSCSI
:param params: parameters dict for TGT backend of iSCSI.
"""
super(IscsiTGT, self).__init__(params, root_dir)
def get_target_id(self):
"""
Get target id from image name. Only works for emulated iscsi device
"""
cmd = "tgtadm --lld iscsi --mode target --op show"
target_info = process.system_output(cmd)
target_id = ""
for line in re.split("\n", target_info):
if re.findall("Target\s+(\d+)", line):
target_id = re.findall("Target\s+(\d+)", line)[0]
if re.findall("Backing store path:\s+(/+.+)", line):
if self.emulated_image in line:
break
else:
target_id = ""
return target_id
def get_chap_accounts(self):
"""
Get all CHAP authentication accounts
"""
cmd = "tgtadm --lld iscsi --op show --mode account"
all_accounts = process.system_output(cmd)
if all_accounts:
all_accounts = map(str.strip, all_accounts.splitlines()[1:])
return all_accounts
def add_chap_account(self):
"""
Add CHAP authentication account
"""
try:
cmd = "tgtadm --lld iscsi --op new --mode account"
cmd += " --user %s" % self.chap_user
cmd += " --password %s" % self.chap_passwd
process.system(cmd)
except process.CmdError, err:
logging.error("Fail to add account: %s", err)
# Check the new add account exist
if self.chap_user not in self.get_chap_accounts():
logging.error("Can't find account %s" % self.chap_user)
def delete_chap_account(self):
"""
Delete the CHAP authentication account
"""
if self.chap_user in self.get_chap_accounts():
cmd = "tgtadm --lld iscsi --op delete --mode account"
cmd += " --user %s" % self.chap_user
process.system(cmd)
def get_target_account_info(self):
"""
Get the target account information
"""
cmd = "tgtadm --lld iscsi --mode target --op show"
target_info = process.system_output(cmd)
pattern = r"Target\s+\d:\s+%s" % self.target
pattern += ".*Account information:\s(.*)ACL information"
try:
target_account = re.findall(pattern, target_info,
re.S)[0].strip().splitlines()
except IndexError:
target_account = []
return map(str.strip, target_account)
def set_chap_auth_target(self):
"""
        Set CHAP authentication on a target; the target will then require
        authentication before an initiator is allowed to log in and access devices.
"""
if self.chap_user not in self.get_chap_accounts():
self.add_chap_account()
if self.chap_user in self.get_target_account_info():
logging.debug("Target %s already has account %s", self.target,
self.chap_user)
else:
cmd = "tgtadm --lld iscsi --op bind --mode account"
cmd += " --tid %s --user %s" % (self.emulated_id, self.chap_user)
process.system(cmd)
def export_target(self):
"""
Export target in localhost for emulated iscsi
"""
selinux_mode = None
if not os.path.isfile(self.emulated_image):
process.system(self.create_cmd)
else:
emulated_image_size = os.path.getsize(self.emulated_image) / 1024
if emulated_image_size != self.emulated_expect_size:
                # No need to remove, rebuild is fine
process.system(self.create_cmd)
cmd = "tgtadm --lld iscsi --mode target --op show"
try:
output = process.system_output(cmd)
except process.CmdError:
process.system("service tgtd restart")
output = process.system_output(cmd)
if not re.findall("%s$" % self.target, output, re.M):
logging.debug("Need to export target in host")
# Set selinux to permissive mode to make sure iscsi target
# export successfully
if utils_selinux.is_enforcing():
selinux_mode = utils_selinux.get_status()
utils_selinux.set_status("permissive")
output = process.system_output(cmd)
used_id = re.findall("Target\s+(\d+)", output)
emulated_id = 1
while str(emulated_id) in used_id:
emulated_id += 1
self.emulated_id = str(emulated_id)
cmd = "tgtadm --mode target --op new --tid %s" % self.emulated_id
cmd += " --lld iscsi --targetname %s" % self.target
process.system(cmd)
cmd = "tgtadm --lld iscsi --op bind --mode target "
cmd += "--tid %s -I ALL" % self.emulated_id
process.system(cmd)
else:
target_strs = re.findall("Target\s+(\d+):\s+%s$" %
self.target, output, re.M)
self.emulated_id = target_strs[0].split(':')[0].split()[-1]
cmd = "tgtadm --lld iscsi --mode target --op show"
try:
output = process.system_output(cmd)
except process.CmdError: # In case service stopped
process.system("service tgtd restart")
output = process.system_output(cmd)
# Create a LUN with emulated image
if re.findall(self.emulated_image, output, re.M):
# Exist already
logging.debug("Exported image already exists.")
self.export_flag = True
else:
tgt_str = re.search(r'.*(Target\s+\d+:\s+%s\s*.*)$' % self.target,
output, re.DOTALL)
if tgt_str:
luns = len(re.findall("\s+LUN:\s(\d+)",
tgt_str.group(1), re.M))
else:
luns = len(re.findall("\s+LUN:\s(\d+)", output, re.M))
cmd = "tgtadm --mode logicalunit --op new "
cmd += "--tid %s --lld iscsi " % self.emulated_id
cmd += "--lun %s " % luns
cmd += "--backing-store %s" % self.emulated_image
process.system(cmd)
self.export_flag = True
self.luns = luns
# Restore selinux
if selinux_mode is not None:
utils_selinux.set_status(selinux_mode)
if self.chap_flag:
# Set CHAP authentication on the exported target
self.set_chap_auth_target()
# Set CHAP authentication for initiator to login target
if self.portal_visible():
self.set_chap_auth_initiator()
def delete_target(self):
"""
Delete target from host.
"""
cmd = "tgtadm --lld iscsi --mode target --op show"
output = process.system_output(cmd)
if re.findall("%s$" % self.target, output, re.M):
if self.emulated_id:
cmd = "tgtadm --lld iscsi --mode target --op delete "
cmd += "--tid %s" % self.emulated_id
process.system(cmd)
if self.restart_tgtd:
cmd = "service tgtd restart"
process.system(cmd)
class IscsiLIO(_IscsiComm):
"""
iscsi support class for LIO backend used in RHEL7.
"""
def __init__(self, params, root_dir):
"""
initialize LIO backend for iSCSI
:param params: parameters dict for LIO backend of iSCSI
"""
super(IscsiLIO, self).__init__(params, root_dir)
def get_target_id(self):
"""
Get target id from image name.
"""
cmd = "targetcli ls /iscsi 1"
target_info = process.system_output(cmd)
target = None
for line in re.split("\n", target_info)[1:]:
if re.findall("o-\s\S+\s[\.]+\s\[TPGs:\s\d\]$", line):
# eg: iqn.2015-05.com.example:iscsi.disk
try:
target = re.findall("iqn[\.]\S+:\S+", line)[0]
except IndexError:
                    logging.info("No target found in %s", line)
continue
else:
continue
cmd = "targetcli ls /iscsi/%s/tpg1/luns" % target
luns_info = process.system_output(cmd)
for lun_line in re.split("\n", luns_info):
if re.findall("o-\slun\d+", lun_line):
if self.emulated_image in lun_line:
break
else:
target = None
return target
def set_chap_acls_target(self):
"""
        Set CHAP (ACL-based) authentication on a target.
        The target will then require authentication
        before an initiator is allowed to log in and access devices.
        Note:
Individual ACL entries override common TPG Authentication,
which can be set by set_chap_auth_target().
"""
# Enable ACL nodes
acls_cmd = "targetcli /iscsi/%s/tpg1/ " % self.target
attr_cmd = "set attribute generate_node_acls=0"
process.system(acls_cmd + attr_cmd)
# Create user and allow access
acls_cmd = ("targetcli /iscsi/%s/tpg1/acls/ create %s:client"
% (self.target, self.target.split(":")[0]))
output = process.system_output(acls_cmd)
if "Created Node ACL" not in output:
raise exceptions.TestFail("Failed to create ACL. (%s)" % output)
comm_cmd = ("targetcli /iscsi/%s/tpg1/acls/%s:client/"
% (self.target, self.target.split(":")[0]))
# Set userid
userid_cmd = "%s set auth userid=%s" % (comm_cmd, self.chap_user)
output = process.system_output(userid_cmd)
if self.chap_user not in output:
raise exceptions.TestFail("Failed to set user. (%s)" % output)
# Set password
passwd_cmd = "%s set auth password=%s" % (comm_cmd, self.chap_passwd)
output = process.system_output(passwd_cmd)
if self.chap_passwd not in output:
raise exceptions.TestFail("Failed to set password. (%s)" % output)
# Save configuration
process.system("targetcli / saveconfig")
def set_chap_auth_target(self):
"""
        Set up common TPG authentication: define login information that is
        shared by all Endpoints in a TPG, instead of configuring every single
        initiator individually.
"""
auth_cmd = "targetcli /iscsi/%s/tpg1/ " % self.target
attr_cmd = ("set attribute %s %s %s" %
("demo_mode_write_protect=0",
"generate_node_acls=1",
"cache_dynamic_acls=1"))
process.system(auth_cmd + attr_cmd)
# Set userid
userid_cmd = "%s set auth userid=%s" % (auth_cmd, self.chap_user)
output = process.system_output(userid_cmd)
if self.chap_user not in output:
raise exceptions.TestFail("Failed to set user. (%s)" % output)
# Set password
passwd_cmd = "%s set auth password=%s" % (auth_cmd, self.chap_passwd)
output = process.system_output(passwd_cmd)
if self.chap_passwd not in output:
raise exceptions.TestFail("Failed to set password. (%s)" % output)
# Save configuration
process.system("targetcli / saveconfig")
def export_target(self):
"""
Export target in localhost for emulated iscsi
"""
selinux_mode = None
# create image disk
if not os.path.isfile(self.emulated_image):
process.system(self.create_cmd)
else:
emulated_image_size = os.path.getsize(self.emulated_image) / 1024
if emulated_image_size != self.emulated_expect_size:
                # No need to remove, rebuild is fine
process.system(self.create_cmd)
# confirm if the target exists and create iSCSI target
cmd = "targetcli ls /iscsi 1"
output = process.system_output(cmd)
if not re.findall("%s$" % self.target, output, re.M):
logging.debug("Need to export target in host")
# Set selinux to permissive mode to make sure
# iscsi target export successfully
if utils_selinux.is_enforcing():
selinux_mode = utils_selinux.get_status()
utils_selinux.set_status("permissive")
# In fact, We've got two options here
#
# 1) Create a block backstore that usually provides the best
# performance. We can use a block device like /dev/sdb or
# a logical volume previously created,
# (lvcreate -name lv_iscsi -size 1G vg)
# 2) Create a fileio backstore,
# which enables the local file system cache.
#
            # This class only works for emulated iscsi devices,
# So fileio backstore is enough and safe.
# Create a fileio backstore
device_cmd = ("targetcli /backstores/fileio/ create %s %s" %
(self.device, self.emulated_image))
output = process.system_output(device_cmd)
if "Created fileio" not in output:
raise exceptions.TestFail("Failed to create fileio %s. (%s)" %
(self.device, output))
# Create an IQN with a target named target_name
target_cmd = "targetcli /iscsi/ create %s" % self.target
output = process.system_output(target_cmd)
if "Created target" not in output:
raise exceptions.TestFail("Failed to create target %s. (%s)" %
(self.target, output))
check_portal = "targetcli /iscsi/%s/tpg1/portals ls" % self.target
portal_info = process.system_output(check_portal)
if "0.0.0.0:3260" not in portal_info:
# Create portal
# 0.0.0.0 means binding to INADDR_ANY
# and using default IP port 3260
portal_cmd = ("targetcli /iscsi/%s/tpg1/portals/ create %s"
% (self.target, "0.0.0.0"))
output = process.system_output(portal_cmd)
if "Created network portal" not in output:
raise exceptions.TestFail("Failed to create portal. (%s)" %
output)
if ("ipv6" == utils_net.IPAddress(self.portal_ip).version and
self.portal_ip not in portal_info):
# Ipv6 portal address can't be created by default,
# create ipv6 portal if needed.
portal_cmd = ("targetcli /iscsi/%s/tpg1/portals/ create %s"
% (self.target, self.portal_ip))
output = process.system_output(portal_cmd)
if "Created network portal" not in output:
raise exceptions.TestFail("Failed to create portal. (%s)" %
output)
# Create lun
lun_cmd = "targetcli /iscsi/%s/tpg1/luns/ " % self.target
dev_cmd = "create /backstores/fileio/%s" % self.device
output = process.system_output(lun_cmd + dev_cmd)
luns = re.findall(r"Created LUN (\d+).", output)
if not luns:
raise exceptions.TestFail("Failed to create lun. (%s)" %
output)
self.luns = luns[0]
# Set firewall if it's enabled
output = process.system_output("firewall-cmd --state",
ignore_status=True)
if re.findall("^running", output, re.M):
# firewall is running
process.system("firewall-cmd --permanent --add-port=3260/tcp")
process.system("firewall-cmd --reload")
# Restore selinux
if selinux_mode is not None:
utils_selinux.set_status(selinux_mode)
self.export_flag = True
else:
            logging.info("Target %s already exists!" % self.target)
if self.chap_flag:
# Set CHAP authentication on the exported target
self.set_chap_auth_target()
# Set CHAP authentication for initiator to login target
if self.portal_visible():
self.set_chap_auth_initiator()
else:
# To enable that so-called "demo mode" TPG operation,
# disable all authentication for the corresponding Endpoint.
# which means grant access to all initiators,
# so that they can access all LUNs in the TPG
# without further authentication.
auth_cmd = "targetcli /iscsi/%s/tpg1/ " % self.target
attr_cmd = ("set attribute %s %s %s %s" %
("authentication=0",
"demo_mode_write_protect=0",
"generate_node_acls=1",
"cache_dynamic_acls=1"))
output = process.system_output(auth_cmd + attr_cmd)
logging.info("Define access rights: %s" % output)
# Save configuration
process.system("targetcli / saveconfig")
# Restart iSCSI service
process.system("systemctl restart iscsid.service")
def delete_target(self):
"""
Delete target from host.
"""
# Delete block
if self.device is not None:
cmd = "targetcli /backstores/fileio ls"
output = process.system_output(cmd)
if re.findall("%s" % self.device, output, re.M):
dev_del = ("targetcli /backstores/fileio/ delete %s"
% self.device)
process.system(dev_del)
# Delete IQN
cmd = "targetcli ls /iscsi 1"
output = process.system_output(cmd)
if re.findall("%s" % self.target, output, re.M):
del_cmd = "targetcli /iscsi delete %s" % self.target
process.system(del_cmd)
# Save deleted configuration to avoid restoring
cmd = "targetcli / saveconfig"
process.system(cmd)
class Iscsi(object):
"""
Basic iSCSI support class,
which will handle the emulated iscsi export and
    access to both real iscsi and emulated iscsi devices.
    The class supports different kinds of iSCSI backends (TGT and LIO)
    and returns an ISCSI instance.
"""
@staticmethod
def create_iSCSI(params, root_dir=data_dir.get_tmp_dir()):
iscsi_instance = None
try:
path.find_command("iscsiadm")
path.find_command("tgtadm")
iscsi_instance = IscsiTGT(params, root_dir)
except path.CmdNotFoundError:
try:
path.find_command("iscsiadm")
path.find_command("targetcli")
iscsi_instance = IscsiLIO(params, root_dir)
except path.CmdNotFoundError:
pass
return iscsi_instance
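# A minimal usage sketch for the factory above (parameter values are hypothetical;
# it assumes root privileges and that iscsiadm plus either tgtadm or targetcli are
# installed, so the emulated target can actually be exported):
#
#     params = {"target": "iqn.2015-05.com.example:iscsi.disk",
#               "portal_ip": "127.0.0.1",
#               "emulated_image": "emulated_iscsi.img",
#               "image_size": "1G"}
#     iscsi = Iscsi.create_iSCSI(params)
#     iscsi.login()
#     device = iscsi.get_device_name()
#     iscsi.cleanup()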
|
vipmike007/avocado-vt
|
virttest/iscsi.py
|
Python
|
gpl-2.0
| 29,173
| 0.001165
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
from msrest.exceptions import HttpOperationError
class Error(Model):
"""Cognitive Services error object.
:param error: The error body.
:type error: :class:`ErrorBody
<azure.mgmt.cognitiveservices.models.ErrorBody>`
"""
_attribute_map = {
'error': {'key': 'error', 'type': 'ErrorBody'},
}
def __init__(self, error=None):
self.error = error
class ErrorException(HttpOperationError):
"""Server responsed with exception of type: 'Error'.
:param deserialize: A deserializer
:param response: Server response to be deserialized.
"""
def __init__(self, deserialize, response, *args):
super(ErrorException, self).__init__(deserialize, response, 'Error', *args)
|
v-iam/azure-sdk-for-python
|
azure-mgmt-cognitiveservices/azure/mgmt/cognitiveservices/models/error.py
|
Python
|
mit
| 1,252
| 0.000799
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sql
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine; bind
# migrate_engine to your metadata
meta = sql.MetaData()
meta.bind = migrate_engine
# catalog
service_table = sql.Table(
'service',
meta,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('type', sql.String(255)),
sql.Column('extra', sql.Text()))
service_table.create(migrate_engine, checkfirst=True)
endpoint_table = sql.Table(
'endpoint',
meta,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('region', sql.String(255)),
sql.Column('service_id',
sql.String(64),
sql.ForeignKey('service.id'),
nullable=False),
sql.Column('extra', sql.Text()))
endpoint_table.create(migrate_engine, checkfirst=True)
# identity
role_table = sql.Table(
'role',
meta,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('name', sql.String(255), unique=True, nullable=False))
role_table.create(migrate_engine, checkfirst=True)
if migrate_engine.name == 'ibm_db_sa':
# NOTE(blk-u): SQLAlchemy for PostgreSQL picks the name tenant_name_key
# for the unique constraint, but for DB2 doesn't give the UC a name
# unless we tell it to and there is no DDL to alter a column to drop
# an unnamed unique constraint, so this code creates a named unique
# constraint on the name column rather than an unnamed one.
# (This is used in migration 16.)
tenant_table = sql.Table(
'tenant',
meta,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('name', sql.String(64), nullable=False),
sql.Column('extra', sql.Text()),
sql.UniqueConstraint('name', name='tenant_name_key'))
else:
tenant_table = sql.Table(
'tenant',
meta,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('name', sql.String(64), unique=True, nullable=False),
sql.Column('extra', sql.Text()))
tenant_table.create(migrate_engine, checkfirst=True)
metadata_table = sql.Table(
'metadata',
meta,
sql.Column('user_id', sql.String(64), primary_key=True),
sql.Column('tenant_id', sql.String(64), primary_key=True),
sql.Column('data', sql.Text()))
metadata_table.create(migrate_engine, checkfirst=True)
ec2_credential_table = sql.Table(
'ec2_credential',
meta,
sql.Column('access', sql.String(64), primary_key=True),
sql.Column('secret', sql.String(64)),
sql.Column('user_id', sql.String(64)),
sql.Column('tenant_id', sql.String(64)))
ec2_credential_table.create(migrate_engine, checkfirst=True)
if migrate_engine.name == 'ibm_db_sa':
# NOTE(blk-u): SQLAlchemy for PostgreSQL picks the name user_name_key
# for the unique constraint, but for DB2 doesn't give the UC a name
# unless we tell it to and there is no DDL to alter a column to drop
# an unnamed unique constraint, so this code creates a named unique
# constraint on the name column rather than an unnamed one.
# (This is used in migration 16.)
user_table = sql.Table(
'user',
meta,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('name', sql.String(64), nullable=False),
sql.Column('extra', sql.Text()),
sql.UniqueConstraint('name', name='user_name_key'))
else:
user_table = sql.Table(
'user',
meta,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('name', sql.String(64), unique=True, nullable=False),
sql.Column('extra', sql.Text()))
user_table.create(migrate_engine, checkfirst=True)
user_tenant_membership_table = sql.Table(
'user_tenant_membership',
meta,
sql.Column(
'user_id',
sql.String(64),
sql.ForeignKey('user.id'),
primary_key=True),
sql.Column(
'tenant_id',
sql.String(64),
sql.ForeignKey('tenant.id'),
primary_key=True))
user_tenant_membership_table.create(migrate_engine, checkfirst=True)
# token
token_table = sql.Table(
'token',
meta,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('expires', sql.DateTime()),
sql.Column('extra', sql.Text()))
token_table.create(migrate_engine, checkfirst=True)
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
meta = sql.MetaData()
meta.bind = migrate_engine
tables = ['user_tenant_membership', 'token', 'user', 'tenant', 'role',
'metadata', 'ec2_credential', 'endpoint', 'service']
for t in tables:
table = sql.Table(t, meta, autoload=True)
table.drop(migrate_engine, checkfirst=True)
|
dsiddharth/access-keys
|
keystone/common/sql/migrate_repo/versions/001_add_initial_tables.py
|
Python
|
apache-2.0
| 5,811
| 0
|
"""
The `compat` module provides support for backwards compatibility with older
versions of Django/Python, and compatibility wrappers around optional packages.
"""
# flake8: noqa
from __future__ import unicode_literals
import inspect
import django
from django.apps import apps
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import connection, models, transaction
from django.template import Context, RequestContext, Template
from django.utils import six
from django.views.generic import View
try:
from django.urls import (
NoReverseMatch, RegexURLPattern, RegexURLResolver, ResolverMatch, Resolver404, get_script_prefix, reverse, reverse_lazy, resolve
)
except ImportError:
from django.core.urlresolvers import ( # Will be removed in Django 2.0
NoReverseMatch, RegexURLPattern, RegexURLResolver, ResolverMatch, Resolver404, get_script_prefix, reverse, reverse_lazy, resolve
)
try:
import urlparse # Python 2.x
except ImportError:
import urllib.parse as urlparse
def unicode_repr(instance):
# Get the repr of an instance, but ensure it is a unicode string
# on both python 3 (already the case) and 2 (not the case).
if six.PY2:
return repr(instance).decode('utf-8')
return repr(instance)
def unicode_to_repr(value):
# Coerce a unicode string to the correct repr return type, depending on
# the Python version. We wrap all our `__repr__` implementations with
# this and then use unicode throughout internally.
if six.PY2:
return value.encode('utf-8')
return value
def unicode_http_header(value):
# Coerce HTTP header value to unicode.
if isinstance(value, six.binary_type):
return value.decode('iso-8859-1')
return value
def total_seconds(timedelta):
# TimeDelta.total_seconds() is only available in Python 2.7
if hasattr(timedelta, 'total_seconds'):
return timedelta.total_seconds()
else:
return (timedelta.days * 86400.0) + float(timedelta.seconds) + (timedelta.microseconds / 1000000.0)
def distinct(queryset, base):
if settings.DATABASES[queryset.db]["ENGINE"] == "django.db.backends.oracle":
# distinct analogue for Oracle users
return base.filter(pk__in=set(queryset.values_list('pk', flat=True)))
return queryset.distinct()
# Obtaining manager instances and names from model options differs after 1.10.
def get_names_and_managers(options):
if django.VERSION >= (1, 10):
# Django 1.10 onwards provides a `.managers` property on the Options.
return [
(manager.name, manager)
for manager
in options.managers
]
# For Django 1.8 and 1.9, use the three-tuple information provided
# by .concrete_managers and .abstract_managers
return [
(manager_info[1], manager_info[2])
for manager_info
in (options.concrete_managers + options.abstract_managers)
]
# field.rel is deprecated from 1.9 onwards
def get_remote_field(field, **kwargs):
if 'default' in kwargs:
if django.VERSION < (1, 9):
return getattr(field, 'rel', kwargs['default'])
return getattr(field, 'remote_field', kwargs['default'])
if django.VERSION < (1, 9):
return field.rel
return field.remote_field
def _resolve_model(obj):
"""
Resolve supplied `obj` to a Django model class.
`obj` must be a Django model class itself, or a string
representation of one. Useful in situations like GH #1225 where
Django may not have resolved a string-based reference to a model in
another model's foreign key definition.
String representations should have the format:
'appname.ModelName'
"""
if isinstance(obj, six.string_types) and len(obj.split('.')) == 2:
app_name, model_name = obj.split('.')
resolved_model = apps.get_model(app_name, model_name)
if resolved_model is None:
msg = "Django did not return a model for {0}.{1}"
raise ImproperlyConfigured(msg.format(app_name, model_name))
return resolved_model
elif inspect.isclass(obj) and issubclass(obj, models.Model):
return obj
raise ValueError("{0} is not a Django model".format(obj))
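# A short illustration of the behaviour documented above (assumes
# django.contrib.auth is installed so the string reference can resolve):
#
#     _resolve_model('auth.User')      # -> the django.contrib.auth User model class
#     _resolve_model(SomeModel)        # a model class is returned unchanged
#     _resolve_model('not.a.model.x')  # raises ValueError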
def is_authenticated(user):
if django.VERSION < (1, 10):
return user.is_authenticated()
return user.is_authenticated
def is_anonymous(user):
if django.VERSION < (1, 10):
return user.is_anonymous()
return user.is_anonymous
def get_related_model(field):
if django.VERSION < (1, 9):
return _resolve_model(field.rel.to)
return field.remote_field.model
def value_from_object(field, obj):
if django.VERSION < (1, 9):
return field._get_val_from_obj(obj)
return field.value_from_object(obj)
# contrib.postgres only supported from 1.8 onwards.
try:
from django.contrib.postgres import fields as postgres_fields
except ImportError:
postgres_fields = None
# JSONField is only supported from 1.9 onwards
try:
from django.contrib.postgres.fields import JSONField
except ImportError:
JSONField = None
# coreapi is optional (Note that uritemplate is a dependency of coreapi)
try:
import coreapi
import uritemplate
except (ImportError, SyntaxError):
# SyntaxError is possible under python 3.2
coreapi = None
uritemplate = None
# coreschema is optional
try:
import coreschema
except ImportError:
coreschema = None
# django-filter is optional
try:
import django_filters
except ImportError:
django_filters = None
# django-crispy-forms is optional
try:
import crispy_forms
except ImportError:
crispy_forms = None
# requests is optional
try:
import requests
except ImportError:
requests = None
# Django-guardian is optional. Import only if guardian is in INSTALLED_APPS
# Fixes (#1712). We keep the try/except for the test suite.
guardian = None
try:
if 'guardian' in settings.INSTALLED_APPS:
import guardian
except ImportError:
pass
# PATCH method is not implemented by Django
if 'patch' not in View.http_method_names:
View.http_method_names = View.http_method_names + ['patch']
# Markdown is optional
try:
import markdown
if markdown.version <= '2.2':
HEADERID_EXT_PATH = 'headerid'
LEVEL_PARAM = 'level'
elif markdown.version < '2.6':
HEADERID_EXT_PATH = 'markdown.extensions.headerid'
LEVEL_PARAM = 'level'
else:
HEADERID_EXT_PATH = 'markdown.extensions.toc'
LEVEL_PARAM = 'baselevel'
def apply_markdown(text):
"""
Simple wrapper around :func:`markdown.markdown` to set the base level
of '#' style headers to <h2>.
"""
extensions = [HEADERID_EXT_PATH]
extension_configs = {
HEADERID_EXT_PATH: {
LEVEL_PARAM: '2'
}
}
md = markdown.Markdown(
extensions=extensions, extension_configs=extension_configs
)
return md.convert(text)
except ImportError:
apply_markdown = None
markdown = None
try:
import pygments
from pygments.lexers import get_lexer_by_name
from pygments.formatters import HtmlFormatter
def pygments_highlight(text, lang, style):
lexer = get_lexer_by_name(lang, stripall=False)
formatter = HtmlFormatter(nowrap=True, style=style)
return pygments.highlight(text, lexer, formatter)
def pygments_css(style):
formatter = HtmlFormatter(style=style)
return formatter.get_style_defs('.highlight')
except ImportError:
pygments = None
def pygments_highlight(text, lang, style):
return text
def pygments_css(style):
return None
try:
import pytz
from pytz.exceptions import InvalidTimeError
except ImportError:
InvalidTimeError = Exception
# `separators` argument to `json.dumps()` differs between 2.x and 3.x
# See: http://bugs.python.org/issue22767
if six.PY3:
SHORT_SEPARATORS = (',', ':')
LONG_SEPARATORS = (', ', ': ')
INDENT_SEPARATORS = (',', ': ')
else:
SHORT_SEPARATORS = (b',', b':')
LONG_SEPARATORS = (b', ', b': ')
INDENT_SEPARATORS = (b',', b': ')
try:
# DecimalValidator is unavailable in Django < 1.9
from django.core.validators import DecimalValidator
except ImportError:
DecimalValidator = None
def set_rollback():
if hasattr(transaction, 'set_rollback'):
if connection.settings_dict.get('ATOMIC_REQUESTS', False):
# If running in >=1.6 then mark a rollback as required,
# and allow it to be handled by Django.
if connection.in_atomic_block:
transaction.set_rollback(True)
elif transaction.is_managed():
# Otherwise handle it explicitly if in managed mode.
if transaction.is_dirty():
transaction.rollback()
transaction.leave_transaction_management()
else:
# transaction not managed
pass
def template_render(template, context=None, request=None):
"""
Passing Context or RequestContext to Template.render is deprecated in 1.9+,
see https://github.com/django/django/pull/3883 and
https://github.com/django/django/blob/1.9/django/template/backends/django.py#L82-L84
:param template: Template instance
:param context: dict
:param request: Request instance
:return: rendered template as SafeText instance
"""
if isinstance(template, Template):
if request:
context = RequestContext(request, context)
else:
context = Context(context)
return template.render(context)
# backends template, e.g. django.template.backends.django.Template
else:
return template.render(context, request=request)
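# A small usage sketch for template_render (Template is imported at the top of
# this module; nothing below is specific to REST framework):
#
#     t = Template("Hello {{ name }}")
#     template_render(t, {"name": "world"})    # -> 'Hello world'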
def set_many(instance, field, value):
if django.VERSION < (1, 10):
setattr(instance, field, value)
else:
field = getattr(instance, field)
field.set(value)
def include(module, namespace=None, app_name=None):
from django.conf.urls import include
if django.VERSION < (1,9):
return include(module, namespace, app_name)
else:
return include((module, app_name), namespace)
|
steventimberman/masterDebater
|
venv/lib/python2.7/site-packages/rest_framework/compat.py
|
Python
|
mit
| 10,280
| 0.000486
|
""" generic tests from the Datetimelike class """
import numpy as np
import pandas as pd
from pandas.util import testing as tm
from pandas import Series, Index, DatetimeIndex, date_range
from ..datetimelike import DatetimeLike
class TestDatetimeIndex(DatetimeLike):
_holder = DatetimeIndex
def setup_method(self, method):
self.indices = dict(index=tm.makeDateIndex(10),
index_dec=date_range('20130110', periods=10,
freq='-1D'))
self.setup_indices()
def create_index(self):
return date_range('20130101', periods=5)
def test_shift(self):
# test shift for datetimeIndex and non datetimeIndex
# GH8083
drange = self.create_index()
result = drange.shift(1)
expected = DatetimeIndex(['2013-01-02', '2013-01-03', '2013-01-04',
'2013-01-05',
'2013-01-06'], freq='D')
tm.assert_index_equal(result, expected)
result = drange.shift(-1)
expected = DatetimeIndex(['2012-12-31', '2013-01-01', '2013-01-02',
'2013-01-03', '2013-01-04'],
freq='D')
tm.assert_index_equal(result, expected)
result = drange.shift(3, freq='2D')
expected = DatetimeIndex(['2013-01-07', '2013-01-08', '2013-01-09',
'2013-01-10',
'2013-01-11'], freq='D')
tm.assert_index_equal(result, expected)
def test_pickle_compat_construction(self):
pass
def test_intersection(self):
first = self.index
second = self.index[5:]
intersect = first.intersection(second)
assert tm.equalContents(intersect, second)
# GH 10149
cases = [klass(second.values) for klass in [np.array, Series, list]]
for case in cases:
result = first.intersection(case)
assert tm.equalContents(result, second)
third = Index(['a', 'b', 'c'])
result = first.intersection(third)
expected = pd.Index([], dtype=object)
tm.assert_index_equal(result, expected)
def test_union(self):
first = self.index[:5]
second = self.index[5:]
everything = self.index
union = first.union(second)
assert tm.equalContents(union, everything)
# GH 10149
cases = [klass(second.values) for klass in [np.array, Series, list]]
for case in cases:
result = first.union(case)
assert tm.equalContents(result, everything)
|
NixaSoftware/CVis
|
venv/lib/python2.7/site-packages/pandas/tests/indexes/datetimes/test_datetimelike.py
|
Python
|
apache-2.0
| 2,661
| 0
|
# -*- coding: utf-8 -*-
#------------------------------------------------------------------------------
# Copyright (c) 2012-2014, Michael Reuter
# Distributed under the MIT License. See LICENSE.txt for more information.
#------------------------------------------------------------------------------
from .moon_info import MoonInfo
from .observing_site import ObservingSite
class ObservingInfo(object):
'''
This class is responsible for keeping the observing site information and
the moon information object together. It will be responsible for updating
any of the observing site information that then affects the moon
information.
'''
__shared_state = {"obs_site": ObservingSite(),
"moon_info": MoonInfo()}
def __init__(self):
'''
Constructor
'''
self.__dict__ = self.__shared_state
def update(self):
self.moon_info.compute(self.obs_site.getObserver())
if __name__ == "__main__":
oi = ObservingInfo()
oi.update()
print oi.obs_site
import time
time.sleep(2)
oi2 = ObservingInfo()
oi2.update()
print oi2.obs_site
|
mareuter/lct-python
|
lct/utils/observing_info.py
|
Python
|
mit
| 1,166
| 0.008576
|
import os
import platform
import sys
import threading
from concurrent.futures import ThreadPoolExecutor
from os import environ, path
from threading import Timer
import grpc
import ptvsd
from getgauge import handlers, logger, processor
from getgauge.impl_loader import copy_skel_files
from getgauge.messages import runner_pb2_grpc
from getgauge.static_loader import load_files
from getgauge.util import get_step_impl_dirs
PLUGIN_JSON = 'python.json'
VERSION = 'version'
ATTACH_DEBUGGER_EVENT = 'Runner Ready for Debugging'
def main():
logger.info("Python: {}".format(platform.python_version()))
if sys.argv[1] == "--init":
        logger.debug("Initializing gauge project.")
copy_skel_files()
else:
load_implementations()
start()
def load_implementations():
d = get_step_impl_dirs()
logger.debug(
"Loading step implemetations from {} dirs.".format(', '.join(d)))
for impl_dir in d:
if not path.exists(impl_dir):
            logger.error('cannot load implementations from {}. {} does not exist.'.format(
impl_dir, impl_dir))
load_files(d)
def _handle_detached():
logger.info("No debugger attached. Stopping the execution.")
os._exit(1)
def start():
if environ.get('DEBUGGING'):
ptvsd.enable_attach(address=(
'127.0.0.1', int(environ.get('DEBUG_PORT'))))
print(ATTACH_DEBUGGER_EVENT)
t = Timer(int(environ.get("debugger_wait_time", 30)), _handle_detached)
t.start()
ptvsd.wait_for_attach()
t.cancel()
logger.debug('Starting grpc server..')
server = grpc.server(ThreadPoolExecutor(max_workers=1))
p = server.add_insecure_port('127.0.0.1:0')
handler = handlers.RunnerServiceHandler(server)
runner_pb2_grpc.add_RunnerServicer_to_server(handler, server)
logger.info('Listening on port:{}'.format(p))
server.start()
t = threading.Thread(
name="listener", target=handler.wait_for_kill_event)
t.start()
t.join()
os._exit(0)
if __name__ == '__main__':
main()
|
kashishm/gauge-python
|
start.py
|
Python
|
mit
| 2,076
| 0.000482
|
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Logit'] , ['MovingAverage'] , ['Seasonal_WeekOfYear'] , ['NoAR'] );
|
antoinecarme/pyaf
|
tests/model_control/detailed/transf_Logit/model_control_one_enabled_Logit_MovingAverage_Seasonal_WeekOfYear_NoAR.py
|
Python
|
bsd-3-clause
| 163
| 0.04908
|
import argparse
import taichi as ti
FRAMES = 100
def test_ad_gravity():
from taichi.examples.simulation.ad_gravity import init, substep
init()
for _ in range(FRAMES):
for _ in range(50):
substep()
def video_ad_gravity(result_dir):
import numpy as np
from taichi.examples.simulation.ad_gravity import init, substep, x
video_manager = ti.tools.VideoManager(output_dir=result_dir,
framerate=24,
automatic_build=False)
gui = ti.GUI('Autodiff gravity', show_gui=False)
init()
for _ in range(FRAMES):
for _ in range(50):
substep()
gui.circles(x.to_numpy(), radius=3)
video_manager.write_frame(gui.get_image())
gui.clear()
video_manager.make_video(mp4=True, gif=False)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Generate ad_gravity video')
parser.add_argument('output_directory',
help='output directory of generated video')
video_ad_gravity(parser.parse_args().output_directory)
|
yuanming-hu/taichi
|
tests/python/examples/simulation/test_ad_gravity.py
|
Python
|
mit
| 1,137
| 0
|
"""This script automates the copying of the default keymap into your own keymap.
"""
import shutil
from pathlib import Path
import qmk.path
from qmk.decorators import automagic_keyboard, automagic_keymap
from milc import cli
@cli.argument('-kb', '--keyboard', help='Specify keyboard name. Example: 1upkeyboards/1up60hse')
@cli.argument('-km', '--keymap', help='Specify the name for the new keymap directory')
@cli.subcommand('Creates a new keymap for the keyboard of your choosing')
@automagic_keyboard
@automagic_keymap
def new_keymap(cli):
"""Creates a new keymap for the keyboard of your choosing.
"""
# ask for user input if keyboard or keymap was not provided in the command line
keyboard = cli.config.new_keymap.keyboard if cli.config.new_keymap.keyboard else input("Keyboard Name: ")
keymap = cli.config.new_keymap.keymap if cli.config.new_keymap.keymap else input("Keymap Name: ")
# generate keymap paths
kb_path = Path('keyboards') / keyboard
keymap_path = qmk.path.keymap(keyboard)
keymap_path_default = keymap_path / 'default'
keymap_path_new = keymap_path / keymap
# check directories
if not kb_path.exists():
cli.log.error('Keyboard %s does not exist!', kb_path)
return False
if not keymap_path_default.exists():
cli.log.error('Keyboard default %s does not exist!', keymap_path_default)
return False
if keymap_path_new.exists():
cli.log.error('Keymap %s already exists!', keymap_path_new)
return False
# create user directory with default keymap files
shutil.copytree(keymap_path_default, keymap_path_new, symlinks=True)
# end message to user
cli.log.info("%s keymap directory created in: %s", keymap, keymap_path_new)
cli.log.info("Compile a firmware with your new keymap by typing: \n\n\tqmk compile -kb %s -km %s\n", keyboard, keymap)
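# A typical invocation of this subcommand from the repository root (the keyboard
# name below reuses the example from the help text; the keymap name is arbitrary):
#
#     qmk new-keymap -kb 1upkeyboards/1up60hse -km my_keymap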
|
kmtoki/qmk_firmware
|
lib/python/qmk/cli/new/keymap.py
|
Python
|
gpl-2.0
| 1,884
| 0.004246
|
# -*- coding: utf-8 -*-
#
# Copyright 2015 AirPlug Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Created on 2012. 7. 19.
@author: springchoi
"""
import sys
from datatype import *
import worker
import re
import traceback
import time
import datetime
import collections
import MySQLdb
try:
from worker import log
except ImportError:
import logging as log
Priority = collections.namedtuple('Priority', 'LOW NORMAL HIGH')._make(range(3))
class UniqueList(list):
key = lambda x: x
def setKey(self, key=lambda x: x):
if not callable(key):
raise RuntimeError("Key is not callable")
self.key = key
def addSet(self, item, key=None):
if not key:
key = self.key
elif not callable(key):
raise RuntimeError("Key is not callable")
if len(filter(lambda x: key(x) == key(item), self)) > 0:
return False
self.append(item)
return True
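# Illustrative sketch (not part of the original module): UniqueList keeps only the first
# item seen for a given key, which is how BSSIDs/cell ids are de-duplicated below. The
# dict entries here are hypothetical stand-ins for WiFiNode/CellNode instances.
#   ul = UniqueList()
#   ul.setKey(lambda entry: entry['bssid'])
#   ul.addSet({'bssid': 'aa:bb', 'rssi': -40})   # True, appended
#   ul.addSet({'bssid': 'aa:bb', 'rssi': -70})   # False, same key already present
#   len(ul)                                      # 1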
class GeoInfo(collections.namedtuple('_GeoInfo', 'lat, lng, acc, geosrc, from_cell')):
def __new__(cls, lat=-9999, lng=-9999, acc=50000, geosrc='unknown', from_cell=False):
# add default values
return super(GeoInfo, cls).__new__(cls, lat, lng, acc, geosrc, from_cell)
class WiFiNode(collections.namedtuple('_WiFiNode', 'state, bssid, ssid, rssi, regdtm, bregap, bmap, optrcom, geoloc, priority')):
__registerdApSSIDPattern = {'LGT':('U\+',), 'KT':('olleh_GiGA_WiFi', 'ollehWiFi', 'NESPOT', 'QOOKnSHOW'), 'SKT':('T wifi zone',)}
__hotspotApSSIDPattern = ('AndroidHotspot', 'AndroidAP', 'HTC-', 'Galaxy ', 'SKY A')
__mobileApSSIDPattern = ('WibroEgg', 'ollehEgg', 'KWI-B', 'SHOW_JAPAN_EGG', 'egg\Z')
def __new__(cls, state, bssid, ssid='', regdtm='19000101000000', rssi=-200, bregap=False, bmap=False, optrcom='none', geoloc=None, priority=Priority.NORMAL):
# Classify WiFi
try:
if ssid not in ('', None):
ssid = re.sub(r'^\s*"(.*)"\s*$', r'\1', unicode(ssid))
if ssid.find('"') >= 0:
log.error("!!! SSID - %s" % ssid)
if cls.isHotspot(ssid):
priority = Priority.LOW
else:
optrcom = cls.getWiFiOperator(ssid)
bregap = True if optrcom != 'none' else False
if not bregap:
bmap = cls.isMobile(ssid)
try:
ssid = MySQLdb.escape_string(unicode(ssid).encode('utf-8'))
except Exception, e:
# Non-ascii data.
log.warn("SSID MySQLdb.escape_string Error - %s, %s" % (ssid, e))
if not geoloc:
geoloc = GeoInfo()
except Exception, e:
log.error(e)
log.error('BSSID - %s, SSID - %s' % (bssid, ssid))
exc_type, exc_value, exc_traceback = sys.exc_info()
log.error(traceback.format_exception(exc_type, exc_value, exc_traceback))
raise e
return super(WiFiNode, cls).__new__(cls, state, bssid, ssid, rssi, regdtm, bregap, bmap, optrcom, geoloc, priority)
@classmethod
def isHotspot(cls, ssid):
patt = r'%s' % '|'.join(cls.__hotspotApSSIDPattern)
if re.match(patt, ssid, re.IGNORECASE):
#log.info("%s - Hotspot SSID, drop this AP" % ssid)
return True
@classmethod
def getWiFiOperator(cls, ssid):
for provider in cls.__registerdApSSIDPattern.keys():
patt = r'%s' % '|'.join(cls.__registerdApSSIDPattern[provider])
if re.match(patt, ssid, re.IGNORECASE):
#log.info("Registered SSID - %s" % ssid)
return provider
return 'none'
@classmethod
def isMobile(cls, ssid):
patt = r'%s' % '|'.join(cls.__mobileApSSIDPattern)
if re.search(patt, ssid, re.IGNORECASE):
#log.info("Mobile AP - %s" % ssid)
return True
return False
class CellNode(collections.namedtuple('_CellNode', 'state, cellid, plmnid, cid, lac, celltype, regdtm, geoloc, priority')):
def __new__(cls, state, cellid, celltype=0, regdtm='19000101000000', geoloc=None, priority=Priority.NORMAL):
# add default values
try:
plmnid, cid, lac = cellid.split('_')
# guard from invalid data
if len(plmnid) > 6 or int(plmnid) == 0:
plmnid = '0'
if not geoloc:
geoloc = GeoInfo()
except Exception, e:
raise e
return super(CellNode, cls).__new__(cls, state, cellid, plmnid, cid, lac, celltype, regdtm, geoloc, priority)
def addWiFi(cursor, node):
strSql = """INSERT INTO
apmain.apinfo (bssid, ssid, regdtm, bregap, bmap, lat, lng, acc, geosrc, optrcom, seq)
VALUES('%s','%s','%s','%d','%d','%f','%f','%d','%s','%s','%s')
        ON DUPLICATE KEY UPDATE
lat = IF(VALUES(seq) > seq, VALUES(lat), lat),
lng = IF(VALUES(seq) > seq, VALUES(lng), lng),
seq = IF(VALUES(seq) > seq, VALUES(seq), seq),
acc = IF(VALUES(seq) > seq, VALUES(acc), acc),
geosrc=VALUES(geosrc)"""
try:
strSql = strSql % (node.bssid, node.ssid, node.regdtm, int(node.bregap), int(node.bmap), node.geoloc.lat, node.geoloc.lng, node.geoloc.acc, node.geoloc.geosrc, node.optrcom, node.rssi)
except Exception, e:
log.error("SQL GEN ERR - %s" % bytes(node.ssid))
strSql = strSql % (node.bssid, '', node.regdtm, int(node.bregap), int(node.bmap), node.geoloc.lat, node.geoloc.lng, node.geoloc.acc, node.geoloc.geosrc, node.optrcom, node.rssi)
try:
cursor.execute(strSql)
log.debug("INSERT - %s" % node.bssid)
except Exception, e:
# Duplicate entry error
if e[0] != 1062:
log.error(e)
log.error(strSql)
return False
return True
netTypeCode = {'gsm':1, 'cdma':2, 'lte':3}
def addCellTower(cursor, node):
strSql = """INSERT INTO
apmain.cellinfo (fullid, plmnid, cellid, lac, celltype, regdtm, lat, lng, acc, geosrc, seq)
VALUES('%s','%s','%s','%s','%d','%s','%s','%f','%f','%s', '1')
        ON DUPLICATE KEY UPDATE lat=((lat*seq)+VALUES(lat))/(seq+1), lng=((lng*seq)+VALUES(lng))/(seq+1), seq=seq+1, geosrc=VALUES(geosrc)"""
try:
strSql = strSql % (node.cellid, node.plmnid, node.cid, node.lac, 0, node.regdtm, node.geoloc.lat, node.geoloc.lng, node.geoloc.acc, 'cellLoc' if node.geoloc.from_cell else node.geoloc.geosrc)
cursor.execute(strSql)
log.debug("INSERT - %s" % node.cellid)
except Exception, e:
# Duplicate entry error
if e[0] != 1062:
log.error(e)
log.error(strSql)
return False
return True
class ProcessNetworkNode(object):
OW_TASK_SUBSCRIBE_EVENTS = ['evtPlayerLog', 'evtNetworkLog']
OW_TASK_PUBLISH_EVENTS = []
OW_USE_HASHING = False
OW_HASH_KEY = None
OW_NUM_WORKER = 8
def publishEvent(self, event, params):
# THIS METHOD WILL BE OVERRIDE
# DO NOT EDIT THIS METHOD
pass
def __makeCellId(self, plmnid, cid, lac):
try:
cellId = map(lambda x: str(x) if str(x).isdigit() else '0', [plmnid, cid, lac])
if 0 not in map(int, cellId) and len(cellId[0]) < 7:
return '_'.join(cellId)
except Exception, e:
log.error(e)
return None
def extractNetworkNode(self, params):
# net info structure
# wifi : 'wifi', status, ssid, bssid
# cell : 'cell', status, celltower id
# status : 'active' for current network, 'inactive' for logged network
timestamp = time.strftime('%Y%m%d%H%M%S', time.gmtime(params['tTM']))
logType = params.get('log_type', 'unknown')
netList = UniqueList()
netList.setKey(key=lambda x: x.cellid if isinstance(x, CellNode) else x.bssid)
if 'lat' in params and 'lng' in params:
geoloc = GeoInfo(lat=params.get('lat'), lng=params.get('lng'), acc=params.get('accuracy', 500), geosrc='device')
else:
geoloc = None
# APAT Header fields
try:
if 'pwf' in params:
pwf = params['pwf']
if 'bssid' in pwf and EthAddrType.isEthAddr(pwf['bssid']):
node = WiFiNode(state='active', bssid=pwf['bssid'], ssid=pwf.get('ssid', ''), regdtm=timestamp, geoloc=geoloc)
netList.addSet(node)
except Exception, e:
log.error(e)
log.error(params)
try:
if 'pcell_list' in params and isinstance(params['pcell_list'], list) and len(params['pcell_list']) > 0:
pcell = params['pcell_list'][0]
if 'cid' in pcell and 'lac' in pcell:
cellId = self.__makeCellId(int("%03d%02d" % (pcell.get('mcc', 0), pcell.get('mnc', 0))), pcell.get('cid'), pcell.get('lac'))
if cellId:
if 'ctype' in pcell and str(pcell['ctype']).isdigit():
ctype = int(pcell.get('ctype', -1)) + 1 # -1 : Unknown
cellType = ctype if ctype in netTypeCode.values() else 0
else:
cellType = 0
node = CellNode(state='active', cellid=cellId, celltype=cellType, regdtm=timestamp, geoloc=geoloc, priority=Priority.HIGH)
netList.addSet(node)
except Exception, e:
log.error(e)
log.error(params)
return netList
def handler(self, params):
        # Ignore Event Key/Value records
if 'evtKey' in params.keys():
return
# Extract network nodes from logging
try:
networks = self.extractNetworkNode(params)
except Exception, e:
log.error(e)
log.error(params)
return
cursor = worker.dbmanager.allocDictCursor('myapmain')
try:
for node in networks:
nodeType = 'wifi' if isinstance(node, WiFiNode) else 'cell'
if nodeType == 'wifi' and node.priority > Priority.LOW:
try:
addWiFi(cursor, node)
except Exception, e:
log.error(e)
elif nodeType == 'cell':
try:
addCellTower(cursor, node)
except Exception, e:
log.error(e)
except Exception, e:
log.error(e)
exc_type, exc_value, exc_traceback = sys.exc_info()
log.error(traceback.format_exception(exc_type, exc_value, exc_traceback))
worker.dbmanager.freeCursor(cursor)
class CheckCellPCIField(object):
OW_TASK_SUBSCRIBE_EVENTS = ['evtPlayerLog', 'evtNetworkLog']
OW_TASK_PUBLISH_EVENTS = []
OW_USE_HASHING = False
OW_HASH_KEY = None
OW_NUM_WORKER = 8
def __init__(self):
pass
def publishEvent(self, event, params):
# THIS METHOD WILL BE OVERRIDE
# DO NOT EDIT THIS METHOD
pass
def handler(self, params):
if not isinstance(params, dict):
return
pcell = params.get('pcell')
pcellList = params.get('pcell_list')
if not pcell or not pcellList:
return
try:
plmnid = pcell.get('mcc') + pcell.get('mnc')
except Exception, e:
return
pciInfo = {}
for cell in pcellList:
if 'pci' not in cell:
continue
try:
cellId = '%s_%s_%s' % (plmnid, cell['cid'], cell['tac'])
except Exception, e:
continue
pciInfo[cellId] = cell['pci']
cursor = worker.dbmanager.allocDictCursor('myapmain')
strSql = "UPDATE apmain.cellinfo SET pci='%s' WHERE fullid=%s and pci is null"
try:
cursor.executemany(strSql, [(v[1], v[0]) for v in pciInfo.items()])
except Exception, e:
log.error(e)
worker.dbmanager.freeCursor(cursor)
|
ddinsight/dd-streamworks
|
stream_worker/devmodule/production/networklog/__init__.py
|
Python
|
apache-2.0
| 12,862
| 0.003424
|
# -*- coding: utf8 -*-
SQL = (
('list_fonds_report1', """
select
F.FKOD,F.FNAME, (F.A16+if(F.A22,A22,0)) as A16
FROM
`af3_fond` F
WHERE
FNAME like ('%%%(qr)s%%') or A1 like ('%%%(qr)s%%')
ORDER BY FKOD;"""),
)
FOUND_ROWS = True
ROOT = "fonds"
ROOT_PREFIX = None
ROOT_POSTFIX= None
XSL_TEMPLATE = "data/af-web.xsl"
EVENT = None
WHERE = ()
PARAM = ("qr",)
TITLE="Поиск фондов"
MESSAGE="Нет результатов по вашему запросу, вернитесь назад"
ORDER = None
|
ffsdmad/af-web
|
cgi-bin/plugins2/report/fond_search_report1.py
|
Python
|
gpl-3.0
| 547
| 0.006085
|
# -*- coding: UTF-8 -*-
# Copyright (C) 2007, 2009, 2011-2012 J. David Ibáñez <jdavid.ibp@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Import from the Standard Library
import os
from datetime import datetime, timedelta, time
from heapq import heappush, heappop
from multiprocessing import Process
from os.path import abspath, dirname
from uuid import uuid4
# Import from pygit2
from pygit2 import TreeBuilder, GIT_FILEMODE_TREE, init_repository
# Import from itools
from itools.database import Metadata
from itools.database.magic_ import magic_from_buffer
from itools.database.git import open_worktree
from itools.fs import lfs
# Import from here
from catalog import Catalog, _get_xquery, SearchResults, make_catalog
from patchs import PatchsBackend
from registry import register_backend
TEST_DB_WITHOUT_COMMITS = bool(int(os.environ.get('TEST_DB_WITHOUT_COMMITS') or 0))
TEST_DB_DESACTIVATE_GIT = bool(int(os.environ.get('TEST_DB_DESACTIVATE_GIT') or 0))
class Heap(object):
"""
This object behaves very much like a sorted dict, but for security only a
subset of the dict API is exposed:
>>> len(heap)
>>> heap[path] = value
>>> value = heap.get(path)
>>> path, value = heap.popitem()
The keys are relative paths as used in Git trees, like 'a/b/c' (and '' for
the root).
The dictionary is sorted so deeper paths are considered smaller, and so
returned first by 'popitem'. The order relation between two paths of equal
depth is undefined.
This data structure is used by RWDatabase._save_changes to build the tree
objects before commit.
"""
def __init__(self):
self._dict = {}
self._heap = []
def __len__(self):
return len(self._dict)
def get(self, path):
return self._dict.get(path)
def __setitem__(self, path, value):
if path not in self._dict:
n = -path.count('/') if path else 1
heappush(self._heap, (n, path))
self._dict[path] = value
def popitem(self):
key = heappop(self._heap)
path = key[1]
return path, self._dict.pop(path)
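# Illustrative sketch (not part of the original module): popitem() returns deeper paths
# first and the root ('') last, which is the bottom-up order needed to build the Git tree
# objects in RWDatabase._save_changes.
#   heap = Heap()
#   heap['a/b/c'] = 1
#   heap['a/b'] = 2
#   heap[''] = 3
#   heap.popitem()   # ('a/b/c', 1)
#   heap.popitem()   # ('a/b', 2)
#   heap.popitem()   # ('', 3)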
class GitBackend(object):
def __init__(self, path, fields, read_only=False):
self.nb_transactions = 0
self.last_transaction_dtime = None
self.path = abspath(path) + '/'
self.fields = fields
self.read_only = read_only
# Open database
        self.path_data = '%s/database/' % self.path
        # Check if it is a folder
if not lfs.is_folder(self.path_data):
error = '"{0}" should be a folder, but it is not'.format(self.path_data)
raise ValueError(error)
# New interface to Git
self.worktree = open_worktree(self.path_data)
# Initialize the database, but chrooted
self.fs = lfs.open(self.path_data)
# Static FS
database_static_path = '{0}/database_static'.format(path)
if not lfs.exists(database_static_path):
self.init_backend_static(path)
self.static_fs = lfs.open(database_static_path)
# Patchs backend
self.patchs_backend = PatchsBackend(path, self.fs, read_only)
# Catalog
self.catalog = self.get_catalog()
@classmethod
def init_backend(cls, path, fields, init=False, soft=False):
# Metadata database
init_repository('{0}/database'.format(path), bare=False)
# Init backend static
cls.init_backend_static(path)
# Make catalog
make_catalog('{0}/catalog'.format(path), fields)
@classmethod
def init_backend_static(cls, path):
# Static database
lfs.make_folder('{0}/database_static'.format(path))
lfs.make_folder('{0}/database_static/.history'.format(path))
#######################################################################
# Database API
#######################################################################
def normalize_key(self, path, __root=None):
# Performance is critical so assume the path is already relative to
# the repository.
key = __root.resolve(path)
if key and key[0] == '.git':
err = "bad '{0}' path, access to the '.git' folder is denied"
raise ValueError(err.format(path))
return '/'.join(key)
def handler_exists(self, key):
fs = self.get_handler_fs_by_key(key)
return fs.exists(key)
def get_handler_names(self, key):
return self.fs.get_names(key)
def get_handler_data(self, key):
if not key:
return None
fs = self.get_handler_fs_by_key(key)
with fs.open(key) as f:
return f.read()
def get_handler_mimetype(self, key):
data = self.get_handler_data(key)
return magic_from_buffer(data)
def handler_is_file(self, key):
fs = self.get_handler_fs_by_key(key)
return fs.is_file(key)
def handler_is_folder(self, key):
fs = self.get_handler_fs_by_key(key)
return fs.is_folder(key)
def get_handler_mtime(self, key):
fs = self.get_handler_fs_by_key(key)
return fs.get_mtime(key)
def save_handler(self, key, handler):
data = handler.to_str()
# Save the file
fs = self.get_handler_fs(handler)
# Write and truncate (calls to "_save_state" must be done with the
# pointer pointing to the beginning)
if not fs.exists(key):
with fs.make_file(key) as f:
f.write(data)
f.truncate(f.tell())
else:
with fs.open(key, 'w') as f:
f.write(data)
f.truncate(f.tell())
# Set dirty = None
handler.timestamp = self.get_handler_mtime(key)
handler.dirty = None
def traverse_resources(self):
raise NotImplementedError
def get_handler_fs(self, handler):
if isinstance(handler, Metadata):
return self.fs
return self.static_fs
def get_handler_fs_by_key(self, key):
if key.endswith('metadata'):
return self.fs
return self.static_fs
def add_handler_into_static_history(self, key):
the_time = datetime.now().strftime('%Y%m%d%H%M%S')
new_key = '.history/{0}.{1}.{2}'.format(key, the_time, uuid4())
parent_path = dirname(new_key)
if not self.static_fs.exists(parent_path):
self.static_fs.make_folder(parent_path)
self.static_fs.copy(key, new_key)
def do_transaction(self, commit_message, data, added, changed, removed, handlers,
docs_to_index, docs_to_unindex):
git_author, git_date, git_msg, docs_to_index, docs_to_unindex = data
# Statistics
self.nb_transactions += 1
# Add static changed & removed files to ~/database_static/.history/
changed_and_removed = list(changed) + list(removed)
for key in changed_and_removed:
if not key.endswith('metadata'):
self.add_handler_into_static_history(key)
# Create patch if there's changed
if added or changed or removed:
self.patchs_backend.create_patch(added, changed, removed, handlers, git_author)
else:
# it's a catalog transaction, we have to do nothing
pass
# Added and changed
added_and_changed = list(added) + list(changed)
for key in added_and_changed:
handler = handlers.get(key)
parent_path = dirname(key)
fs = self.get_handler_fs(handler)
if not fs.exists(parent_path):
fs.make_folder(parent_path)
self.save_handler(key, handler)
# Remove files (if not removed via git-rm)
for key in removed:
if not key.endswith('metadata') or TEST_DB_WITHOUT_COMMITS:
fs = self.get_handler_fs_by_key(key)
fs.remove(key)
# Do git transaction for metadata
if not TEST_DB_WITHOUT_COMMITS:
self.do_git_transaction(commit_message, data, added, changed, removed, handlers)
else:
# Commit at start
if not self.last_transaction_dtime:
self.do_git_big_commit()
else:
now = datetime.now()
t = now.time()
                is_night = time(21, 0) < t or t < time(6, 0)
done_recently = now - self.last_transaction_dtime < timedelta(minutes=120)
if is_night and not done_recently:
self.do_git_big_commit()
# Catalog
for path in docs_to_unindex:
self.catalog.unindex_document(path)
for resource, values in docs_to_index:
self.catalog.index_document(values)
self.catalog.save_changes()
def do_git_big_commit(self):
""" Some databases are really bigs (1 millions files). GIT is too slow in this cases.
So we don't commit at each transaction, but at each N transactions.
"""
if TEST_DB_DESACTIVATE_GIT is True:
return
p1 = Process(target=self._do_git_big_commit)
p1.start()
self.last_transaction_dtime = datetime.now()
def _do_git_big_commit(self):
worktree = self.worktree
worktree._call(['git', 'add', '-A'])
worktree._call(['git', 'commit', '-m', 'Autocommit'])
def do_git_transaction(self, commit_message, data, added, changed, removed, handlers):
worktree = self.worktree
# 3. Git add
git_add = list(added) + list(changed)
git_add = [x for x in git_add if x.endswith('metadata')]
worktree.git_add(*git_add)
# 3. Git rm
git_rm = list(removed)
git_rm = [x for x in git_rm if x.endswith('metadata')]
worktree.git_rm(*git_rm)
# 2. Build the 'git commit' command
git_author, git_date, git_msg, docs_to_index, docs_to_unindex = data
git_msg = git_msg or 'no comment'
# 4. Create the tree
repo = worktree.repo
index = repo.index
try:
head = repo.revparse_single('HEAD')
except KeyError:
git_tree = None
else:
root = head.tree
# Initialize the heap
heap = Heap()
heap[''] = repo.TreeBuilder(root)
for key in git_add:
entry = index[key]
heap[key] = (entry.oid, entry.mode)
for key in git_rm:
heap[key] = None
while heap:
path, value = heap.popitem()
# Stop condition
if path == '':
git_tree = value.write()
break
if type(value) is TreeBuilder:
if len(value) == 0:
value = None
else:
oid = value.write()
value = (oid, GIT_FILEMODE_TREE)
# Split the path
if '/' in path:
parent, name = path.rsplit('/', 1)
else:
parent = ''
name = path
# Get the tree builder
tb = heap.get(parent)
if tb is None:
try:
tentry = root[parent]
except KeyError:
tb = repo.TreeBuilder()
else:
tree = repo[tentry.oid]
tb = repo.TreeBuilder(tree)
heap[parent] = tb
# Modify
if value is None:
# Sometimes there are empty folders left in the
# filesystem, but not in the tree, then we get a
# "Failed to remove entry" error. Be robust.
if tb.get(name) is not None:
tb.remove(name)
else:
tb.insert(name, value[0], value[1])
# 5. Git commit
worktree.git_commit(git_msg, git_author, git_date, tree=git_tree)
def abort_transaction(self):
self.catalog.abort_changes()
#from pygit2 import GIT_CHECKOUT_FORCE, GIT_CHECKOUT_REMOVE_UNTRACKED
        # No need to abort since git add is done at the last minute
#strategy = GIT_CHECKOUT_FORCE | GIT_CHECKOUT_REMOVE_UNTRACKED
#if pygit2.__version__ >= '0.21.1':
# self.worktree.repo.checkout_head(strategy=strategy)
#else:
# self.worktree.repo.checkout_head(strategy)
def flush_catalog(self, docs_to_unindex, docs_to_index):
for path in docs_to_unindex:
self.catalog.unindex_document(path)
for resource, values in docs_to_index:
self.catalog.index_document(values)
def get_catalog(self):
path = '{0}/catalog'.format(self.path)
if not lfs.is_folder(path):
return None
return Catalog(path, self.fields, read_only=self.read_only)
def search(self, query=None, **kw):
"""Launch a search in the catalog.
"""
catalog = self.catalog
xquery = _get_xquery(catalog, query, **kw)
return SearchResults(catalog, xquery)
def close(self):
self.catalog.close()
register_backend('git', GitBackend)
|
hforge/itools
|
itools/database/backends/git.py
|
Python
|
gpl-3.0
| 14,062
| 0.00313
|
import os
import time
import unittest
from typing import Callable
from unittest.mock import patch
from uuid import uuid4
from freezegun import freeze_time
from ray_release.exception import (
ClusterCreationError,
ClusterStartupError,
ClusterStartupTimeout,
ClusterStartupFailed,
ClusterEnvBuildError,
ClusterEnvBuildTimeout,
ClusterComputeCreateError,
ClusterEnvCreateError,
)
from ray_release.cluster_manager.full import FullClusterManager
from ray_release.cluster_manager.minimal import MinimalClusterManager
from ray_release.tests.utils import (
UNIT_TEST_PROJECT_ID,
UNIT_TEST_CLOUD_ID,
APIDict,
fail_always,
fail_once,
MockSDK,
)
from ray_release.util import get_anyscale_sdk
TEST_CLUSTER_ENV = {
"base_image": "anyscale/ray:nightly-py37",
"env_vars": {},
"python": {
"pip_packages": [],
},
"conda_packages": [],
"post_build_cmds": [f"echo {uuid4().hex[:8]}"],
}
TEST_CLUSTER_COMPUTE = {
"cloud_id": UNIT_TEST_CLOUD_ID,
"region": "us-west-2",
"max_workers": 0,
"head_node_type": {"name": "head_node", "instance_type": "m5.4xlarge"},
"worker_node_types": [
{
"name": "worker_node",
"instance_type": "m5.xlarge",
"min_workers": 0,
"max_workers": 0,
"use_spot": False,
}
],
}
def _fail(*args, **kwargs):
raise RuntimeError()
class _DelayedResponse:
def __init__(
self,
callback: Callable[[], None],
finish_after: float,
before: APIDict,
after: APIDict,
):
self.callback = callback
self.finish_after = time.monotonic() + finish_after
self.before = before
self.after = after
def __call__(self, *args, **kwargs):
self.callback()
if time.monotonic() > self.finish_after:
return self.after
else:
return self.before
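# Illustrative sketch (not part of the original tests): a _DelayedResponse keeps returning
# `before` until `finish_after` seconds of monotonic time have passed, then returns `after`;
# in the tests below the callback is used to advance freezegun's frozen clock on every poll.
#   resp = _DelayedResponse(lambda: None, finish_after=60,
#                           before=APIDict(result=APIDict(status="pending")),
#                           after=APIDict(result=APIDict(status="succeeded")))
#   resp()   # the `before` APIDict until ~60s have elapsed, then the `after` one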
class MinimalSessionManagerTest(unittest.TestCase):
cls = MinimalClusterManager
def setUp(self) -> None:
self.sdk = MockSDK()
self.sdk.returns["get_project"] = APIDict(
result=APIDict(name="release_unit_tests")
)
self.cluster_env = TEST_CLUSTER_ENV
self.cluster_compute = TEST_CLUSTER_COMPUTE
self.cluster_manager = self.cls(
project_id=UNIT_TEST_PROJECT_ID,
sdk=self.sdk,
test_name=f"unit_test__{self.__class__.__name__}",
)
self.sdk.reset()
@patch("time.sleep", lambda *a, **kw: None)
def testFindCreateClusterComputeExisting(self):
# Find existing compute and succeed
self.cluster_manager.set_cluster_compute(self.cluster_compute)
self.assertTrue(self.cluster_manager.cluster_compute_name)
self.assertFalse(self.cluster_manager.cluster_compute_id)
self.sdk.returns["search_cluster_computes"] = APIDict(
metadata=APIDict(
next_paging_token=None,
),
results=[
APIDict(
name="no_match",
id="wrong",
),
APIDict(name=self.cluster_manager.cluster_compute_name, id="correct"),
],
)
self.cluster_manager.create_cluster_compute()
self.assertEqual(self.cluster_manager.cluster_compute_id, "correct")
self.assertEqual(self.sdk.call_counter["search_cluster_computes"], 1)
self.assertEqual(len(self.sdk.call_counter), 1)
@patch("time.sleep", lambda *a, **kw: None)
def testFindCreateClusterComputeCreateFailFail(self):
# No existing compute, create new, but fail both times
self.cluster_manager.set_cluster_compute(self.cluster_compute)
self.assertTrue(self.cluster_manager.cluster_compute_name)
self.assertFalse(self.cluster_manager.cluster_compute_id)
self.sdk.returns["search_cluster_computes"] = APIDict(
metadata=APIDict(
next_paging_token=None,
),
results=[
APIDict(
name="no_match",
id="wrong",
),
],
)
self.sdk.returns["create_cluster_compute"] = fail_always
with self.assertRaises(ClusterComputeCreateError):
self.cluster_manager.create_cluster_compute()
# No cluster ID found or created
self.assertFalse(self.cluster_manager.cluster_compute_id)
# Both APIs were called twice (retry after fail)
self.assertEqual(self.sdk.call_counter["search_cluster_computes"], 2)
self.assertEqual(self.sdk.call_counter["create_cluster_compute"], 2)
self.assertEqual(len(self.sdk.call_counter), 2)
@patch("time.sleep", lambda *a, **kw: None)
def testFindCreateClusterComputeCreateFailSucceed(self):
# No existing compute, create new, fail once, succeed afterwards
self.cluster_manager.set_cluster_compute(self.cluster_compute)
self.assertTrue(self.cluster_manager.cluster_compute_name)
self.assertFalse(self.cluster_manager.cluster_compute_id)
self.sdk.returns["search_cluster_computes"] = APIDict(
metadata=APIDict(
next_paging_token=None,
),
results=[
APIDict(
name="no_match",
id="wrong",
),
],
)
self.sdk.returns["create_cluster_compute"] = fail_once(
result=APIDict(
result=APIDict(
id="correct",
)
)
)
self.cluster_manager.create_cluster_compute()
# Both APIs were called twice (retry after fail)
self.assertEqual(self.cluster_manager.cluster_compute_id, "correct")
self.assertEqual(self.sdk.call_counter["search_cluster_computes"], 2)
self.assertEqual(self.sdk.call_counter["create_cluster_compute"], 2)
self.assertEqual(len(self.sdk.call_counter), 2)
@patch("time.sleep", lambda *a, **kw: None)
def testFindCreateClusterComputeCreateSucceed(self):
# No existing compute, create new, and succeed
self.cluster_manager.set_cluster_compute(self.cluster_compute)
self.assertTrue(self.cluster_manager.cluster_compute_name)
self.assertFalse(self.cluster_manager.cluster_compute_id)
self.sdk.returns["search_cluster_computes"] = APIDict(
metadata=APIDict(
next_paging_token=None,
),
results=[
APIDict(
name="no_match",
id="wrong",
),
],
)
self.sdk.returns["create_cluster_compute"] = APIDict(
result=APIDict(
id="correct",
)
)
self.cluster_manager.create_cluster_compute()
        # Both APIs were called once (no retries needed)
self.assertEqual(self.cluster_manager.cluster_compute_id, "correct")
self.assertEqual(self.sdk.call_counter["search_cluster_computes"], 1)
self.assertEqual(self.sdk.call_counter["create_cluster_compute"], 1)
self.assertEqual(len(self.sdk.call_counter), 2)
@patch("time.sleep", lambda *a, **kw: None)
def testFindCreateClusterEnvExisting(self):
# Find existing env and succeed
self.cluster_manager.set_cluster_env(self.cluster_env)
self.assertTrue(self.cluster_manager.cluster_env_name)
self.assertFalse(self.cluster_manager.cluster_env_id)
self.sdk.returns["search_cluster_environments"] = APIDict(
metadata=APIDict(
next_paging_token=None,
),
results=[
APIDict(
name="no_match",
id="wrong",
),
APIDict(name=self.cluster_manager.cluster_env_name, id="correct"),
],
)
self.cluster_manager.create_cluster_env()
self.assertEqual(self.cluster_manager.cluster_env_id, "correct")
self.assertEqual(self.sdk.call_counter["search_cluster_environments"], 1)
self.assertEqual(len(self.sdk.call_counter), 1)
@patch("time.sleep", lambda *a, **kw: None)
def testFindCreateClusterEnvFailFail(self):
# No existing compute, create new, but fail both times
self.cluster_manager.set_cluster_env(self.cluster_env)
self.assertTrue(self.cluster_manager.cluster_env_name)
self.assertFalse(self.cluster_manager.cluster_env_id)
self.sdk.returns["search_cluster_environments"] = APIDict(
metadata=APIDict(
next_paging_token=None,
),
results=[
APIDict(
name="no_match",
id="wrong",
),
],
)
self.sdk.returns["create_cluster_environment"] = fail_always
with self.assertRaises(ClusterEnvCreateError):
self.cluster_manager.create_cluster_env()
# No cluster ID found or created
self.assertFalse(self.cluster_manager.cluster_env_id)
# Both APIs were called twice (retry after fail)
self.assertEqual(self.sdk.call_counter["search_cluster_environments"], 2)
self.assertEqual(self.sdk.call_counter["create_cluster_environment"], 2)
self.assertEqual(len(self.sdk.call_counter), 2)
@patch("time.sleep", lambda *a, **kw: None)
def testFindCreateClusterEnvFailSucceed(self):
# No existing compute, create new, fail once, succeed afterwards
self.cluster_manager.set_cluster_env(self.cluster_env)
self.assertTrue(self.cluster_manager.cluster_env_name)
self.assertFalse(self.cluster_manager.cluster_env_id)
self.cluster_manager.cluster_env_id = None
self.sdk.reset()
self.sdk.returns["search_cluster_environments"] = APIDict(
metadata=APIDict(
next_paging_token=None,
),
results=[
APIDict(
name="no_match",
id="wrong",
),
],
)
self.sdk.returns["create_cluster_environment"] = fail_once(
result=APIDict(
result=APIDict(
id="correct",
)
)
)
self.cluster_manager.create_cluster_env()
# Both APIs were called twice (retry after fail)
self.assertEqual(self.cluster_manager.cluster_env_id, "correct")
self.assertEqual(self.sdk.call_counter["search_cluster_environments"], 2)
self.assertEqual(self.sdk.call_counter["create_cluster_environment"], 2)
self.assertEqual(len(self.sdk.call_counter), 2)
@patch("time.sleep", lambda *a, **kw: None)
def testFindCreateClusterEnvSucceed(self):
# No existing compute, create new, and succeed
self.cluster_manager.set_cluster_env(self.cluster_env)
self.assertTrue(self.cluster_manager.cluster_env_name)
self.assertFalse(self.cluster_manager.cluster_env_id)
self.sdk.returns["search_cluster_environments"] = APIDict(
metadata=APIDict(
next_paging_token=None,
),
results=[
APIDict(
name="no_match",
id="wrong",
),
],
)
self.sdk.returns["create_cluster_environment"] = APIDict(
result=APIDict(
id="correct",
)
)
self.cluster_manager.create_cluster_env()
        # Both APIs were called once (no retries needed)
self.assertEqual(self.cluster_manager.cluster_env_id, "correct")
self.assertEqual(self.sdk.call_counter["search_cluster_environments"], 1)
self.assertEqual(self.sdk.call_counter["create_cluster_environment"], 1)
self.assertEqual(len(self.sdk.call_counter), 2)
@patch("time.sleep", lambda *a, **kw: None)
def testBuildClusterEnvNotFound(self):
self.cluster_manager.set_cluster_env(self.cluster_env)
self.cluster_manager.cluster_env_id = "correct"
# Environment build not found
self.sdk.returns["list_cluster_environment_builds"] = APIDict(results=[])
with self.assertRaisesRegex(ClusterEnvBuildError, "No build found"):
self.cluster_manager.build_cluster_env(timeout=600)
@patch("time.sleep", lambda *a, **kw: None)
def testBuildClusterEnvPreBuildFailed(self):
self.cluster_manager.set_cluster_env(self.cluster_env)
self.cluster_manager.cluster_env_id = "correct"
# Build failed on first lookup
self.cluster_manager.cluster_env_build_id = None
self.sdk.reset()
self.sdk.returns["list_cluster_environment_builds"] = APIDict(
results=[
APIDict(
id="build_failed",
status="failed",
created_at=0,
)
]
)
with self.assertRaisesRegex(ClusterEnvBuildError, "Cluster env build failed"):
self.cluster_manager.build_cluster_env(timeout=600)
self.assertFalse(self.cluster_manager.cluster_env_build_id)
self.assertEqual(self.sdk.call_counter["list_cluster_environment_builds"], 1)
self.assertEqual(len(self.sdk.call_counter), 1)
@patch("time.sleep", lambda *a, **kw: None)
def testBuildClusterEnvPreBuildSucceeded(self):
self.cluster_manager.set_cluster_env(self.cluster_env)
self.cluster_manager.cluster_env_id = "correct"
# (Second) build succeeded
self.cluster_manager.cluster_env_build_id = None
self.sdk.reset()
self.sdk.returns["list_cluster_environment_builds"] = APIDict(
results=[
APIDict(
id="build_failed",
status="failed",
created_at=0,
),
APIDict(
id="build_succeeded",
status="succeeded",
created_at=1,
),
]
)
self.cluster_manager.build_cluster_env(timeout=600)
self.assertTrue(self.cluster_manager.cluster_env_build_id)
self.assertEqual(self.cluster_manager.cluster_env_build_id, "build_succeeded")
self.assertEqual(self.sdk.call_counter["list_cluster_environment_builds"], 1)
self.assertEqual(len(self.sdk.call_counter), 1)
@patch("time.sleep", lambda *a, **kw: None)
def testBuildClusterBuildFails(self):
self.cluster_manager.set_cluster_env(self.cluster_env)
self.cluster_manager.cluster_env_id = "correct"
# Build, but fails after 300 seconds
self.cluster_manager.cluster_env_build_id = None
self.sdk.reset()
self.sdk.returns["list_cluster_environment_builds"] = APIDict(
results=[
APIDict(
id="build_failed",
status="failed",
created_at=0,
),
APIDict(
id="build_succeeded",
status="pending",
created_at=1,
),
]
)
with freeze_time() as frozen_time, self.assertRaisesRegex(
ClusterEnvBuildError, "Cluster env build failed"
):
self.sdk.returns["get_build"] = _DelayedResponse(
lambda: frozen_time.tick(delta=10),
finish_after=300,
before=APIDict(result=APIDict(status="in_progress")),
after=APIDict(result=APIDict(status="failed")),
)
self.cluster_manager.build_cluster_env(timeout=600)
self.assertFalse(self.cluster_manager.cluster_env_build_id)
self.assertEqual(self.sdk.call_counter["list_cluster_environment_builds"], 1)
self.assertGreaterEqual(self.sdk.call_counter["get_build"], 9)
self.assertEqual(len(self.sdk.call_counter), 2)
@patch("time.sleep", lambda *a, **kw: None)
def testBuildClusterEnvBuildTimeout(self):
self.cluster_manager.set_cluster_env(self.cluster_env)
self.cluster_manager.cluster_env_id = "correct"
# Build, but timeout after 100 seconds
self.cluster_manager.cluster_env_build_id = None
self.sdk.reset()
self.sdk.returns["list_cluster_environment_builds"] = APIDict(
results=[
APIDict(
id="build_failed",
status="failed",
created_at=0,
),
APIDict(
id="build_succeeded",
status="pending",
created_at=1,
),
]
)
with freeze_time() as frozen_time, self.assertRaisesRegex(
ClusterEnvBuildTimeout, "Time out when building cluster env"
):
self.sdk.returns["get_build"] = _DelayedResponse(
lambda: frozen_time.tick(delta=10),
finish_after=300,
before=APIDict(result=APIDict(status="in_progress")),
after=APIDict(result=APIDict(status="succeeded")),
)
self.cluster_manager.build_cluster_env(timeout=100)
self.assertFalse(self.cluster_manager.cluster_env_build_id)
self.assertEqual(self.sdk.call_counter["list_cluster_environment_builds"], 1)
self.assertGreaterEqual(self.sdk.call_counter["get_build"], 9)
self.assertEqual(len(self.sdk.call_counter), 2)
@patch("time.sleep", lambda *a, **kw: None)
def testBuildClusterBuildSucceed(self):
self.cluster_manager.set_cluster_env(self.cluster_env)
self.cluster_manager.cluster_env_id = "correct"
# Build, succeed after 300 seconds
self.cluster_manager.cluster_env_build_id = None
self.sdk.reset()
self.sdk.returns["list_cluster_environment_builds"] = APIDict(
results=[
APIDict(
id="build_failed",
status="failed",
created_at=0,
),
APIDict(
id="build_succeeded",
status="pending",
created_at=1,
),
]
)
with freeze_time() as frozen_time:
self.sdk.returns["get_build"] = _DelayedResponse(
lambda: frozen_time.tick(delta=10),
finish_after=300,
before=APIDict(result=APIDict(status="in_progress")),
after=APIDict(result=APIDict(status="succeeded")),
)
self.cluster_manager.build_cluster_env(timeout=600)
self.assertTrue(self.cluster_manager.cluster_env_build_id)
self.assertEqual(self.sdk.call_counter["list_cluster_environment_builds"], 1)
self.assertGreaterEqual(self.sdk.call_counter["get_build"], 9)
self.assertEqual(len(self.sdk.call_counter), 2)
class FullSessionManagerTest(MinimalSessionManagerTest):
cls = FullClusterManager
def testSessionStartCreationError(self):
self.cluster_manager.cluster_env_id = "correct"
self.cluster_manager.cluster_compute_id = "correct"
self.sdk.returns["create_cluster"] = _fail
with self.assertRaises(ClusterCreationError):
self.cluster_manager.start_cluster()
def testSessionStartStartupError(self):
self.cluster_manager.cluster_env_id = "correct"
self.cluster_manager.cluster_compute_id = "correct"
self.sdk.returns["create_cluster"] = APIDict(result=APIDict(id="success"))
self.sdk.returns["start_cluster"] = _fail
with self.assertRaises(ClusterStartupError):
self.cluster_manager.start_cluster()
@patch("time.sleep", lambda *a, **kw: None)
def testSessionStartStartupTimeout(self):
self.cluster_manager.cluster_env_id = "correct"
self.cluster_manager.cluster_compute_id = "correct"
self.sdk.returns["create_cluster"] = APIDict(result=APIDict(id="success"))
self.sdk.returns["start_cluster"] = APIDict(
result=APIDict(id="cop_id", completed=False)
)
with freeze_time() as frozen_time, self.assertRaises(ClusterStartupTimeout):
self.sdk.returns["get_cluster_operation"] = _DelayedResponse(
lambda: frozen_time.tick(delta=10),
finish_after=300,
before=APIDict(result=APIDict(completed=False)),
after=APIDict(result=APIDict(completed=True)),
)
# Timeout before startup finishes
self.cluster_manager.start_cluster(timeout=200)
@patch("time.sleep", lambda *a, **kw: None)
def testSessionStartStartupFailed(self):
self.cluster_manager.cluster_env_id = "correct"
self.cluster_manager.cluster_compute_id = "correct"
self.sdk.returns["create_cluster"] = APIDict(result=APIDict(id="success"))
self.sdk.returns["start_cluster"] = APIDict(
result=APIDict(id="cop_id", completed=False)
)
with freeze_time() as frozen_time, self.assertRaises(ClusterStartupFailed):
frozen_time.tick(delta=0.1)
self.sdk.returns["get_cluster_operation"] = _DelayedResponse(
lambda: frozen_time.tick(delta=10),
finish_after=300,
before=APIDict(result=APIDict(completed=False)),
after=APIDict(result=APIDict(completed=True)),
)
self.sdk.returns["get_cluster"] = APIDict(
result=APIDict(state="Terminated")
)
# Timeout is long enough
self.cluster_manager.start_cluster(timeout=400)
@patch("time.sleep", lambda *a, **kw: None)
def testSessionStartStartupSuccess(self):
self.cluster_manager.cluster_env_id = "correct"
self.cluster_manager.cluster_compute_id = "correct"
self.sdk.returns["create_cluster"] = APIDict(result=APIDict(id="success"))
self.sdk.returns["start_cluster"] = APIDict(
result=APIDict(id="cop_id", completed=False)
)
with freeze_time() as frozen_time:
frozen_time.tick(delta=0.1)
self.sdk.returns["get_cluster_operation"] = _DelayedResponse(
lambda: frozen_time.tick(delta=10),
finish_after=300,
before=APIDict(result=APIDict(completed=False)),
after=APIDict(result=APIDict(completed=True)),
)
self.sdk.returns["get_cluster"] = APIDict(result=APIDict(state="Running"))
# Timeout is long enough
self.cluster_manager.start_cluster(timeout=400)
@unittest.skipUnless(
os.environ.get("RELEASE_UNIT_TEST_NO_ANYSCALE", "0") == "1",
reason="RELEASE_UNIT_TEST_NO_ANYSCALE is set to 1",
)
class LiveSessionManagerTest(unittest.TestCase):
def setUp(self) -> None:
self.sdk = get_anyscale_sdk()
self.cluster_env = TEST_CLUSTER_ENV
self.cluster_compute = TEST_CLUSTER_COMPUTE
self.cluster_manager = FullClusterManager(
project_id=UNIT_TEST_PROJECT_ID,
sdk=self.sdk,
test_name=f"unit_test__{self.__class__.__name__}__endToEnd",
)
def tearDown(self) -> None:
self.cluster_manager.terminate_cluster()
self.cluster_manager.delete_configs()
def testSessionEndToEnd(self):
self.cluster_manager.set_cluster_env(self.cluster_env)
self.cluster_manager.set_cluster_compute(self.cluster_compute)
self.cluster_manager.build_configs(timeout=1200)
# Reset, so that we fetch them again and test that code path
self.cluster_manager.cluster_compute_id = None
self.cluster_manager.cluster_env_id = None
self.cluster_manager.cluster_env_build_id = None
self.cluster_manager.build_configs(timeout=1200)
# Start cluster
self.cluster_manager.start_cluster(timeout=1200)
|
ray-project/ray
|
release/ray_release/tests/test_cluster_manager.py
|
Python
|
apache-2.0
| 24,218
| 0.000991
|
from datetime import time
from datetime import timedelta
import pendulum
from .constants import SECS_PER_HOUR
from .constants import SECS_PER_MIN
from .constants import USECS_PER_SEC
from .duration import AbsoluteDuration
from .duration import Duration
from .mixins.default import FormattableMixin
class Time(FormattableMixin, time):
"""
Represents a time instance as hour, minute, second, microsecond.
"""
# String formatting
def __repr__(self):
us = ""
if self.microsecond:
us = f", {self.microsecond}"
tzinfo = ""
if self.tzinfo:
tzinfo = ", tzinfo={}".format(repr(self.tzinfo))
return "{}({}, {}, {}{}{})".format(
self.__class__.__name__, self.hour, self.minute, self.second, us, tzinfo
)
# Comparisons
def closest(self, dt1, dt2):
"""
Get the closest time from the instance.
:type dt1: Time or time
:type dt2: Time or time
:rtype: Time
"""
dt1 = self.__class__(dt1.hour, dt1.minute, dt1.second, dt1.microsecond)
dt2 = self.__class__(dt2.hour, dt2.minute, dt2.second, dt2.microsecond)
if self.diff(dt1).in_seconds() < self.diff(dt2).in_seconds():
return dt1
return dt2
def farthest(self, dt1, dt2):
"""
Get the farthest time from the instance.
:type dt1: Time or time
:type dt2: Time or time
:rtype: Time
"""
dt1 = self.__class__(dt1.hour, dt1.minute, dt1.second, dt1.microsecond)
dt2 = self.__class__(dt2.hour, dt2.minute, dt2.second, dt2.microsecond)
if self.diff(dt1).in_seconds() > self.diff(dt2).in_seconds():
return dt1
return dt2
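    # Illustrative sketch (not part of the original module):
    #   Time(12, 0).closest(Time(11, 0), Time(15, 0))    # Time(11, 0): 1h away vs 3h
    #   Time(12, 0).farthest(Time(11, 0), Time(15, 0))   # Time(15, 0)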
    # ADDITIONS AND SUBTRACTIONS
def add(self, hours=0, minutes=0, seconds=0, microseconds=0):
"""
Add duration to the instance.
:param hours: The number of hours
:type hours: int
:param minutes: The number of minutes
:type minutes: int
:param seconds: The number of seconds
:type seconds: int
:param microseconds: The number of microseconds
:type microseconds: int
:rtype: Time
"""
from .datetime import DateTime
return (
DateTime.EPOCH.at(self.hour, self.minute, self.second, self.microsecond)
.add(
hours=hours, minutes=minutes, seconds=seconds, microseconds=microseconds
)
.time()
)
def subtract(self, hours=0, minutes=0, seconds=0, microseconds=0):
"""
        Subtract duration from the instance.
:param hours: The number of hours
:type hours: int
:param minutes: The number of minutes
:type minutes: int
:param seconds: The number of seconds
:type seconds: int
:param microseconds: The number of microseconds
:type microseconds: int
:rtype: Time
"""
from .datetime import DateTime
return (
DateTime.EPOCH.at(self.hour, self.minute, self.second, self.microsecond)
.subtract(
hours=hours, minutes=minutes, seconds=seconds, microseconds=microseconds
)
.time()
)
def add_timedelta(self, delta):
"""
Add timedelta duration to the instance.
:param delta: The timedelta instance
:type delta: datetime.timedelta
:rtype: Time
"""
if delta.days:
raise TypeError("Cannot add timedelta with days to Time.")
return self.add(seconds=delta.seconds, microseconds=delta.microseconds)
def subtract_timedelta(self, delta):
"""
Remove timedelta duration from the instance.
:param delta: The timedelta instance
:type delta: datetime.timedelta
:rtype: Time
"""
if delta.days:
raise TypeError("Cannot subtract timedelta with days to Time.")
return self.subtract(seconds=delta.seconds, microseconds=delta.microseconds)
def __add__(self, other):
if not isinstance(other, timedelta):
return NotImplemented
return self.add_timedelta(other)
def __sub__(self, other):
if not isinstance(other, (Time, time, timedelta)):
return NotImplemented
if isinstance(other, timedelta):
return self.subtract_timedelta(other)
if isinstance(other, time):
if other.tzinfo is not None:
raise TypeError("Cannot subtract aware times to or from Time.")
other = self.__class__(
other.hour, other.minute, other.second, other.microsecond
)
return other.diff(self, False)
def __rsub__(self, other):
if not isinstance(other, (Time, time)):
return NotImplemented
if isinstance(other, time):
if other.tzinfo is not None:
raise TypeError("Cannot subtract aware times to or from Time.")
other = self.__class__(
other.hour, other.minute, other.second, other.microsecond
)
return other.__sub__(self)
# DIFFERENCES
def diff(self, dt=None, abs=True):
"""
        Returns the difference between two Time objects as a Duration.
:type dt: Time or None
:param abs: Whether to return an absolute interval or not
:type abs: bool
:rtype: Duration
"""
if dt is None:
dt = pendulum.now().time()
else:
dt = self.__class__(dt.hour, dt.minute, dt.second, dt.microsecond)
us1 = (
self.hour * SECS_PER_HOUR + self.minute * SECS_PER_MIN + self.second
) * USECS_PER_SEC
us2 = (
dt.hour * SECS_PER_HOUR + dt.minute * SECS_PER_MIN + dt.second
) * USECS_PER_SEC
klass = Duration
if abs:
klass = AbsoluteDuration
return klass(microseconds=us2 - us1)
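    # Illustrative sketch (not part of the original module):
    #   Time(12, 30).diff(Time(14, 0)).in_seconds()              # 5400 (absolute by default)
    #   Time(14, 0).diff(Time(12, 30), abs=False).in_seconds()   # -5400, assuming Duration
    #                                                            # reports signed seconds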
def diff_for_humans(self, other=None, absolute=False, locale=None):
"""
Get the difference in a human readable format in the current locale.
:type other: Time or time
:param absolute: removes time difference modifiers ago, after, etc
:type absolute: bool
:param locale: The locale to use for localization
:type locale: str
:rtype: str
"""
is_now = other is None
if is_now:
other = pendulum.now().time()
diff = self.diff(other)
return pendulum.format_diff(diff, is_now, absolute, locale)
# Compatibility methods
def replace(
self, hour=None, minute=None, second=None, microsecond=None, tzinfo=True
):
if tzinfo is True:
tzinfo = self.tzinfo
hour = hour if hour is not None else self.hour
minute = minute if minute is not None else self.minute
second = second if second is not None else self.second
microsecond = microsecond if microsecond is not None else self.microsecond
t = super().replace(hour, minute, second, microsecond, tzinfo=tzinfo)
return self.__class__(
t.hour, t.minute, t.second, t.microsecond, tzinfo=t.tzinfo
)
def __getnewargs__(self):
return (self,)
def _get_state(self, protocol=3):
tz = self.tzinfo
return (self.hour, self.minute, self.second, self.microsecond, tz)
def __reduce__(self):
return self.__reduce_ex__(2)
def __reduce_ex__(self, protocol):
return self.__class__, self._get_state(protocol)
Time.min = Time(0, 0, 0)
Time.max = Time(23, 59, 59, 999999)
Time.resolution = Duration(microseconds=1)
|
sdispater/pendulum
|
pendulum/time.py
|
Python
|
mit
| 7,783
| 0.001156
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
__all__ = []
class LoggerFactory:
@staticmethod
def build_logger(name=None, level=logging.INFO):
assert name is not None, "name for logger should not be None"
formatter = logging.Formatter(
"%(asctime)s-%(levelname)s: "
"[%(filename)s:%(lineno)d:%(funcName)s] %(message)s")
_logger = logging.getLogger(name)
_logger.setLevel(level)
_logger.propagate = False
handler = logging.StreamHandler(stream=sys.stderr)
handler.setFormatter(formatter)
handler.setLevel(level)
_logger.addHandler(handler)
return _logger
logger = LoggerFactory.build_logger(name="HybridParallel", level=logging.INFO)
def layer_to_str(base, *args, **kwargs):
name = base + "("
    if args:
        name += ", ".join(str(arg) for arg in args)
        # only add a separator when positional args were already written
        if kwargs:
            name += ", "
    if kwargs:
name += ", ".join("{}={}".format(key, str(value))
for key, value in kwargs.items())
name += ")"
return name
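# Illustrative sketch (not part of the original module); the layer name and arguments
# below are hypothetical:
#   layer_to_str("Linear", 128, 64, bias_attr=None)
#   # -> "Linear(128, 64, bias_attr=None)"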
|
luotao1/Paddle
|
python/paddle/distributed/fleet/utils/log_util.py
|
Python
|
apache-2.0
| 1,685
| 0.00178
|
# -*- coding: utf-8 -*-
#
# This file is part of the Christine project
#
# Copyright (c) 2006-2007 Marco Antonio Islas Cruz
#
# Christine is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Christine is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# @category libchristine
# @package Share
# @author Miguel Vazquez Gocobachi <demrit@gnu.org>
# @author Marco Antonio Islas Cruz <markuz@islascruz.org>
# @copyright 2007-2009 Christine Development Group
# @license http://www.gnu.org/licenses/gpl.txt
#import gtk.glade
# @author Miguel Vazquez Gocobachi <demrit@gnu.org>
from libchristine.Validator import *
from libchristine.pattern.Singleton import Singleton
from libchristine.gui.GtkMisc import glade_xml
from libchristine.globalvars import DATADIR, SHARE_PATH
from libchristine.Logger import LoggerManager
from libchristine.options import options
import time
import os
import gtk
import sys
import gobject
#
# Share class manager for images, glade
# templates and more files
#
# @author Miguel Vazquez Gocobachi <demrit@gnu.org>
class Share(Singleton):
"""
Share class manager for images, glade
templates and more files
"""
#
# Directory where we have template files
#
# @var string
__PathTemplate = None
#
# Directory where we have images
#
# @var string
__PathPixmap = None
def __init__(self):
"""
Constructor
"""
self.setName('Share')
self.__logger = LoggerManager().getLogger('Share')
self.__PathTemplate = os.path.join(SHARE_PATH, 'gui')
self.__PathPixmap = os.path.join(self.__PathTemplate, 'pixmaps')
        # self.__Pixmaps is used to cache pixmaps: if one is already here, reuse it
        # instead of creating another one from the same file
self.__Pixmaps = {}
gobject.timeout_add(1000, self.check_pixmap_time_access)
def getTemplate(self, file, root = None):
"""
Gets glade template
@param string file: file to load
@param string root: root widget to return instead the main window
"""
if file:
file = ''.join([file, '.glade'])
if isFile(os.path.join(self.__PathTemplate, file)):
return glade_xml(os.path.join(self.__PathTemplate, file),root)
self.__logger.warning('File %s was not found'%(os.path.join(self.__PathTemplate, file)))
return None
def getImage(self, name):
"""
Gets image as path string
"""
if ((not isNull(file)) or (isStringEmpty(name))):
if (isFile(os.path.join(self.__PathPixmap, name+'.png'))):
return os.path.join(self.__PathPixmap, name+'.png')
elif (isFile(os.path.join(self.__PathPixmap, name+ '.svg'))):
return os.path.join(self.__PathPixmap, name+'.svg')
return None
def getImageFromPix(self, name):
"""
Gets image from pixbuf
"""
icon_theme = gtk.icon_theme_get_default()
if icon_theme.has_icon(name):
pixbuf = icon_theme.load_icon(name, 48, 0)
return pixbuf
else:
return self.load_from_local_dir(name)
def load_from_local_dir(self, name):
if not name: return
files = os.listdir(self.__PathPixmap)
filesf = [k for k in files if len(k.split('.')) > 1 \
and k.split('.')[0].startswith(name)]
if not filesf:
            self.__logger.warning('None of these files \n%s\n were found' % repr(name))
return
filepath = os.path.join(self.__PathPixmap, filesf[0])
pixdir = self.__Pixmaps.get(filepath,{})
if not pixdir:
self.__Pixmaps[filepath] = pixdir
pixmap = gtk.gdk.pixbuf_new_from_file(filepath)
self.__Pixmaps[filepath]['pixmap'] = pixmap
self.__Pixmaps[filepath]['timestamp'] = time.time()
return self.__Pixmaps[filepath]['pixmap']
def check_pixmap_time_access(self):
'''
        Check the last access time of each cached pixmap; if the difference between
        the current time and the last access time exceeds the threshold used below
        (60 seconds), the pixmap is dropped from the cache.
'''
c ={}
ctime = time.time()
for key, value in self.__Pixmaps.iteritems():
if ctime - value['timestamp'] < 60:
c[key] = value
self.__Pixmaps = c.copy()
#del c
return True
|
markuz/Christine
|
libchristine/Share.py
|
Python
|
gpl-2.0
| 5,045
| 0.004559
|
#!/usr/bin/env python
"""Exponential and Quaternion code for Lab 6.
Course: EE 106, Fall 2015
Author: Victor Shia, 9/24/15
This Python file is a code skeleton for Lab 6 which calculates the rigid body transform
given a rotation / translation and computes the twist from a rigid body transform.
When you think you have the methods implemented correctly, you can test your
code by running "python exp_quat_func.py" at the command line.
This code requires the NumPy and SciPy libraries and kin_func_skeleton which you
should have written in lab 3. If you don't already have
these installed on your personal computer, you can use the lab machines or
the Ubuntu+ROS VM on the course page to complete this portion of the homework.
"""
import tf
import rospy
import sys
from math import *
import numpy as np
from tf2_msgs.msg import TFMessage
from geometry_msgs.msg import Transform, Vector3
import kin_func_skeleton as kfs
def quaternion_to_exp(rot):
"""
Converts a quaternion vector in 3D to its corresponding omega and theta.
This uses the quaternion -> exponential coordinate equation given in Lab 6
Args:
rot - a (4,) nd array or 4x1 array: the quaternion vector (\vec{q}, q_o)
Returns:
omega - (3,) ndarray: the rotation vector
theta - a scalar
"""
#YOUR CODE HERE
theta = 2.0 * np.arccos(rot[-1])
if theta == 0:
omega = np.array([0.0, 0.0, 0.0])
else:
omega = ((1.0/sin(theta/2.0)) * rot[:-1])
return (omega, theta)
def create_rbt(omega, theta, p):
"""
Creates a rigid body transform using omega, theta, and the translation component.
g = [R,p; 0,1], where R = exp(omega * theta), p = trans
Args:
omega - (3,) ndarray : the axis you want to rotate about
theta - scalar value
trans - (3,) ndarray or 3x1 array: the translation component of the rigid body motion
Returns:
g - (4,4) ndarray : the rigid body transform
"""
#YOUR CODE HERE
R = kfs.rotation_3d(omega, theta)
g = np.array([[R[0][0], R[0][1], R[0][2], p[0]], [R[1][0], R[1][1], R[1][2], p[1]], [R[2][0], R[2][1], R[2][2], p[2]], [0, 0, 0, 1]])
return g
def compute_gab(g0a,g0b):
"""
Creates a rigid body transform g_{ab} the converts between frame A and B
given the coordinate frame A,B in relation to the origin
Args:
g0a - (4,4) ndarray : the rigid body transform from the origin to frame A
g0b - (4,4) ndarray : the rigid body transform from the origin to frame B
Returns:
gab - (4,4) ndarray : the rigid body transform
"""
#YOUR CODE HERE
gab = np.dot(np.linalg.inv(g0a),g0b)
return gab
def find_omega_theta(R):
"""
Given a rotation matrix R, finds the omega and theta such that R = exp(omega * theta)
Args:
R - (3,3) ndarray : the rotational component of the rigid body transform
Returns:
omega - (3,) ndarray : the axis you want to rotate about
theta - scalar value
"""
#YOUR CODE HERE
theta = np.arccos((np.trace(R) - 1)/2)
omega = (1/(2*sin(theta)))*np.array([R[2][1] - R[1][2],R[0][2] - R[2][0],R[1][0] - R[0][1]])
return (omega, theta)
def find_v(omega, theta, trans):
"""
Finds the linear velocity term of the twist (v,omega) given omega, theta and translation
Args:
omega - (3,) ndarray : the axis you want to rotate about
theta - scalar value
trans - (3,) ndarray of 3x1 list : the translation component of the rigid body transform
Returns:
v - (3,1) ndarray : the linear velocity term of the twist (v,omega)
"""
#YOUR CODE HERE
A_1 = np.eye(3) - kfs.rotation_3d(omega, theta)
#print A_1
A_1 = A_1.dot(kfs.skew_3d(omega))
#print A_1
A_2 = np.outer(omega, omega.T)*theta
#print A_2
A = A_1 + A_2
#print A
#print np.linalg.inv(A)
v = np.dot(np.linalg.inv(A), trans)
#print v
return np.array([v]).T
#-----------------------------Testing code--------------------------------------
#-------------(you shouldn't need to modify anything below here)----------------
def array_func_test(func_name, args, ret_desired):
ret_value = func_name(*args)
if not isinstance(ret_value, np.ndarray):
print('[FAIL] ' + func_name.__name__ + '() returned something other than a NumPy ndarray')
elif ret_value.shape != ret_desired.shape:
print('[FAIL] ' + func_name.__name__ + '() returned an ndarray with incorrect dimensions')
elif not np.allclose(ret_value, ret_desired, rtol=1e-3):
print('[FAIL] ' + func_name.__name__ + '() returned an incorrect value')
else:
print('[PASS] ' + func_name.__name__ + '() returned the correct value!')
def array_func_test_two_outputs(func_name, args, ret_desireds):
ret_values = func_name(*args)
for i in range(2):
ret_value = ret_values[i]
ret_desired = ret_desireds[i]
if i == 0 and not isinstance(ret_value, np.ndarray):
print('[FAIL] ' + func_name.__name__ + '() returned something other than a NumPy ndarray')
elif i == 1 and not isinstance(ret_value, float):
print('[FAIL] ' + func_name.__name__ + '() returned something other than a float')
elif i == 0 and ret_value.shape != ret_desired.shape:
print('[FAIL] ' + func_name.__name__ + '() returned an ndarray with incorrect dimensions')
elif not np.allclose(ret_value, ret_desired, rtol=1e-3):
print('[FAIL] ' + func_name.__name__ + '() returned an incorrect value')
else:
print('[PASS] ' + func_name.__name__ + '() returned the correct value for output %d!' % i)
if __name__ == "__main__":
print('Testing...')
#Test quaternion_to_exp()
arg1 = np.array([1.0, 2, 3, 0.1])
func_args = (arg1,)
ret_desired = (np.array([1.005, 2.0101, 3.0151]), 2.94125)
array_func_test_two_outputs(quaternion_to_exp, func_args, ret_desired)
#Test create_rbt()
arg1 = np.array([1.0, 2, 3])
arg2 = 2
arg3 = np.array([0.5,-0.5,1])
func_args = (arg1,arg2,arg3)
ret_desired = np.array(
[[ 0.4078, -0.6562, 0.6349, 0.5 ],
[ 0.8384, 0.5445, 0.0242, -0.5 ],
[-0.3616, 0.5224, 0.7722, 1. ],
[ 0. , 0. , 0. , 1. ]])
array_func_test(create_rbt, func_args, ret_desired)
#Test compute_gab(g0a,g0b)
g0a = np.array(
[[ 0.4078, -0.6562, 0.6349, 0.5 ],
[ 0.8384, 0.5445, 0.0242, -0.5 ],
[-0.3616, 0.5224, 0.7722, 1. ],
[ 0. , 0. , 0. , 1. ]])
g0b = np.array(
[[-0.6949, 0.7135, 0.0893, 0.5 ],
[-0.192 , -0.3038, 0.9332, -0.5 ],
[ 0.693 , 0.6313, 0.3481, 1. ],
[ 0. , 0. , 0. , 1. ]])
func_args = (g0a, g0b)
ret_desired = np.array([[-0.6949, -0.192 , 0.693 , 0. ],
[ 0.7135, -0.3038, 0.6313, 0. ],
[ 0.0893, 0.9332, 0.3481, 0. ],
[ 0. , 0. , 0. , 1. ]])
array_func_test(compute_gab, func_args, ret_desired)
#Test find_omega_theta
R = np.array(
[[ 0.4078, -0.6562, 0.6349 ],
[ 0.8384, 0.5445, 0.0242 ],
[-0.3616, 0.5224, 0.7722 ]])
func_args = (R,)
ret_desired = (np.array([ 0.2673, 0.5346, 0.8018]), 1.2001156089449496)
array_func_test_two_outputs(find_omega_theta, func_args, ret_desired)
#Test find_v
arg1 = np.array([1.0, 2, 3])
arg2 = 1
arg3 = np.array([0.5,-0.5,1])
func_args = (arg1,arg2,arg3)
ret_desired = np.array([[-0.1255],
[ 0.0431],
[ 0.0726]])
array_func_test(find_v, func_args, ret_desired)
|
AravindK95/ee106b
|
project1/src/lab1/src/exp_quat_func.py
|
Python
|
mit
| 7,708
| 0.018422
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Module for the ops not belonging to the official numpy package."""
from . import _op
from . import image
from . import _register
from ._op import * # pylint: disable=wildcard-import
__all__ = _op.__all__
|
reminisce/mxnet
|
python/mxnet/symbol/numpy_extension/__init__.py
|
Python
|
apache-2.0
| 996
| 0
|
# A SCons tool for R scripts
#
# Copyright (c) 2014 Kendrick Boyd. This is free software. See LICENSE
# for details.
"""
Basic test of producing output using save.
"""
import TestSCons
test = TestSCons.TestSCons()
# Add scons_r tool to test fixture.
test.file_fixture('../__init__.py', 'site_scons/site_tools/scons_r/__init__.py')
test.write(['SConstruct'], """\
import os
env = Environment(TOOLS = ['scons_r'])
env.R('basic.r')
""")
test.write(['basic.r'], """\
x=rnorm(100)
save(x, file='x.rdata')
""")
test.run(arguments='.', stderr=None)
test.must_exist('x.rdata')
test.pass_test()
|
kboyd/scons_r
|
test/basic.py
|
Python
|
bsd-2-clause
| 596
| 0.001678
|
from django.shortcuts import render_to_response
from django.template import RequestContext
from markitup import settings
from markitup.markup import filter_func
from markitup.sanitize import sanitize_html
def apply_filter(request):
cleaned_data = sanitize_html(request.POST.get('data', ''), strip=True)
markup = filter_func(cleaned_data)
return render_to_response( 'markitup/preview.html',
{'preview': markup},
context_instance=RequestContext(request))
|
thoslin/django-markitup
|
markitup/views.py
|
Python
|
bsd-3-clause
| 527
| 0.00759
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Xception V1 model for Keras.
On ImageNet, this model gets to a top-1 validation accuracy of 0.790
and a top-5 validation accuracy of 0.945.
Reference:
- [Xception: Deep Learning with Depthwise Separable Convolutions](
https://arxiv.org/abs/1610.02357) (CVPR 2017)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras import backend
from tensorflow.python.keras.applications import imagenet_utils
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.layers import VersionAwareLayers
from tensorflow.python.keras.utils import data_utils
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.lib.io import file_io
from tensorflow.python.util.tf_export import keras_export
TF_WEIGHTS_PATH = (
'https://storage.googleapis.com/tensorflow/keras-applications/'
'xception/xception_weights_tf_dim_ordering_tf_kernels.h5')
TF_WEIGHTS_PATH_NO_TOP = (
'https://storage.googleapis.com/tensorflow/keras-applications/'
'xception/xception_weights_tf_dim_ordering_tf_kernels_notop.h5')
layers = VersionAwareLayers()
@keras_export('keras.applications.xception.Xception',
'keras.applications.Xception')
def Xception(
include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation='softmax'):
"""Instantiates the Xception architecture.
Reference:
- [Xception: Deep Learning with Depthwise Separable Convolutions](
https://arxiv.org/abs/1610.02357) (CVPR 2017)
Optionally loads weights pre-trained on ImageNet.
Note that the data format convention used by the model is
the one specified in your Keras config at `~/.keras/keras.json`.
Note that the default input image size for this model is 299x299.
Caution: Be sure to properly pre-process your inputs to the application.
Please see `applications.xception.preprocess_input` for an example.
Arguments:
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor
(i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(299, 299, 3)`).
It should have exactly 3 input channels,
and width and height should be no smaller than 71.
E.g. `(150, 150, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True,
and if no `weights` argument is specified.
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
Returns:
A `keras.Model` instance.
Raises:
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
ValueError: if `classifier_activation` is not `softmax` or `None` when
using a pretrained top layer.
"""
if not (weights in {'imagenet', None} or file_io.file_exists_v2(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as `"imagenet"` with `include_top`'
' as true, `classes` should be 1000')
# Determine proper input shape
input_shape = imagenet_utils.obtain_input_shape(
input_shape,
default_size=299,
min_size=71,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights)
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1
x = layers.Conv2D(
32, (3, 3),
strides=(2, 2),
use_bias=False,
name='block1_conv1')(img_input)
x = layers.BatchNormalization(axis=channel_axis, name='block1_conv1_bn')(x)
x = layers.Activation('relu', name='block1_conv1_act')(x)
x = layers.Conv2D(64, (3, 3), use_bias=False, name='block1_conv2')(x)
x = layers.BatchNormalization(axis=channel_axis, name='block1_conv2_bn')(x)
x = layers.Activation('relu', name='block1_conv2_act')(x)
residual = layers.Conv2D(
128, (1, 1), strides=(2, 2), padding='same', use_bias=False)(x)
residual = layers.BatchNormalization(axis=channel_axis)(residual)
x = layers.SeparableConv2D(
128, (3, 3), padding='same', use_bias=False, name='block2_sepconv1')(x)
x = layers.BatchNormalization(axis=channel_axis, name='block2_sepconv1_bn')(x)
x = layers.Activation('relu', name='block2_sepconv2_act')(x)
x = layers.SeparableConv2D(
128, (3, 3), padding='same', use_bias=False, name='block2_sepconv2')(x)
x = layers.BatchNormalization(axis=channel_axis, name='block2_sepconv2_bn')(x)
x = layers.MaxPooling2D((3, 3),
strides=(2, 2),
padding='same',
name='block2_pool')(x)
x = layers.add([x, residual])
residual = layers.Conv2D(
256, (1, 1), strides=(2, 2), padding='same', use_bias=False)(x)
residual = layers.BatchNormalization(axis=channel_axis)(residual)
x = layers.Activation('relu', name='block3_sepconv1_act')(x)
x = layers.SeparableConv2D(
256, (3, 3), padding='same', use_bias=False, name='block3_sepconv1')(x)
x = layers.BatchNormalization(axis=channel_axis, name='block3_sepconv1_bn')(x)
x = layers.Activation('relu', name='block3_sepconv2_act')(x)
x = layers.SeparableConv2D(
256, (3, 3), padding='same', use_bias=False, name='block3_sepconv2')(x)
x = layers.BatchNormalization(axis=channel_axis, name='block3_sepconv2_bn')(x)
x = layers.MaxPooling2D((3, 3),
strides=(2, 2),
padding='same',
name='block3_pool')(x)
x = layers.add([x, residual])
residual = layers.Conv2D(
728, (1, 1), strides=(2, 2), padding='same', use_bias=False)(x)
residual = layers.BatchNormalization(axis=channel_axis)(residual)
x = layers.Activation('relu', name='block4_sepconv1_act')(x)
x = layers.SeparableConv2D(
728, (3, 3), padding='same', use_bias=False, name='block4_sepconv1')(x)
x = layers.BatchNormalization(axis=channel_axis, name='block4_sepconv1_bn')(x)
x = layers.Activation('relu', name='block4_sepconv2_act')(x)
x = layers.SeparableConv2D(
728, (3, 3), padding='same', use_bias=False, name='block4_sepconv2')(x)
x = layers.BatchNormalization(axis=channel_axis, name='block4_sepconv2_bn')(x)
x = layers.MaxPooling2D((3, 3),
strides=(2, 2),
padding='same',
name='block4_pool')(x)
x = layers.add([x, residual])
for i in range(8):
residual = x
prefix = 'block' + str(i + 5)
x = layers.Activation('relu', name=prefix + '_sepconv1_act')(x)
x = layers.SeparableConv2D(
728, (3, 3),
padding='same',
use_bias=False,
name=prefix + '_sepconv1')(x)
x = layers.BatchNormalization(
axis=channel_axis, name=prefix + '_sepconv1_bn')(x)
x = layers.Activation('relu', name=prefix + '_sepconv2_act')(x)
x = layers.SeparableConv2D(
728, (3, 3),
padding='same',
use_bias=False,
name=prefix + '_sepconv2')(x)
x = layers.BatchNormalization(
axis=channel_axis, name=prefix + '_sepconv2_bn')(x)
x = layers.Activation('relu', name=prefix + '_sepconv3_act')(x)
x = layers.SeparableConv2D(
728, (3, 3),
padding='same',
use_bias=False,
name=prefix + '_sepconv3')(x)
x = layers.BatchNormalization(
axis=channel_axis, name=prefix + '_sepconv3_bn')(x)
x = layers.add([x, residual])
residual = layers.Conv2D(
1024, (1, 1), strides=(2, 2), padding='same', use_bias=False)(x)
residual = layers.BatchNormalization(axis=channel_axis)(residual)
x = layers.Activation('relu', name='block13_sepconv1_act')(x)
x = layers.SeparableConv2D(
728, (3, 3), padding='same', use_bias=False, name='block13_sepconv1')(x)
x = layers.BatchNormalization(
axis=channel_axis, name='block13_sepconv1_bn')(x)
x = layers.Activation('relu', name='block13_sepconv2_act')(x)
x = layers.SeparableConv2D(
1024, (3, 3), padding='same', use_bias=False, name='block13_sepconv2')(x)
x = layers.BatchNormalization(
axis=channel_axis, name='block13_sepconv2_bn')(x)
x = layers.MaxPooling2D((3, 3),
strides=(2, 2),
padding='same',
name='block13_pool')(x)
x = layers.add([x, residual])
x = layers.SeparableConv2D(
1536, (3, 3), padding='same', use_bias=False, name='block14_sepconv1')(x)
x = layers.BatchNormalization(
axis=channel_axis, name='block14_sepconv1_bn')(x)
x = layers.Activation('relu', name='block14_sepconv1_act')(x)
x = layers.SeparableConv2D(
2048, (3, 3), padding='same', use_bias=False, name='block14_sepconv2')(x)
x = layers.BatchNormalization(
axis=channel_axis, name='block14_sepconv2_bn')(x)
x = layers.Activation('relu', name='block14_sepconv2_act')(x)
if include_top:
x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
imagenet_utils.validate_activation(classifier_activation, weights)
x = layers.Dense(classes, activation=classifier_activation,
name='predictions')(x)
else:
if pooling == 'avg':
x = layers.GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = layers.GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = layer_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = training.Model(inputs, x, name='xception')
# Load weights.
if weights == 'imagenet':
if include_top:
weights_path = data_utils.get_file(
'xception_weights_tf_dim_ordering_tf_kernels.h5',
TF_WEIGHTS_PATH,
cache_subdir='models',
file_hash='0a58e3b7378bc2990ea3b43d5981f1f6')
else:
weights_path = data_utils.get_file(
'xception_weights_tf_dim_ordering_tf_kernels_notop.h5',
TF_WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
file_hash='b0042744bf5b25fce3cb969f33bebb97')
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model
@keras_export('keras.applications.xception.preprocess_input')
def preprocess_input(x, data_format=None):
return imagenet_utils.preprocess_input(x, data_format=data_format, mode='tf')
@keras_export('keras.applications.xception.decode_predictions')
def decode_predictions(preds, top=5):
return imagenet_utils.decode_predictions(preds, top=top)
preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
mode='',
ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TF,
error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
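# Example usage (an illustrative sketch, not part of the original file; assumes a local
# image 'elephant.jpg' and the public tf.keras preprocessing helpers):
#   import numpy as np
#   from tensorflow.keras.preprocessing import image
#   model = Xception(weights='imagenet')
#   img = image.load_img('elephant.jpg', target_size=(299, 299))
#   x = preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))
#   print(decode_predictions(model.predict(x), top=3))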
|
aldian/tensorflow
|
tensorflow/python/keras/applications/xception.py
|
Python
|
apache-2.0
| 13,084
| 0.006649
|
from __future__ import absolute_import, division, print_function
class _slice(object):
""" A hashable slice object
>>> _slice(0, 10, None)
0:10
"""
def __init__(self, start, stop, step):
self.start = start
self.stop = stop
self.step = step
def __hash__(self):
return hash((slice, self.start, self.stop, self.step))
def __str__(self):
s = ''
if self.start is not None:
s = s + str(self.start)
s = s + ':'
if self.stop is not None:
s = s + str(self.stop)
if self.step is not None:
s = s + ':' + str(self.step)
return s
def __eq__(self, other):
return (type(self), self.start, self.stop, self.step) == \
(type(other), other.start, other.stop, other.step)
def as_slice(self):
return slice(self.start, self.stop, self.step)
__repr__ = __str__
class hashable_list(tuple):
def __str__(self):
return str(list(self))
def hashable_index(index):
""" Convert slice-thing into something hashable
>>> hashable_index(1)
1
>>> isinstance(hash(hashable_index((1, slice(10)))), int)
True
"""
if type(index) is tuple: # can't do isinstance due to hashable_list
return tuple(map(hashable_index, index))
elif isinstance(index, list):
return hashable_list(index)
elif isinstance(index, slice):
return _slice(index.start, index.stop, index.step)
return index
def replace_slices(index):
if isinstance(index, hashable_list):
return list(index)
elif isinstance(index, _slice):
return index.as_slice()
elif isinstance(index, tuple):
return tuple(map(replace_slices, index))
return index
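# Illustrative round trip (added comment, not part of the original module):
# hashable_index makes a NumPy-style index usable as a dict/cache key and
# replace_slices undoes the conversion.
#   idx = (0, slice(1, 10, 2), [3, 4])
#   key = hashable_index(idx)      # hashable: slices and lists are wrapped
#   assert replace_slices(key) == (0, slice(1, 10, 2), [3, 4])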
|
mrocklin/blaze
|
blaze/expr/utils.py
|
Python
|
bsd-3-clause
| 1,782
| 0.001684
|
#!/usr/bin/env python
__author__ = 'Adam R. Smith, Michael Meisinger, Dave Foster <dfoster@asascience.com>'
import threading
import traceback
import gevent
from gevent import greenlet, Timeout
from gevent.event import Event, AsyncResult
from gevent.queue import Queue
from pyon.core import MSG_HEADER_ACTOR
from pyon.core.bootstrap import CFG
from pyon.core.exception import IonException, ContainerError
from pyon.core.exception import Timeout as IonTimeout
from pyon.core.thread import PyonThreadManager, PyonThread, ThreadManager, PyonThreadTraceback, PyonHeartbeatError
from pyon.datastore.postgresql.pg_util import init_db_stats, get_db_stats, clear_db_stats
from pyon.ion.service import BaseService
from pyon.util.containers import get_ion_ts, get_ion_ts_millis
from pyon.util.log import log
STAT_INTERVAL_LENGTH = 60000 # Interval time for process saturation stats collection
stats_callback = None
class OperationInterruptedException(BaseException):
"""
Interrupted exception. Used by external items timing out execution in the
IonProcessThread's control thread.
Derived from BaseException to specifically avoid try/except Exception blocks,
such as in Publisher's publish_event.
"""
pass
class IonProcessError(StandardError):
pass
class IonProcessThread(PyonThread):
"""
The control part of an ION process.
"""
def __init__(self, target=None, listeners=None, name=None, service=None, cleanup_method=None,
heartbeat_secs=10, **kwargs):
"""
Constructs the control part of an ION process.
Used by the container's IonProcessThreadManager, as part of spawn_process.
@param target A callable to run in the PyonThread. If None (typical), will use the target method
defined in this class.
@param listeners A list of listening endpoints attached to this thread.
@param name The name of this ION process.
@param service An instance of the BaseService derived class which contains the business logic for
the ION process.
@param cleanup_method An optional callable to run when the process is stopping. Runs after all other
notify_stop calls have run. Should take one param, this instance.
@param heartbeat_secs Number of seconds to wait in between heartbeats.
"""
self._startup_listeners = listeners or []
self.listeners = []
self._listener_map = {}
self.name = name
self.service = service
self._cleanup_method = cleanup_method
self.thread_manager = ThreadManager(failure_notify_callback=self._child_failed) # bubbles up to main thread manager
self._dead_children = [] # save any dead children for forensics
self._ctrl_thread = None
self._ctrl_queue = Queue()
self._ready_control = Event()
self._errors = []
self._ctrl_current = None # set to the AR generated by _routing_call when in the context of a call
# processing vs idle time (ms)
self._start_time = None
self._proc_time = 0 # busy time since start
self._proc_time_prior = 0 # busy time at the beginning of the prior interval
self._proc_time_prior2 = 0 # busy time at the beginning of 2 interval's ago
self._proc_interval_num = 0 # interval num of last record
# for heartbeats, used to detect stuck processes
self._heartbeat_secs = heartbeat_secs # amount of time to wait between heartbeats
self._heartbeat_stack = None # stacktrace of last heartbeat
self._heartbeat_time = None # timestamp of heart beat last matching the current op
self._heartbeat_op = None # last operation (by AR)
self._heartbeat_count = 0 # number of times this operation has been seen consecutively
self._log_call_exception = CFG.get_safe("container.process.log_exceptions", False)
self._log_call_dbstats = CFG.get_safe("container.process.log_dbstats", False)
self._warn_call_dbstmt_threshold = CFG.get_safe("container.process.warn_dbstmt_threshold", 0)
PyonThread.__init__(self, target=target, **kwargs)
def heartbeat(self):
"""
Returns a 3-tuple indicating everything is ok.
Should only be called after the process has been started.
Checks the following:
- All attached endpoints are alive + listening (this means ready)
- The control flow greenlet is alive + listening or processing
@return 3-tuple indicating (listeners ok, ctrl thread ok, heartbeat status). Use all on it for a
boolean indication of success.
"""
listeners_ok = True
for l in self.listeners:
if not (l in self._listener_map and not self._listener_map[l].proc.dead and l.get_ready_event().is_set()):
listeners_ok = False
ctrl_thread_ok = self._ctrl_thread.running
# are we currently processing something?
heartbeat_ok = True
if self._ctrl_current is not None:
st = traceback.extract_stack(self._ctrl_thread.proc.gr_frame)
if self._ctrl_current == self._heartbeat_op:
if st == self._heartbeat_stack:
self._heartbeat_count += 1 # we've seen this before! increment count
# we've been in this for the last X ticks, or it's been X seconds, fail this part of the heartbeat
if self._heartbeat_count > CFG.get_safe('container.timeout.heartbeat_proc_count_threshold', 30) or \
get_ion_ts_millis() - int(self._heartbeat_time) >= CFG.get_safe('container.timeout.heartbeat_proc_time_threshold', 30) * 1000:
heartbeat_ok = False
else:
# it's made some progress
self._heartbeat_count = 1
self._heartbeat_stack = st
self._heartbeat_time = get_ion_ts()
else:
self._heartbeat_op = self._ctrl_current
self._heartbeat_count = 1
self._heartbeat_time = get_ion_ts()
self._heartbeat_stack = st
else:
self._heartbeat_op = None
self._heartbeat_count = 0
#log.debug("%s %s %s", listeners_ok, ctrl_thread_ok, heartbeat_ok)
return (listeners_ok, ctrl_thread_ok, heartbeat_ok)
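# Illustrative use (added comment, not in the original): callers treat the process as
# healthy only when every flag is set, e.g. `if not all(proc.heartbeat()): ...`,
# which is what the target() loop below does once per heartbeat interval.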
@property
def time_stats(self):
"""
Returns a 5-tuple of (total time, idle time, processing time, time since prior interval start,
busy since prior interval start), all in ms (int).
"""
now = get_ion_ts_millis()
running_time = now - self._start_time
idle_time = running_time - self._proc_time
cur_interval = now / STAT_INTERVAL_LENGTH
now_since_prior = now - (cur_interval - 1) * STAT_INTERVAL_LENGTH
if cur_interval == self._proc_interval_num:
proc_time_since_prior = self._proc_time-self._proc_time_prior2
elif cur_interval-1 == self._proc_interval_num:
proc_time_since_prior = self._proc_time-self._proc_time_prior
else:
proc_time_since_prior = 0
return (running_time, idle_time, self._proc_time, now_since_prior, proc_time_since_prior)
def _child_failed(self, child):
"""
Callback from gevent as set in the ThreadManager, when a child greenlet fails.
Kills the ION process main greenlet. This propagates the error up to the process supervisor.
"""
# remove the child from the list of children (so we can shut down cleanly)
for x in self.thread_manager.children:
if x.proc == child:
self.thread_manager.children.remove(x)
break
self._dead_children.append(child)
# kill this process's main greenlet. This should be noticed by the container's proc manager
self.proc.kill(child.exception)
def add_endpoint(self, listener, activate=True):
"""
Adds a listening endpoint to be managed by this ION process.
Spawns the listen loop and sets the routing call to synchronize incoming messages
here. If this process hasn't been started yet, adds it to the list of listeners
to start on startup.
@param activate If True (default), start consuming from listener
"""
if self.proc:
listener.routing_call = self._routing_call
if self.name:
svc_name = "unnamed-service"
if self.service is not None and hasattr(self.service, 'name'):
svc_name = self.service.name
listen_thread_name = "%s-%s-listen-%s" % (svc_name, self.name, len(self.listeners)+1)
else:
listen_thread_name = "unknown-listener-%s" % (len(self.listeners)+1)
listen_thread = self.thread_manager.spawn(listener.listen, thread_name=listen_thread_name, activate=activate)
listen_thread.proc._glname = "ION Proc listener %s" % listen_thread_name
self._listener_map[listener] = listen_thread
self.listeners.append(listener)
else:
self._startup_listeners.append(listener)
def remove_endpoint(self, listener):
"""
Removes a listening endpoint from management by this ION process.
If the endpoint is unknown to this ION process, raises an error.
@return The PyonThread running the listen loop, if it exists. You are
responsible for closing it when appropriate.
"""
if listener in self.listeners:
self.listeners.remove(listener)
return self._listener_map.pop(listener)
elif listener in self._startup_listeners:
self._startup_listeners.remove(listener)
return None
else:
raise IonProcessError("Cannot remove unrecognized listener: %s" % listener)
def target(self, *args, **kwargs):
"""
Entry point for the main process greenlet.
Setup the base properties for this process (mainly the control thread).
"""
if self.name:
threading.current_thread().name = "%s-target" % self.name
# start time
self._start_time = get_ion_ts_millis()
self._proc_interval_num = self._start_time / STAT_INTERVAL_LENGTH
# spawn control flow loop
self._ctrl_thread = self.thread_manager.spawn(self._control_flow)
self._ctrl_thread.proc._glname = "ION Proc CL %s" % self.name
# wait on control flow loop, heartbeating as appropriate
while not self._ctrl_thread.ev_exit.wait(timeout=self._heartbeat_secs):
hbst = self.heartbeat()
if not all(hbst):
log.warn("Heartbeat status for process %s returned %s", self, hbst)
if self._heartbeat_stack is not None:
stack_out = "".join(traceback.format_list(self._heartbeat_stack))
else:
stack_out = "N/A"
#raise PyonHeartbeatError("Heartbeat failed: %s, stacktrace:\n%s" % (hbst, stack_out))
log.warn("Heartbeat failed: %s, stacktrace:\n%s", hbst, stack_out)
# this is almost a no-op as we don't fall out of the above loop without
# exiting the ctrl_thread, but having this line here makes testing much easier.
self._ctrl_thread.join()
def _routing_call(self, call, context, *callargs, **callkwargs):
"""
Endpoints call into here to synchronize across the entire IonProcess.
Returns immediately with an AsyncResult that can be waited on. Calls
are made by the loop in _control_flow. We pass in the calling greenlet so
exceptions are raised in the correct context.
@param call The call to be made within this ION processes' calling greenlet.
@param callargs The positional args to pass to the call.
@param context Optional process-context (usually the headers of the incoming call) to be
set. Process-context is greenlet-local, and since we're crossing greenlet
boundaries, we must set it again in the ION process' calling greenlet.
"""
ar = AsyncResult()
if len(callargs) == 0 and len(callkwargs) == 0:
log.trace("_routing_call got no arguments for the call %s, check your call's parameters", call)
self._ctrl_queue.put((greenlet.getcurrent(), ar, call, callargs, callkwargs, context))
return ar
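# Illustrative use (added comment, not in the original): a listener endpoint typically does
#   ar = proc._routing_call(op_handler, headers, *msg_args)   # names here are placeholders
#   result = ar.get(timeout=...)   # blocks until _control_flow runs the call and sets the AR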
def has_pending_call(self, ar):
"""
Returns true if the call (keyed by the AsyncResult returned by _routing_call) is still pending.
"""
for _, qar, _, _, _, _ in self._ctrl_queue.queue:
if qar == ar:
return True
return False
def _cancel_pending_call(self, ar):
"""
Cancels a pending call (keyed by the AsyncResult returned by _routing_call).
@return True if the call was truly pending.
"""
if self.has_pending_call(ar):
ar.set(False)
return True
return False
def _interrupt_control_thread(self):
"""
Signal the control flow thread that it needs to abort processing, likely due to a timeout.
"""
self._ctrl_thread.proc.kill(exception=OperationInterruptedException, block=False)
def cancel_or_abort_call(self, ar):
"""
Either cancels a future pending call, or aborts the current processing if the given AR is unset.
The pending call is keyed by the AsyncResult returned by _routing_call.
"""
if not self._cancel_pending_call(ar) and not ar.ready():
self._interrupt_control_thread()
def _control_flow(self):
"""
Entry point for process control thread of execution.
This method is run by the control greenlet for each ION process. Listeners attached
to the process, either RPC Servers or Subscribers, synchronize calls to the process
by placing call requests into the queue by calling _routing_call.
This method blocks until there are calls to be made in the synchronized queue, and
then calls from within this greenlet. Any exception raised is caught and re-raised
in the greenlet that originally scheduled the call. If successful, the AsyncResult
created at scheduling time is set with the result of the call.
"""
svc_name = getattr(self.service, "name", "unnamed-service") if self.service else "unnamed-service"
proc_id = getattr(self.service, "id", "unknown-pid") if self.service else "unknown-pid"
if self.name:
threading.current_thread().name = "%s-%s" % (svc_name, self.name)
thread_base_name = threading.current_thread().name
self._ready_control.set()
for calltuple in self._ctrl_queue:
calling_gl, ar, call, callargs, callkwargs, context = calltuple
request_id = (context or {}).get("request-id", None)
if request_id:
threading.current_thread().name = thread_base_name + "-" + str(request_id)
#log.debug("control_flow making call: %s %s %s (has context: %s)", call, callargs, callkwargs, context is not None)
res = None
start_proc_time = get_ion_ts_millis()
self._record_proc_time(start_proc_time)
# check context for expiration
if context is not None and 'reply-by' in context:
if start_proc_time >= int(context['reply-by']):
log.info("control_flow: attempting to process message already exceeding reply-by, ignore")
# raise a timeout in the calling thread to allow endpoints to continue processing
e = IonTimeout("Reply-by time has already occurred (reply-by: %s, op start time: %s)" % (context['reply-by'], start_proc_time))
calling_gl.kill(exception=e, block=False)
continue
# If ar is set, means it is cancelled
if ar.ready():
log.info("control_flow: attempting to process message that has been cancelled, ignore")
continue
init_db_stats()
try:
# ******************************************************************
# ****** THIS IS WHERE THE RPC OPERATION/SERVICE CALL IS MADE ******
with self.service.push_context(context), \
self.service.container.context.push_context(context):
self._ctrl_current = ar
res = call(*callargs, **callkwargs)
# ****** END CALL, EXCEPTION HANDLING FOLLOWS ******
# ******************************************************************
except OperationInterruptedException:
# endpoint layer takes care of response as it's the one that caused this
log.debug("Operation interrupted")
pass
except Exception as e:
if self._log_call_exception:
log.exception("PROCESS exception: %s" % e.message)
# Raise the exception in the calling greenlet.
# Try decorating the args of the exception with the true traceback -
# this should be reported by ThreadManager._child_failed
exc = PyonThreadTraceback("IonProcessThread _control_flow caught an exception "
"(call: %s, *args %s, **kwargs %s, context %s)\n"
"True traceback captured by IonProcessThread' _control_flow:\n\n%s" % (
call, callargs, callkwargs, context, traceback.format_exc()))
e.args = e.args + (exc,)
if isinstance(e, (TypeError, IonException)):
# Pass through known process exceptions, in particular IonException
calling_gl.kill(exception=e, block=False)
else:
# Otherwise, wrap unknown, forward and hopefully we can continue on our way
self._errors.append((call, callargs, callkwargs, context, e, exc))
log.warn(exc)
log.warn("Attempting to continue...")
# Note: Too large exception string will crash the container (when passed on as msg header).
exception_str = str(exc)
if len(exception_str) > 10000:
exception_str = (
"Exception string representation too large. "
"Begin and end of the exception:\n"
+ exception_str[:2000] + "\n...\n" + exception_str[-2000:]
)
calling_gl.kill(exception=ContainerError(exception_str), block=False)
finally:
try:
# Compute statistics
self._compute_proc_stats(start_proc_time)
db_stats = get_db_stats()
if db_stats:
if self._warn_call_dbstmt_threshold > 0 and db_stats.get("count.all", 0) >= self._warn_call_dbstmt_threshold:
stats_str = ", ".join("{}={}".format(k, db_stats[k]) for k in sorted(db_stats.keys()))
log.warn("PROC_OP '%s.%s' EXCEEDED DB THRESHOLD. stats=%s", svc_name, call.__name__, stats_str)
elif self._log_call_dbstats:
stats_str = ", ".join("{}={}".format(k, db_stats[k]) for k in sorted(db_stats.keys()))
log.info("PROC_OP '%s.%s' DB STATS: %s", svc_name, call.__name__, stats_str)
clear_db_stats()
if stats_callback:
stats_callback(proc_id=proc_id, proc_name=self.name, svc=svc_name, op=call.__name__,
request_id=request_id, context=context,
db_stats=db_stats, proc_stats=self.time_stats, result=res, exc=None)
except Exception:
log.exception("Error computing process call stats")
self._ctrl_current = None
threading.current_thread().name = thread_base_name
# Set response in AsyncEvent of caller (endpoint greenlet)
ar.set(res)
def _record_proc_time(self, cur_time):
""" Keep the _proc_time of the prior and prior-prior intervals for stats computation
"""
cur_interval = cur_time / STAT_INTERVAL_LENGTH
if cur_interval == self._proc_interval_num:
# We're still in the same interval - no update
pass
elif cur_interval-1 == self._proc_interval_num:
# Record the stats from the prior interval
self._proc_interval_num = cur_interval
self._proc_time_prior2 = self._proc_time_prior
self._proc_time_prior = self._proc_time
elif cur_interval-1 > self._proc_interval_num:
# We skipped an entire interval - everything is prior2
self._proc_interval_num = cur_interval
self._proc_time_prior2 = self._proc_time
self._proc_time_prior = self._proc_time
def _compute_proc_stats(self, start_proc_time):
cur_time = get_ion_ts_millis()
self._record_proc_time(cur_time)
proc_time = cur_time - start_proc_time
self._proc_time += proc_time
def start_listeners(self):
"""
Starts all listeners in managed greenlets.
Usually called by the ProcManager, unless using IonProcess manually.
"""
try:
# disable normal error reporting, this method should only be called from startup
self.thread_manager._failure_notify_callback = None
# spawn all listeners in startup listeners (from initializer, or added later)
for listener in self._startup_listeners:
self.add_endpoint(listener)
with Timeout(seconds=CFG.get_safe('container.messaging.timeout.start_listener', 30)):
gevent.wait([x.get_ready_event() for x in self.listeners])
except Timeout:
# remove failed endpoints before reporting failure above
for listener, proc in self._listener_map.iteritems():
if proc.proc.dead:
log.info("removed dead listener: %s", listener)
self.listeners.remove(listener)
self.thread_manager.children.remove(proc)
raise IonProcessError("start_listeners did not complete in expected time")
finally:
self.thread_manager._failure_notify_callback = self._child_failed
def _notify_stop(self):
"""
Called when the process is about to be shut down.
Instructs all listeners to close, puts a StopIteration into the synchronized queue,
and waits for the listeners to close and for the control queue to exit.
"""
for listener in self.listeners:
try:
listener.close()
except Exception as ex:
tb = traceback.format_exc()
log.warn("Could not close listener, attempting to ignore: %s\nTraceback:\n%s", ex, tb)
self._ctrl_queue.put(StopIteration)
# wait_children will join them and then get() them, which may raise an exception if any of them
# died with an exception.
self.thread_manager.wait_children(30)
PyonThread._notify_stop(self)
# run the cleanup method if we have one
if self._cleanup_method is not None:
try:
self._cleanup_method(self)
except Exception as ex:
log.warn("Cleanup method error, attempting to ignore: %s\nTraceback: %s", ex, traceback.format_exc())
def get_ready_event(self):
"""
Returns an Event that is set when the control greenlet is up and running.
"""
return self._ready_control
class IonProcessThreadManager(PyonThreadManager):
def _create_thread(self, target=None, **kwargs):
return IonProcessThread(target=target, heartbeat_secs=self.heartbeat_secs, **kwargs)
# ---------------------------------------------------------------------------------------------------
# Process type variants
class StandaloneProcess(BaseService):
"""
A process is an ION process of type "standalone" that has an incoming messaging
attachment for the process and operations as defined in a service YML.
"""
process_type = "standalone"
class SimpleProcess(BaseService):
"""
A simple process is an ION process of type "simple" that has no incoming messaging
attachment.
"""
process_type = "simple"
class ImmediateProcess(BaseService):
"""
An immediate process is an ION process of type "immediate" that does its action in
the on_init and on_start hooks, and is terminated immediately after completion.
Has no messaging attachment.
"""
process_type = "immediate"
class StreamProcess(BaseService):
"""
Base class for a stream process.
Such a process handles a sequence of otherwise unconstrained messages, resulting from a
subscription. There are no operations.
"""
process_type = "stream_process"
def call_process(self, message, stream_route, stream_id):
"""
Handles pre-processing of packet and process work
"""
self.process(message)
def process(self, message):
"""
Process a message as arriving based on a subscription.
"""
pass
# ---------------------------------------------------------------------------------------------------
# Process helpers
def get_ion_actor_id(process):
"""Given an ION process, return the ion-actor-id from the context, if set and present"""
ion_actor_id = None
if process:
ctx = process.get_context()
ion_actor_id = ctx.get(MSG_HEADER_ACTOR, None) if ctx else None
return ion_actor_id
def set_process_stats_callback(stats_cb):
""" Sets a callback function (hook) to push stats after a process operation call. """
global stats_callback
if stats_cb is None:
pass
elif stats_callback:
log.warn("Stats callback already defined")
stats_callback = stats_cb
|
scionrep/scioncc
|
src/pyon/ion/process.py
|
Python
|
bsd-2-clause
| 27,034
| 0.005401
|
#! /usr/bin/python
# Joe Deller 2014
# Finding out where we are in minecraft
# Level : Beginner
# Uses : Libraries, variables, functions
# Minecraft worlds on the Raspberry Pi are smaller than
# other minecraft worlds, but are still pretty big
# So one of the first things we need to learn to do
# is find out where we are in the world
# As the player moves around the world, Minecraft keeps track
# of the X (left / right ) , Y (height) ,Z (depth) coordinates of the player
# You can see these numbers on the main minecraft game screen
# The minecraft library has a method called getTilePos()
# It tracks where the player is
# This program introduces the "while" keyword, our first
# example of a loop, to make sure the program never stops
# until there is either an error, or we manually stop (break)
# the program using Ctrl-C on the keyboard
import mcpi.minecraft as minecraft
# This program also uses another library, the time library
# as we want the program to sleep for a short time - 1 second
# so that we don't fill the screen with too much information
# We will come across the time library later when we
# make a minecraft digital clock
import time
# Connect to Minecraft
mc = minecraft.Minecraft.create()
# We will use the getTilePos() method to tell us where we are
# and store that information in a variable
# Technically this is a special kind of variable, called an "object"
# but for now all we need to worry about is what to call it
# Most computer languages are very strict about using capital letters
# To the computer, playerPos, Playerpos and PlayerPOS are completely
# different things, so once you decide on a name, you need to spell
# it the same way every time you want to use it
playerPos = mc.player.getTilePos()
# playerPos now has our 3d position in the minecraft world
# it is made up of three parts, x, y & z
# There is another similar function called getPos()
# The difference is that getTilePos() returns whole numbers
# getPos() returns the exact position, to several decimal places
# We will stick with whole numbers for now
# playerPos = mc.player.getPos()
# We will be using a special kind of loop - an infinite loop
# Unless there is an error or we manually stop the program
# it will run forever
# True and False are normally used to compare one or more items and then
# make a choice, but for this program the loop is really asking "does True equal True?"
# the answer will always be yes so the loop will never stop
# We will be using more while loops in later programs
# The main thing we need to worry about is the spacing
# Notice playerPos has four spaces before it
# This means that it is "inside" the loop
# Python is very fussy about spaces, something we will be seeing again and again
# However, comments do not care about spaces
while True:
myLocation = mc.player.getTilePos()
# myLocation is a variable that contains three variables inside it
# we can get these one at a time, or all three at once
# Before we can use them with postToChat() we need to change them
# from numbers, into characters - called a string
# There are several ways of doing this, for now we will use a command
# called str , which takes a number and hands back a string
# of characters. Although to us there isn't any apparent difference
# the way the numbers and characters are stored is very different.
x = str(myLocation.x)
y = str(myLocation.y)
z = str(myLocation.z)
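# For example, if x is the number 12 then str(12) gives the text "12",
# which we can join onto other words with the + sign on the next line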
# We use the postToChat() method from our hello world example
mc.postToChat("You are standing at X: " + x + ", Y: " + y + ", Z: " + z)
# Take a breath!
time.sleep(1)
|
joedeller/pymine
|
whereamiV2.py
|
Python
|
mit
| 3,643
| 0.000549
|
from pySDC import CollocationClasses as collclass
import numpy as np
from ProblemClass import sharpclaw
#from examples.sharpclaw_burgers1d.TransferClass import mesh_to_mesh_1d
from pySDC.datatype_classes.mesh import mesh, rhs_imex_mesh
from pySDC.sweeper_classes.imex_1st_order import imex_1st_order
import pySDC.Methods as mp
from pySDC import Log
from pySDC.Stats import grep_stats, sort_stats
# Sharpclaw imports
from clawpack import pyclaw
from clawpack import riemann
from matplotlib import pyplot as plt
if __name__ == "__main__":
# set global logger (remove this if you do not want the output at all)
logger = Log.setup_custom_logger('root')
num_procs = 1
# This comes as read-in for the level class
lparams = {}
lparams['restol'] = 1E-10
sparams = {}
sparams['maxiter'] = 20
# setup parameters "in time"
t0 = 0
dt = 0.001
Tend = 100*dt
# This comes as read-in for the problem class
pparams = {}
pparams['nvars'] = [(2,50,50)]
pparams['nu'] = 0.001
# This comes as read-in for the transfer operations
tparams = {}
tparams['finter'] = True
# Fill description dictionary for easy hierarchy creation
description = {}
description['problem_class'] = sharpclaw
description['problem_params'] = pparams
description['dtype_u'] = mesh
description['dtype_f'] = rhs_imex_mesh
description['collocation_class'] = collclass.CollGaussLobatto
description['num_nodes'] = 5
description['sweeper_class'] = imex_1st_order
description['level_params'] = lparams
#description['transfer_class'] = mesh_to_mesh_1d
#description['transfer_params'] = tparams
# quickly generate block of steps
MS = mp.generate_steps(num_procs,sparams,description)
# get initial values on finest level
P = MS[0].levels[0].prob
uinit = P.u_exact(t0)
# call main function to get things done...
uend,stats = mp.run_pfasst_serial(MS,u0=uinit,t0=t0,dt=dt,Tend=Tend)
# compute exact solution and compare
uex = P.u_exact(Tend)
# print('error at time %s: %s' %(Tend,np.linalg.norm(uex.values-uend.values,np.inf)/np.linalg.norm(
# uex.values,np.inf)))
fig = plt.figure(figsize=(8,8))
plt.imshow(uend.values[0,:,:])
# plt.plot(P.state.grid.x.centers,uend.values, color='b', label='SDC')
# plt.plot(P.state.grid.x.centers,uex.values, color='r', label='Exact')
# plt.legend()
# plt.xlim([0, 1])
# plt.ylim([-1, 1])
plt.show()
|
torbjoernk/pySDC
|
examples/advection_2d_explicit/playground.py
|
Python
|
bsd-2-clause
| 2,543
| 0.009044
|
# ====================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ====================================================================
import sys, lucene, unittest
from lucene import JArray
from PyLuceneTestCase import PyLuceneTestCase
from MultiSpansWrapper import MultiSpansWrapper
from java.io import StringReader
from org.apache.lucene.analysis import Analyzer
from org.apache.lucene.analysis.core import \
LowerCaseTokenizer, WhitespaceTokenizer
from org.apache.lucene.analysis.tokenattributes import \
CharTermAttribute, OffsetAttribute, PayloadAttribute, \
PositionIncrementAttribute
from org.apache.lucene.document import Document, Field, TextField
from org.apache.lucene.index import MultiFields, Term
from org.apache.lucene.queryparser.classic import QueryParser
from org.apache.lucene.search import MultiPhraseQuery, PhraseQuery
from org.apache.lucene.search.payloads import PayloadSpanUtil
from org.apache.lucene.search.spans import SpanNearQuery, SpanTermQuery
from org.apache.lucene.util import BytesRef, Version
from org.apache.pylucene.analysis import \
PythonAnalyzer, PythonFilteringTokenFilter, PythonTokenFilter, \
PythonTokenizer
class PositionIncrementTestCase(PyLuceneTestCase):
"""
Unit tests ported from Java Lucene
"""
def testSetPosition(self):
class _tokenizer(PythonTokenizer):
def __init__(_self, reader):
super(_tokenizer, _self).__init__(reader)
_self.TOKENS = ["1", "2", "3", "4", "5"]
_self.INCREMENTS = [1, 2, 1, 0, 1]
_self.i = 0
_self.posIncrAtt = _self.addAttribute(PositionIncrementAttribute.class_)
_self.termAtt = _self.addAttribute(CharTermAttribute.class_)
_self.offsetAtt = _self.addAttribute(OffsetAttribute.class_)
def incrementToken(_self):
if _self.i == len(_self.TOKENS):
return False
_self.clearAttributes()
_self.termAtt.append(_self.TOKENS[_self.i])
_self.offsetAtt.setOffset(_self.i, _self.i)
_self.posIncrAtt.setPositionIncrement(_self.INCREMENTS[_self.i])
_self.i += 1
return True
def end(_self):
pass
def reset(_self):
pass
def close(_self):
pass
class _analyzer(PythonAnalyzer):
def createComponents(_self, fieldName, reader):
return Analyzer.TokenStreamComponents(_tokenizer(reader))
writer = self.getWriter(analyzer=_analyzer())
d = Document()
d.add(Field("field", "bogus", TextField.TYPE_STORED))
writer.addDocument(d)
writer.commit()
writer.close()
searcher = self.getSearcher()
reader = searcher.getIndexReader()
pos = MultiFields.getTermPositionsEnum(reader, MultiFields.getLiveDocs(reader), "field", BytesRef("1"))
pos.nextDoc()
# first token should be at position 0
self.assertEqual(0, pos.nextPosition())
pos = MultiFields.getTermPositionsEnum(reader, MultiFields.getLiveDocs(reader), "field", BytesRef("2"))
pos.nextDoc()
# second token should be at position 2
self.assertEqual(2, pos.nextPosition())
q = PhraseQuery()
q.add(Term("field", "1"))
q.add(Term("field", "2"))
hits = searcher.search(q, None, 1000).scoreDocs
self.assertEqual(0, len(hits))
# same as previous, just specify positions explicitly.
q = PhraseQuery()
q.add(Term("field", "1"), 0)
q.add(Term("field", "2"), 1)
hits = searcher.search(q, None, 1000).scoreDocs
self.assertEqual(0, len(hits))
# specifying correct positions should find the phrase.
q = PhraseQuery()
q.add(Term("field", "1"), 0)
q.add(Term("field", "2"), 2)
hits = searcher.search(q, None, 1000).scoreDocs
self.assertEqual(1, len(hits))
q = PhraseQuery()
q.add(Term("field", "2"))
q.add(Term("field", "3"))
hits = searcher.search(q, None, 1000).scoreDocs
self.assertEqual(1, len(hits))
q = PhraseQuery()
q.add(Term("field", "3"))
q.add(Term("field", "4"))
hits = searcher.search(q, None, 1000).scoreDocs
self.assertEqual(0, len(hits))
# phrase query would find it when correct positions are specified.
q = PhraseQuery()
q.add(Term("field", "3"), 0)
q.add(Term("field", "4"), 0)
hits = searcher.search(q, None, 1000).scoreDocs
self.assertEqual(1, len(hits))
# phrase query should fail for a non-existing searched term
# even if there exist another searched terms in the same searched
# position.
q = PhraseQuery()
q.add(Term("field", "3"), 0)
q.add(Term("field", "9"), 0)
hits = searcher.search(q, None, 1000).scoreDocs
self.assertEqual(0, len(hits))
# multi-phrase query should succeed for a non-existing searched term
# because there exist another searched terms in the same searched
# position.
mq = MultiPhraseQuery()
mq.add([Term("field", "3"), Term("field", "9")], 0)
hits = searcher.search(mq, None, 1000).scoreDocs
self.assertEqual(1, len(hits))
q = PhraseQuery()
q.add(Term("field", "2"))
q.add(Term("field", "4"))
hits = searcher.search(q, None, 1000).scoreDocs
self.assertEqual(1, len(hits))
q = PhraseQuery()
q.add(Term("field", "3"))
q.add(Term("field", "5"))
hits = searcher.search(q, None, 1000).scoreDocs
self.assertEqual(1, len(hits))
q = PhraseQuery()
q.add(Term("field", "4"))
q.add(Term("field", "5"))
hits = searcher.search(q, None, 1000).scoreDocs
self.assertEqual(1, len(hits))
q = PhraseQuery()
q.add(Term("field", "2"))
q.add(Term("field", "5"))
hits = searcher.search(q, None, 1000).scoreDocs
self.assertEqual(0, len(hits))
def testPayloadsPos0(self):
writer = self.getWriter(analyzer=TestPayloadAnalyzer())
doc = Document()
doc.add(Field("content", "a a b c d e a f g h i j a b k k",
TextField.TYPE_STORED))
writer.addDocument(doc)
reader = writer.getReader()
writer.close()
tp = MultiFields.getTermPositionsEnum(reader,
MultiFields.getLiveDocs(reader),
"content", BytesRef("a"))
count = 0
self.assert_(tp.nextDoc() != tp.NO_MORE_DOCS)
# "a" occurs 4 times
self.assertEqual(4, tp.freq())
expected = 0
self.assertEqual(expected, tp.nextPosition())
self.assertEqual(1, tp.nextPosition())
self.assertEqual(3, tp.nextPosition())
self.assertEqual(6, tp.nextPosition())
# only one doc has "a"
self.assert_(tp.nextDoc() == tp.NO_MORE_DOCS)
searcher = self.getSearcher(reader=reader)
stq1 = SpanTermQuery(Term("content", "a"))
stq2 = SpanTermQuery(Term("content", "k"))
sqs = [stq1, stq2]
snq = SpanNearQuery(sqs, 30, False)
count = 0
sawZero = False
pspans = MultiSpansWrapper.wrap(searcher.getTopReaderContext(), snq)
while pspans.next():
payloads = pspans.getPayload()
sawZero |= pspans.start() == 0
it = payloads.iterator()
while it.hasNext():
count += 1
it.next()
self.assertEqual(5, count)
self.assert_(sawZero)
spans = MultiSpansWrapper.wrap(searcher.getTopReaderContext(), snq)
count = 0
sawZero = False
while spans.next():
count += 1
sawZero |= spans.start() == 0
self.assertEqual(4, count)
self.assert_(sawZero)
sawZero = False
psu = PayloadSpanUtil(searcher.getTopReaderContext())
pls = psu.getPayloadsForQuery(snq)
count = pls.size()
it = pls.iterator()
while it.hasNext():
bytes = JArray('byte').cast_(it.next())
s = bytes.string_
sawZero |= s == "pos: 0"
self.assertEqual(5, count)
self.assert_(sawZero)
class StopWhitespaceAnalyzer(PythonAnalyzer):
def __init__(self, enablePositionIncrements):
super(StopWhitespaceAnalyzer, self).__init__()
self.enablePositionIncrements = enablePositionIncrements
def createComponents(self, fieldName, reader):
class _stopFilter(PythonFilteringTokenFilter):
def __init__(_self, tokenStream):
super(_stopFilter, _self).__init__(Version.LUCENE_CURRENT, tokenStream)
_self.termAtt = _self.addAttribute(CharTermAttribute.class_);
def accept(_self):
return _self.termAtt.toString() != "stop"
source = WhitespaceTokenizer(Version.LUCENE_CURRENT, reader)
return Analyzer.TokenStreamComponents(source, _stopFilter(source))
class TestPayloadAnalyzer(PythonAnalyzer):
def createComponents(self, fieldName, reader):
source = LowerCaseTokenizer(Version.LUCENE_CURRENT, reader)
return Analyzer.TokenStreamComponents(source, PayloadFilter(source, fieldName))
class PayloadFilter(PythonTokenFilter):
def __init__(self, input, fieldName):
super(PayloadFilter, self).__init__(input)
self.input = input
self.fieldName = fieldName
self.pos = 0
self.i = 0
self.posIncrAttr = input.addAttribute(PositionIncrementAttribute.class_)
self.payloadAttr = input.addAttribute(PayloadAttribute.class_)
self.termAttr = input.addAttribute(CharTermAttribute.class_)
def incrementToken(self):
if self.input.incrementToken():
bytes = JArray('byte')("pos: %d" %(self.pos))
self.payloadAttr.setPayload(BytesRef(bytes))
if self.pos == 0 or self.i % 2 == 1:
posIncr = 1
else:
posIncr = 0
self.posIncrAttr.setPositionIncrement(posIncr)
self.pos += posIncr
self.i += 1
return True
return False
if __name__ == "__main__":
lucene.initVM(vmargs=['-Djava.awt.headless=true'])
if '-loop' in sys.argv:
sys.argv.remove('-loop')
while True:
try:
unittest.main()
except:
pass
else:
unittest.main()
|
owenmorris/pylucene
|
test/test_PositionIncrement.py
|
Python
|
apache-2.0
| 11,319
| 0.002297
|
# parsetab.py
# This file is automatically generated. Do not edit.
_tabversion = '3.2'
_lr_method = 'LALR'
_lr_signature = '\x91\x95\xa5\xf7\xe0^bz\xc0\xf4\x04\xf9Z\xebA\xba'
_lr_action_items = {'NAME':([0,2,5,7,11,12,13,14,],[1,8,8,8,8,8,8,8,]),')':([3,8,9,10,16,17,18,19,20,],[-9,-10,-7,16,-8,-4,-3,-5,-6,]),'(':([0,2,5,7,11,12,13,14,],[5,5,5,5,5,5,5,5,]),'+':([1,3,6,8,9,10,15,16,17,18,19,20,],[-10,-9,12,-10,-7,12,12,-8,-4,-3,-5,-6,]),'*':([1,3,6,8,9,10,15,16,17,18,19,20,],[-10,-9,13,-10,-7,13,13,-8,13,13,-5,-6,]),'-':([0,1,2,3,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,],[2,-10,2,-9,2,11,2,-10,-7,11,2,2,2,2,11,-8,-4,-3,-5,-6,]),'NUMBER':([0,2,5,7,11,12,13,14,],[3,3,3,3,3,3,3,3,]),'/':([1,3,6,8,9,10,15,16,17,18,19,20,],[-10,-9,14,-10,-7,14,14,-8,14,14,-5,-6,]),'=':([1,],[7,]),'$end':([1,3,4,6,8,9,15,16,17,18,19,20,],[-10,-9,0,-2,-10,-7,-1,-8,-4,-3,-5,-6,]),}
_lr_action = { }
for _k, _v in _lr_action_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_action: _lr_action[_x] = { }
_lr_action[_x][_k] = _y
del _lr_action_items
_lr_goto_items = {'expression':([0,2,5,7,11,12,13,14,],[6,9,10,15,17,18,19,20,]),'statement':([0,],[4,]),}
_lr_goto = { }
for _k, _v in _lr_goto_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_goto: _lr_goto[_x] = { }
_lr_goto[_x][_k] = _y
del _lr_goto_items
_lr_productions = [
("S' -> statement","S'",1,None,None,None),
('statement -> NAME = expression','statement',3,'p_statement_assign','D:\\repos\\test\\testpy\\testply.py',58),
('statement -> expression','statement',1,'p_statement_expr','D:\\repos\\test\\testpy\\testply.py',63),
('expression -> expression + expression','expression',3,'p_expression_binop','D:\\repos\\test\\testpy\\testply.py',68),
('expression -> expression - expression','expression',3,'p_expression_binop','D:\\repos\\test\\testpy\\testply.py',69),
('expression -> expression * expression','expression',3,'p_expression_binop','D:\\repos\\test\\testpy\\testply.py',70),
('expression -> expression / expression','expression',3,'p_expression_binop','D:\\repos\\test\\testpy\\testply.py',71),
('expression -> - expression','expression',2,'p_expression_uminus','D:\\repos\\test\\testpy\\testply.py',83),
('expression -> ( expression )','expression',3,'p_expression_group','D:\\repos\\test\\testpy\\testply.py',88),
('expression -> NUMBER','expression',1,'p_expression_number','D:\\repos\\test\\testpy\\testply.py',93),
('expression -> NAME','expression',1,'p_expression_name','D:\\repos\\test\\testpy\\testply.py',98),
]
|
quchunguang/test
|
testpy/parsetab.py
|
Python
|
mit
| 2,575
| 0.133592
|