| code (string, lengths 2 to 1.05M) | repo_name (string, lengths 5 to 104) | path (string, lengths 4 to 251) | language (1 class) | license (15 classes) | size (int32, 2 to 1.05M) |
|---|---|---|---|---|---|
#-*-coding:utf-8-*-
from . import about_blueprint
from flask import render_template
@about_blueprint.route("/")
def about_index():
return render_template("about.html")
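# Illustrative note (not part of the original module; url_prefix is an
# assumption): about_blueprint is expected to be a flask.Blueprint created in
# this package's __init__.py, and the application would register it with
# something like
#     app.register_blueprint(about_blueprint, url_prefix="/about")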
|
PythonScientists/Shape
|
main/module/about/views.py
|
Python
|
apache-2.0
| 173
|
#!/bin/python
from zeroos.core0.client import Client
import sys
import time
"""
This script expects that you know the IP of the core0 and that you can access it from the machine running this script.
An easy way to do that is to build the initramfs with a custom zerotier network id (https://github.com/g8os/initramfs/tree/0.10.0#customize-build).
At boot, core0 will connect to the zerotier network and you can assign an IP to it.
"""
CORE0IP = "INSERT CORE0 IP HERE"
ZEROTIER = "INSERT ZEROTIER NETWORK ID HERE"
def main(init=False):
print("[+] connect to core0")
cl = Client(CORE0IP)
try:
cl.ping()
except Exception as e:
print("cannot connect to the core0: %s" % e)
return 1
print("[+] prepare data disks")
cl.system('mkdir -p /dev/mongodb_storage').get()
if init:
cl.btrfs.create('mongodb_storage', ['/dev/sda'])
disks = cl.disk.list().get('blockdevices', [])
if len(disks) < 1:
print("[-] need at least one data disk available")
return
disks_by_name = {d['name']: d for d in disks}
if disks_by_name['sda']['mountpoint'] is None:
print("[+] mount disk")
cl.disk.mount('/dev/sda', '/dev/mongodb_storage', [''])
try:
print("[+] create container")
container_id = cl.container.create('https://stor.jumpscale.org/stor2/flist/ubuntu-g8os-flist/mongodb-g8os.flist',
mount={"/dev/mongodb_storage": "/mnt/data"},
zerotier=ZEROTIER).get()
print("[+] container created, ID: %s" % container_id)
except Exception as e:
print("[-] error during container creation: %s" % e)
return 1
container = cl.container.client(container_id)
print("[+] get zerotier ip")
container_ip = get_zerotier_ip(container)
print("[+] configure mongodb")
container.system("bash -c 'echo DAEMONUSER=\"root\" > /etc/default/mongodb'").get()
container.system("sed -i 's/dbpath.*/dbpath=\/mnt\/data/' /etc/mongodb.conf").get()
container.system("sed -i '/bind.*/d' /etc/mongodb.conf").get()
container.system("bash -c 'echo nounixsocket=true >> /etc/mongodb.conf'").get()
print("[+] starts mongod")
res = container.system('/etc/init.d/mongodb start').get()
print("[+] you can connect to mongodb at %s:27017" % container_ip)
def get_zerotier_ip(container):
i = 0
while i < 10:
addrs = container.info.nic()
ifaces = {a['name']: a for a in addrs}
for iface, info in ifaces.items():
if iface.startswith('zt'):
cidr = info['addrs'][0]['addr']
return cidr.split('/')[0]
time.sleep(2)
i += 1
raise TimeoutError("[-] couldn't get an ip on zerotier network")
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='attach disks to core0')
parser.add_argument('--init', action='store_true', required=False,
help='create the filesystem and subvolume')
args = parser.parse_args()
# print(args.init)
main(init=args.init)
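# Usage sketch (illustrative comment, not part of the original script): with
# CORE0IP and ZEROTIER filled in above, run either of
#
#   python mongodb.py          # reuse the existing filesystem on /dev/sda
#   python mongodb.py --init   # create the btrfs filesystem and subvolume first
#
# The script mounts /dev/sda, creates a mongodb container from the flist and
# prints the container IP to connect to on port 27017.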
|
g8os/core0
|
docs/_archive/examples/mongodb.py
|
Python
|
apache-2.0
| 3,162
|
# ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
# pylint: skip-file
"""
Test basic math operations on Tensors and compare with numpy results.
The Tensor types include GPU and CPU Tensors.
"""
import numpy as np
import itertools as itt
from neon.backends.nervanagpu import NervanaGPU
from neon.backends.nervanacpu import NervanaCPU
from neon.backends.tests.utils import assert_tensors_allclose
def init_helper(lib, inA, inB, dtype):
A = lib.array(inA, dtype=dtype)
B = lib.array(inB, dtype=dtype)
C = lib.empty(inB.shape, dtype=dtype)
return A, B, C
def math_helper(lib, op, inA, inB, dtype):
A, B, C = init_helper(lib, inA, inB, dtype)
if op == '+':
C[:] = A + B
elif op == '-':
C[:] = A - B
elif op == '*':
C[:] = A * B
elif op == '/':
C[:] = A / B
elif op == '>':
C[:] = A > B
elif op == '>=':
C[:] = A >= B
elif op == '<':
C[:] = A < B
elif op == '<=':
C[:] = A <= B
return C
def compare_helper(op, inA, inB, dtype):
numpy_result = math_helper(np, op, inA, inB, dtype=np.float32)
if np.dtype(dtype).kind == 'i' or np.dtype(dtype).kind == 'u':
numpy_result = np.around(numpy_result)
numpy_result = numpy_result.clip(
np.iinfo(dtype).min, np.iinfo(dtype).max)
numpy_result = numpy_result.astype(dtype)
if dtype in (np.float32, np.float16):
gpu = NervanaGPU(default_dtype=dtype)
nervanaGPU_result = math_helper(gpu, op, inA, inB, dtype=dtype)
nervanaGPU_result = nervanaGPU_result.get()
assert np.allclose(numpy_result, nervanaGPU_result, rtol=0, atol=1e-5)
cpu = NervanaCPU(default_dtype=dtype)
nervanaCPU_result = math_helper(cpu, op, inA, inB, dtype=dtype)
nervanaCPU_result = nervanaCPU_result.get()
assert np.allclose(numpy_result, nervanaCPU_result, rtol=0, atol=1e-5)
def rand_unif(dtype, dims):
if np.dtype(dtype).kind == 'f':
return np.random.uniform(-1, 1, dims).astype(dtype)
else:
iinfo = np.iinfo(dtype)
return np.around(np.random.uniform(iinfo.min, iinfo.max, dims)).clip(iinfo.min, iinfo.max)
def pytest_generate_tests(metafunc):
"""
Build a list of test arguments.
"""
dims = [(64, 327),
(64, 1),
(1, 1023),
(4, 3),
]
dtypes = [np.float32, np.float16]
if 'fargs_tests' in metafunc.fixturenames:
fargs = itt.product(dims, dtypes)
metafunc.parametrize("fargs_tests", fargs)
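# Illustrative note (not part of the original test file): itt.product(dims, dtypes)
# above yields pairs such as ((64, 327), np.float32) and ((4, 3), np.float16),
# so each parametrized test below receives one (dims, dtype) tuple per run.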
def test_math(fargs_tests):
dims, dtype = fargs_tests
randA = rand_unif(dtype, dims)
randB = rand_unif(dtype, dims)
compare_helper('+', randA, randB, dtype)
compare_helper('-', randA, randB, dtype)
compare_helper('*', randA, randB, dtype)
compare_helper('>', randA, randB, dtype)
compare_helper('>=', randA, randB, dtype)
compare_helper('<', randA, randB, dtype)
compare_helper('<=', randA, randB, dtype)
def test_slicing(fargs_tests):
dims, dtype = fargs_tests
gpu = NervanaGPU(default_dtype=dtype)
cpu = NervanaCPU(default_dtype=dtype)
array_np = np.random.uniform(-1, 1, dims).astype(dtype)
array_ng = gpu.array(array_np, dtype=dtype)
array_nc = cpu.array(array_np, dtype=dtype)
assert_tensors_allclose(array_ng[0], array_nc[0], rtol=0, atol=1e-3)
assert_tensors_allclose(array_ng[-1], array_nc[-1], rtol=0, atol=1e-3)
assert_tensors_allclose(array_ng[0, :], array_nc[0, :], rtol=0, atol=1e-3)
assert_tensors_allclose(array_ng[0:], array_nc[0:], rtol=0, atol=1e-3)
assert_tensors_allclose(array_ng[:-1], array_nc[:-1], rtol=0, atol=1e-3)
assert_tensors_allclose(array_ng[:, 0], array_nc[:, 0], rtol=0, atol=1e-3)
assert_tensors_allclose(array_ng[:, 0:1], array_nc[:, 0:1], rtol=0, atol=1e-3)
assert_tensors_allclose(array_ng[-1, 0:], array_nc[-1:, 0:], rtol=0, atol=1e-3)
array_ng[0] = 0
array_nc[0] = 0
assert_tensors_allclose(array_ng, array_nc, rtol=0, atol=1e-3)
del(gpu)
|
DougFirErickson/neon
|
neon/backends/tests/test_tensor.py
|
Python
|
apache-2.0
| 4,727
|
#!/usr/bin/env python
# flake8: noqa
from setuptools import find_packages, setup
setup(
name = "acos-client",
version = "1.4.6",
packages = find_packages(),
author = "A10 Networks",
author_email = "mdurrant@a10networks.com",
description = "A10 Networks ACOS API Client",
license = "Apache",
keywords = "a10 axapi acos adc slb load balancer",
url = "https://github.com/a10networks/acos-client",
long_description = open('README.md').read(),
classifiers = [
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
'Topic :: Internet',
'Topic :: Software Development :: Libraries :: Python Modules'
],
install_requires = ['requests>=2.3.0', 'six', 'uhashring'],
test_suite="acos_client.tests.test_suite"
)
|
mdurrant-b3/acos-client
|
setup.py
|
Python
|
apache-2.0
| 1,390
|
import urllib2
import zipfile
import re
url = "http://www.pythonchallenge.com/pc/def/channel.zip"
data = urllib2.urlopen(url).read()
file1='ttt.zip'
with open(file1,'wb+') as fzip:
fzip.write(data)
#dd=re.compile(r'[from|is] (\d+$)')
dd = re.compile(r'(?<=[from|is] )(\d+)')
zf = zipfile.ZipFile(file1,'r')
text=zf.read('readme.txt')
cc = dd.search(text)
while cc:
#print text
text = zf.read(cc.group()+'.txt')
cc = dd.search(text)
print text
dd = re.compile(r'(?<=[from|is] )(\d+)')
zf = zipfile.ZipFile(file1,'r')
text=zf.read('readme.txt')
cc = dd.search(text)
data1=""
while cc:
#print text
text = zf.read(cc.group()+'.txt')
data1 += zf.getinfo(cc.group()+'.txt').comment
cc = dd.search(text)
print text
print data1
|
gregorianzhang/pythonchallenge
|
6.py
|
Python
|
apache-2.0
| 761
|
'''
Check Yahoo finance currency data helper.
Update log: (date / version / author : comments)
2017-12-10 / 1.0.0 / Du Jiang : Creation
2017-12-13 / 2.0.0 / Du Jiang : Use new API
'''
from com.djs.learn.financeapi import CheckFinanceDataRequests
__data_type = 1
__inventory_info_file_path = "../../../../etc/CurrencyInfo.csv"
__result_output_file_path = "../../../../Temp/CurrencyDataY.json"
argv = ["-d", __data_type, "-i", __inventory_info_file_path,
"-o", __result_output_file_path]
CheckFinanceDataRequests.main(argv)
'''
Or run:
python CheckFinanceDataRequests.py -d 1 -i "../../../../etc/CurrencyData.csv" -o "../../../../Temp/CurrencyDataY.json"
'''
if __name__ == '__main__':
pass
|
djsilenceboy/LearnTest
|
Python_Test/PyFinanceApiSample/com/djs/learn/test/TestCheckYahooFinanceCurrencyData.py
|
Python
|
apache-2.0
| 709
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
time_trigger_opts = [
cfg.IntOpt('min_interval',
default=60 * 60,
help='The minimum interval of two adjacent time points. '
'min_interval >= (max_window_time * 2)'),
cfg.IntOpt('min_window_time',
default=900,
help='The minimum window time'),
cfg.IntOpt('max_window_time',
default=1800,
help='The maximum window time'),
cfg.StrOpt('time_format',
default='calendar',
choices=['crontab', 'calendar'],
help='The type of time format which is used to compute time'),
cfg.IntOpt('trigger_poll_interval',
default=15,
help='Interval, in seconds, in which Karbor will poll for '
'trigger events'),
cfg.StrOpt('scheduling_strategy',
default='multi_node',
help='Time trigger scheduling strategy '
)
]
CONF = cfg.CONF
CONF.register_opts(time_trigger_opts)
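# Illustrative note (not part of the original module): oslo.config registers
# these options under the [DEFAULT] group, so once a config file is loaded the
# values are read as, e.g., CONF.min_interval (3600 seconds by default) or
# CONF.scheduling_strategy ('multi_node' by default).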
|
openstack/smaug
|
karbor/services/operationengine/engine/triggers/timetrigger/__init__.py
|
Python
|
apache-2.0
| 1,625
|
"""
Provided code for Application portion of Module 1
Imports physics citation graph
"""
###################################
# Code for loading citation graph
CITATION_URL = "phys-cite_graph.txt"
def load_graph(graph_url):
"""
Function that loads a graph given the URL
for a text representation of the graph
Returns a dictionary that models a graph
"""
graph_file = open(graph_url)
graph_text = graph_file.read()
graph_lines = graph_text.split('\n')
graph_lines = graph_lines[ : -1]
print "Loaded graph with", len(graph_lines), "nodes"
answer_graph = {}
for line in graph_lines:
neighbors = line.split(' ')
node = int(neighbors[0])
answer_graph[node] = set([])
for neighbor in neighbors[1 : -1]:
answer_graph[node].add(int(neighbor))
return answer_graph
citation_graph = load_graph(CITATION_URL)
|
tsh/coursera-algorithmic-thinking
|
Week 1/graph_loader.py
|
Python
|
apache-2.0
| 908
|
import os
import sys
import logging
import threading
from flask import Flask
app = Flask(__name__)
app.config.from_envvar("PEGASUS_METRICS_CONFIG")
# This is required for sessions and message flashing
app.secret_key = os.urandom(24)
import pegasus.metrics.views
import pegasus.metrics.filters
|
pegasus-isi/pegasus-metrics
|
pegasus/metrics/__init__.py
|
Python
|
apache-2.0
| 298
|
# Fuck you Disyer. Stealing my fucking paypal. GET FUCKED: toontown.cogdominium.DistributedCogdoElevatorInt
from toontown.building.DistributedElevatorInt import DistributedElevatorInt
class DistributedCogdoElevatorInt(DistributedElevatorInt):
def _getDoorsClosedInfo(self):
return ('cogdoInterior', 'cogdoInterior')
|
DedMemez/ODS-August-2017
|
cogdominium/DistributedCogdoElevatorInt.py
|
Python
|
apache-2.0
| 335
|
#!/usr/bin/python
from subprocess import call
import sys
import os
from socket import *
cs = socket(AF_INET, SOCK_DGRAM)
cs.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
cs.setsockopt(SOL_SOCKET, SO_BROADCAST, 1)
###Broadcast according to client group
#Show ports associated with a particular group
file = "group_port.txt" #Name of file containing Groups
a = open(file,'r')
file_contents = a.read()
print(file_contents)
a.close()
#Taking port as input
print("Enter the port of the associated Group: ")
port = int(input())
###Assigning the port and broadcasting address
#Note - Change Port no. according to the group no.
#port = 9999 #Default port
addr = ('255.255.255.255',port) #Address used for broadcasting
###Setting the buffer size
buf =1024 #Buffer Size
file_name=sys.argv[1] #Taking file name from command line argument [0]-program_file name, [1]- input provided
#[2] - multicast (using for broadcasting), [3] - file with list of IP's,on which to broadcast
###Writing server's IP to file
#Taking the ip as input from server_ip file - just for reference
fp = open("server_ip","r")
ip = fp.read()
fp.close()
written = 0
ipp = ip
#Checking if IP already exists
fl = open(file_name,'r')
lines = fl.readlines()
for line in lines:
if line == ipp:
written = 1
fl.close()
#If not written then write IP to file
if written !=1:
file = open(file_name,"a")
file.write(ip)
file.close()
#Writing IP ends here
#Encrypting the file with GPG key
call(["gpg", "-r", "trialuser@mailinator.com", "-e", file_name])
file_name = file_name+".gpg" #New file name
###Putting the file's content in buffer
f=open(file_name,"rb") #Opening file in read mode
data = f.read(buf) #Taking the data from file into data variable
###Sending the data
print("##################################################")
print("# Sending File to the selected group #")
print("##################################################\n")
print("##################################################")
print("# File sent to the group #")
print("##################################################")
os.remove(file_name) #Delete the intermediate (encrypted file)
cs.sendto(data,addr) #Sending data to the broadcasting address
|
Colviz/Vince
|
groups/group_server.py
|
Python
|
apache-2.0
| 2,451
|
"""Per-repository configuration options."""
# =============================================================================
# CONTENTS
# -----------------------------------------------------------------------------
# abdt_repooptions
#
# Public Classes:
# Data
#
# Public Functions:
# merge_override_into_data
# merge_data_objects
# make_default_data
# data_from_json
# json_from_data
# validate_data
# data_from_repo_or_none
#
# -----------------------------------------------------------------------------
# (this contents block is generated, edits will be lost)
# =============================================================================
from __future__ import absolute_import
import copy
import json
import phlgit_show
import phlgit_showref
class Data(object):
def __init__(self):
super(Data, self).__init__()
self.description = None
self.branch_url_format = None
self.review_url_format = None
self.admin_emails = []
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.__dict__ == other.__dict__
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def _merge_lists_as_sets(*list_list):
# sets aren't serializable as json so we want to store as list
new_set = set()
for l in list_list:
if l is not None:
new_set |= set(l)
return list(new_set)
def merge_override_into_data(default, override):
"""Return the result of overriding the non-None keys of 'override'.
:default: the lower precedence Data
:override: the higher precedence Data
:returns: the higher precedence Data
"""
# first create a copy of default, use deepcopy() for future-proofing
result = copy.deepcopy(default)
string_keys = [
"description",
"branch_url_format",
"review_url_format",
]
list_keys = [
"admin_emails",
]
assert set(string_keys + list_keys) == set(Data().__dict__.keys())
for key, value in override.__dict__.iteritems():
if value is not None:
if key in string_keys:
setattr(result, key, value)
else: # it's a list attribute
assert key in list_keys
if key in result.__dict__:
left = getattr(result, key)
right = getattr(override, key)
setattr(result, key, _merge_lists_as_sets(left, right))
else:
setattr(result, key, value)
return result
def merge_data_objects(*data_list):
"""Merge many Data objects, precedence increases with index in the list.
If an item in the list is None then it is ignored.
:data_list: multiple Data() args
:returns: a Data() that represents the composite of all the configs
"""
result = data_list[0]
data_list = data_list[1:]
for data in data_list:
if data is not None:
result = merge_override_into_data(result, data)
return result
def make_default_data():
"""Returns a 'Data' with sensible default values.
:returns: a 'Data'
"""
data = Data()
data.description = "(unnamed repo)"
return data
def data_from_json(json_string):
"""Returns a 'Data' from the supplied 'json_string'.
The 'json_string' doesn't have to mention all the attributes of Data, it
must not mention attributes that don't exist in Data already.
:json_string: a string of the json data
:returns: a abdt_repoconfig.Data based on 'json_string'
"""
data = Data()
for key, value in json.loads(json_string).iteritems():
getattr(data, key) # raise if the attribute doesn't already exist
setattr(data, key, value)
return data
def json_from_data(data):
"""Returns a json string from the supplied 'data'.
:data: a abdt_repoconfig.Data to encode as json
:returns: a json string based on 'data'
"""
return json.dumps(
data,
default=lambda x: x.__dict__,
sort_keys=True,
indent=4)
def validate_data(data):
"""Raise if the supplied data is invalid in any way.
:data: a Data() to be validated
:returns: None
"""
# make sure that 'data' has the same attributes as a blank data
data_key_set = set(data.__dict__.keys())
blank_data_key_set = set(Data().__dict__.keys())
if data_key_set != blank_data_key_set:
if data_key_set.issubset(blank_data_key_set):
raise Exception(
"supplied 'data' is missing fields: {fields}".format(
fields=list(blank_data_key_set - data_key_set)))
elif data_key_set.issuperset(blank_data_key_set):
raise Exception(
"supplied 'data' has extra fields: {fields}".format(
fields=list(data_key_set - blank_data_key_set)))
else:
raise Exception(
"supplied 'data' is missing or gained: {fields}".format(
fields=list(data_key_set ^ blank_data_key_set)))
if data.branch_url_format is not None:
branch = 'blahbranch'
repo_url = 'myorg/myrepo'
data.branch_url_format.format(branch=branch, repo_url=repo_url)
if data.review_url_format is not None:
review = 123
data.review_url_format.format(review=review)
def data_from_repo_or_none(repo):
"""Returns a valid 'Data' if 'repo' has a config file.
Will raise if the config file could not be parsed.
Will return 'None' if no config file was found.
:repo: a callable supporting git commands, e.g. repo("status")
:returns: a valid 'Data' or None
"""
config = None
# try to get the file content from the special ref, if it exists
ref = 'refs/config/origin/arcyd'
if ref in phlgit_showref.names(repo):
try:
config = phlgit_show.file_on_ref(
repo, 'repo.json', ref)
except Exception:
pass
if config is not None:
config = data_from_json(config)
return config
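# Illustrative sketch (not part of the original module): a minimal round trip
# through the helpers above, assuming they behave as documented.
#
#     defaults = make_default_data()
#     override = data_from_json('{"description": "my repo"}')
#     merged = merge_data_objects(defaults, override)
#     assert merged.description == "my repo"
#     assert merged.admin_emails == []   # unmentioned attributes keep defaults
#     print(json_from_data(merged))      # serializes back to a json string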
# -----------------------------------------------------------------------------
# Copyright (C) 2013-2014 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
|
valhallasw/phabricator-tools
|
py/abd/abdt_repooptions.py
|
Python
|
apache-2.0
| 6,892
|
"""Support for MyQ gateways."""
from pymyq.const import (
DEVICE_STATE as MYQ_DEVICE_STATE,
DEVICE_STATE_ONLINE as MYQ_DEVICE_STATE_ONLINE,
KNOWN_MODELS,
MANUFACTURER,
)
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_CONNECTIVITY,
BinarySensorEntity,
)
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import DOMAIN, MYQ_COORDINATOR, MYQ_GATEWAY
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up mysq covers."""
data = hass.data[DOMAIN][config_entry.entry_id]
myq = data[MYQ_GATEWAY]
coordinator = data[MYQ_COORDINATOR]
entities = []
for device in myq.gateways.values():
entities.append(MyQBinarySensorEntity(coordinator, device))
async_add_entities(entities)
class MyQBinarySensorEntity(CoordinatorEntity, BinarySensorEntity):
"""Representation of a MyQ gateway."""
_attr_device_class = DEVICE_CLASS_CONNECTIVITY
def __init__(self, coordinator, device):
"""Initialize with API object, device id."""
super().__init__(coordinator)
self._device = device
@property
def name(self):
"""Return the name of the garage door if any."""
return f"{self._device.name} MyQ Gateway"
@property
def is_on(self):
"""Return if the device is online."""
if not self.coordinator.last_update_success:
return False
# Not all devices report online so assume True if it's missing
return self._device.device_json[MYQ_DEVICE_STATE].get(
MYQ_DEVICE_STATE_ONLINE, True
)
@property
def available(self) -> bool:
"""Entity is always available."""
return True
@property
def unique_id(self):
"""Return a unique, Home Assistant friendly identifier for this entity."""
return self._device.device_id
@property
def device_info(self):
"""Return the device_info of the device."""
device_info = {
"identifiers": {(DOMAIN, self._device.device_id)},
"name": self.name,
"manufacturer": MANUFACTURER,
"sw_version": self._device.firmware_version,
}
model = KNOWN_MODELS.get(self._device.device_id[2:4])
if model:
device_info["model"] = model
return device_info
|
kennedyshead/home-assistant
|
homeassistant/components/myq/binary_sensor.py
|
Python
|
apache-2.0
| 2,370
|
# _*_ coding:utf-8 _*_
import random
from parser_xml import doxml
__author__ = 'Administrator'
import pygame
def item_to_int(array=[]):
arr = []
for a in array:
arr.append(int(a))
return arr
pygame.init()
keys = [False, False, False, False]
screen = pygame.display.set_mode((450, 650), 0, 32)
pygame.display.set_caption(u'飞机大战'.encode('utf-8'))
plane = pygame.image.load('resources/plane.png').convert_alpha()
pos = doxml('resources/plane.xml')
# hero_1
hero_1_p = pos['hero_1']
hero_1_p = item_to_int(hero_1_p)
hero_1 = plane.subsurface(pygame.Rect((hero_1_p[2], hero_1_p[3]), (hero_1_p[0], hero_1_p[1])))
hero_1_pos = [200, 580]
# bullet_1 (blue)
bullet_1_p = item_to_int(pos['bullet_1'])
bullet_1 = plane.subsurface(pygame.Rect((bullet_1_p[2], bullet_1_p[3]), (bullet_1_p[0], bullet_1_p[1])))
bullet_1_pos = [hero_1_pos[0] + hero_1_p[0] / 2 - bullet_1_p[0] / 2 + 1, hero_1_pos[1] - bullet_1_p[1]]
bullet_1_rect = pygame.Rect(bullet_1.get_rect())
# bullet_0 (orange)
bullet_0_p = item_to_int(pos['bullet_0'])
bullet_0 = plane.subsurface(pygame.Rect((bullet_0_p[2], bullet_0_p[3]), (bullet_0_p[0], bullet_0_p[1])))
# background image
bg1 = pygame.image.load('resources/bg_01.png')
# enemy_s
enemy_s_p = item_to_int(pos['enemy_s'])
enemy_s = plane.subsurface(pygame.Rect((enemy_s_p[2], enemy_s_p[3]), (enemy_s_p[0], enemy_s_p[1])))
enemy_s_rect = pygame.Rect(enemy_s.get_rect())
# enemy_m
enemy_m_p = item_to_int(pos['enemy_m'])
enemy_m = plane.subsurface(pygame.Rect((enemy_m_p[2], enemy_m_p[3]), (enemy_m_p[0], enemy_m_p[1])))
enemy_m_rect = pygame.Rect(enemy_m.get_rect())
# enemy_b
enemy_b_p = item_to_int(pos['enemy_b'])
enemy_b = plane.subsurface(pygame.Rect((enemy_b_p[2], enemy_b_p[3]), (enemy_b_p[0], enemy_b_p[1])))
enemy_b_rect = pygame.Rect(enemy_b.get_rect())
bullet_1_time = 15
bullet_1_array = [bullet_1_pos]
enemytimer = [100, 200, 300]
enemytimers = [0, 0, 0]
# probability that enemy planes fire bullets
enemy_s_g = [1, 4, 7, 9]
enemy_m_g = [1, 4]
enemy_b_g = [1]
# lists holding enemy bullets
enemy_s_array = []
enemy_m_array = []
enemy_b_array = []
# lists holding enemy planes
smallenemy = [[100, 0]]
midenemy = []
bigenemy = []
while True:
bullet_1_time -= 1
for i in range(3):
enemytimer[i] -= 1
screen.fill(0)
screen.blit(bg1, (0, 0))
screen.blit(hero_1, hero_1_pos)
# draw hero_1 bullets
if not bullet_1_time:
bullet_1_array.append([hero_1_pos[0] + hero_1_p[0] / 2 - bullet_1_p[0] / 2 + 1, hero_1_pos[1] - bullet_1_p[1]])
bullet_1_time = 15
index = 0
for bullet_pos in bullet_1_array:
if bullet_pos[1] < 0:
bullet_1_array.pop(index)
bullet_pos[1] -= 5
index += 1
for bullet_pos in bullet_1_array:
screen.blit(bullet_1, bullet_pos)
# draw small enemy planes
if not enemytimer[0]:
smallenemy.append([random.randint(0, 410), -20])
enemytimer[0] = 100 - (enemytimers[0] * 2)
enemytimers[0] = 35 if enemytimers[0] > 35 else enemytimers[0] + 5
index = 0
for se in smallenemy:
if se[1] > 650:
smallenemy.pop(index)
se[1] += 3
enemy_s_rect.left = se[0]
enemy_s_rect.top = se[1]
index_bullet = 0
for bullet in bullet_1_array:
bullet_1_rect.left = bullet[0]
bullet_1_rect.top = bullet[1]
if enemy_s_rect.colliderect(bullet_1_rect):
bullet_1_array.pop(index_bullet)
smallenemy.pop(index)
index += 1
# randomly decide whether to fire a bullet
# r = random.randint(1, 500)
# if r in enemy_s_g:
# enemy_s_array.append([se[0] + 15, se[1] + 27])
index = 0
# for bullet in enemy_s_array:
# if bullet[1] > 650:
# enemy_s_array.pop(index)
# bullet[1] += 5
# index += 1
for se in smallenemy:
screen.blit(enemy_s, se)
for bullet in enemy_s_array:
screen.blit(bullet_0, bullet)
# draw medium enemy planes
if not enemytimer[1]:
midenemy.append([random.randint(0, 380), -40])
enemytimer[1] = 200 - (enemytimers[1] * 2)
enemytimers[1] = 55 if enemytimers[1] > 55 else enemytimers[1] + 5
index = 0
for me in midenemy:
if me[1] > 650:
midenemy.pop(index)
me[1] += 2
index += 1
for me in midenemy:
screen.blit(enemy_m, me)
# draw big enemy planes
if not enemytimer[2]:
bigenemy.append([random.randint(0, 340), -100])
enemytimer[2] = 300 - (enemytimers[2] * 2)
enemytimers[2] = 65 if enemytimers[2] > 65 else enemytimers[2] + 5
index = 0
for be in bigenemy:
if be[1] > 650:
bigenemy.pop(index)
be[1] += 1
index += 1
for be in bigenemy:
screen.blit(enemy_b, be)
pygame.display.flip()
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
exit(0)
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_w:
keys[0] = True
elif event.key == pygame.K_a:
keys[1] = True
elif event.key == pygame.K_s:
keys[2] = True
elif event.key == pygame.K_d:
keys[3] = True
if event.type == pygame.KEYUP:
if event.key == pygame.K_w:
keys[0] = False
elif event.key == pygame.K_a:
keys[1] = False
elif event.key == pygame.K_s:
keys[2] = False
elif event.key == pygame.K_d:
keys[3] = False
if keys[0]:
hero_1_pos[1] -= 5
elif keys[2]:
hero_1_pos[1] += 5
if keys[1]:
hero_1_pos[0] -= 5
elif keys[3]:
hero_1_pos[0] += 5
|
myangeline/pygame
|
flightgame/game.py
|
Python
|
apache-2.0
| 5,800
|
from common.methods import set_progress
from utilities.models import ConnectionInfo
from servicecatalog.models import ServiceBlueprint
from infrastructure.models import CustomField
import json
from ast import literal_eval
import requests
API_CLIENT_CI = "Citrix API"
def create_custom_fields_as_needed():
CustomField.objects.get_or_create(
name='record_id',
defaults={
"label": 'Citrix DNS Record ID',
"type": 'STR',
}
)
CustomField.objects.get_or_create(
name='record_value',
defaults={
"label": 'Citrix DNS Record Value',
"type": 'STR',
}
)
CustomField.objects.get_or_create(
name='ttl',
defaults={
"label": 'Citrix DNS Record TTL',
"type": 'INT',
}
)
CustomField.objects.get_or_create(
name='recordType',
defaults={
"label": 'Citrix DNS Record Type',
"type": 'STR',
}
)
def get_citrix_url():
ci = ConnectionInfo.objects.get(name=API_CLIENT_CI)
return "{protocol}://{hostname}".format(protocol=ci.protocol, hostname=ci.ip)
def get_citrix_api_token():
# The Citrix API uses tokens to authorize requests. The tokens expire after a short while and have to be regenerated.
ci = ConnectionInfo.objects.get(name=API_CLIENT_CI)
url = get_citrix_url()
response = requests.get(
"{url}/api/oauth/token?client_id={client_id}&client_secret={client_secret}&grant_type=client_credentials".format(
url=url, client_id=ci.username, client_secret=ci.password))
token = response.json().get('access_token')
return token
def generate_options_for_recordType(**kwargs):
return ["A", "AAAA", "MX"]
def generate_options_for_editRecordType(**kwargs):
return [(True, "Yes"), (False, "No")]
def generate_options_for_editRecordValue(**kwargs):
return [(True, "Yes"), (False, "No")]
def generate_options_for_editTTL(**kwargs):
return [(True, "Yes"), (False, "No")]
# Need a way to return a string instead of a list.
# def generate_options_for_value(**kwargs):
# resource = kwargs.get('resource')
# return literal_eval(resource.record_value).get('addresses')
def run(resource, *args, **kwargs):
create_custom_fields_as_needed()
addresses = literal_eval(resource.record_value).get('addresses')
set_progress(f"Addresses {addresses}")
_value = "".join(addresses)
url = f"https://portal.cedexis.com:443/api/v2/config/authdns.json/record/{resource.record_id}"
token = get_citrix_api_token()
editRecordValue = "{{ editRecordValue }}"
val = "{{ value }}"
value = val or _value
editTTL = "{{ editTTL }}"
ttl = "{{ ttl }}" or resource.ttl
editRecordType = "{{ editRecordType }}"
recordType = "{{ recordType }}" or resource.recordType
dnsZone = resource.citrix_zone_id
if not token:
return "FAILURE", "", "No token Authorization Token. Ensure you have set up your credentials on the " \
"connection info page "
head = {'Authorization': 'Bearer ' + token, 'Content-Type': 'application/json'}
data = json.dumps({
"recordType": recordType,
"quickEdit": True,
"response": value,
"ttl": ttl,
"dnsZoneId": dnsZone,
"id": resource.record_id
}
)
response = requests.put(url=url, data=data, headers=head)
bp = ServiceBlueprint.objects.get(name='Citrix ITM Zone')
zone = [res for res in bp.resource_set.all() if res.citrix_zone_id == dnsZone]
if response.ok:
if val:
value = {"addresses": [val]}
resource.name = '{}- {}'.format(dnsZone, recordType)
resource.parent_resource = zone[0]
resource.record_id = response.json().get('id')
resource.record_value = value
resource.citrix_zone_id = response.json().get('dnsZoneId')
resource.recordType = recordType
resource.ttl = int(ttl)
resource.save()
return "SUCCESS", "Sample output message", ""
else:
return "FAILURE", "", "{}".format(response.json().get('errorDetails')[0].get('developerMessage'))
|
CloudBoltSoftware/cloudbolt-forge
|
blueprints/citrix/citrix_itm_predictive_dns_record/management/edit_record.py
|
Python
|
apache-2.0
| 4,201
|
import csv
import datetime
import logging
import os
from celery.task import task
from django.conf import settings
from django.contrib.auth import get_user_model
from django.utils.timezone import now
from libya_elections.constants import REMINDER_CHECKIN, REMINDER_REPORT, \
REMINDER_LAST_REPORT, REMINDER_CLOSE
from polling_reports.models import CenterOpen, PollingReport, StaffPhone
from register.models import Whitelist
from text_messages.utils import get_message
from .models import Batch, Broadcast
from .utils import Line
logger = logging.getLogger(__name__)
def read_messages_from_file(file_path):
"""
Read uploaded bulk SMS file.
Generate tuples: (phone_number, message, from_shortcode).
Delete file afterward.
:param file_path:
:return:
"""
# We don't currently enable customization of the from_shortcode via file upload.
# Just use the default.
from_shortcode = None
with open(file_path, encoding='utf-8') as f:
reader = csv.reader(f)
for row in reader:
if any(row):
line = Line._make(row)
number = int(line.number)
yield number, line.message, from_shortcode
os.remove(file_path)
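# Illustrative example (not part of the original module; the number is made up
# and Line is assumed to expose its fields in (number, message) order, matching
# the attribute access above): a CSV row such as
#     218911234567,Please send your polling report
# would be yielded as
#     (218911234567, 'Please send your polling report', None)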
@task
def upload_bulk_sms_file(batch_id, file_path):
"""
Upload a batch of bulk SMS messages for the given batch. Delete
the temp file after we're done.
Assumes the file is valid (run is_file_valid on it first!)
:param batch_id:
:param file_path:
:return: message_for_user
"""
batch = Batch.objects.get(id=batch_id)
batch.add_messages(read_messages_from_file(file_path))
batch.status = Batch.PENDING
batch.save()
# Break out some of the logic for sending polling report reminder messages
# for easier testing
class PollingReportReminderMessage(object):
"""
Capture some of the common logic for polling report reminders.
(Do not instantiate, use the subclasses.)
"""
def __init__(self, message_number, reminder_number):
self.message_number = message_number
self.reminder_number = reminder_number
def get_message_code(self):
raise NotImplementedError
def get_message_text(self):
context = {'message_number': self.message_number,
'reminder_number': self.reminder_number}
return get_message(self.get_message_code()).msg.format(**context)
def get_phone_numbers_to_send_to(self):
"""
Generator that yields (phone_number, message_text, from_shortcode) tuples
for the phone numbers that we need to send this reminder to.
"""
# Get the phone numbers we want to send to, excluding those that have
# already done the thing we want to remind them of
phone_numbers = self.PhoneModel.objects.exclude(phone_number__in=self.to_exclude())\
.values_list('phone_number', flat=True)
message_text = self.get_message_text()
# Set from_number to REPORTS_SHORT_CODE so that recipient can
# simply just respond to this message with their report.
from_shortcode = settings.REPORTS_SHORT_CODE
for phone_number in phone_numbers:
yield phone_number, message_text, from_shortcode
def to_exclude(self):
raise NotImplementedError
class CheckinReminderMessage(PollingReportReminderMessage):
"""
Message telling user to check in (activate phone, roll call)
"""
def __init__(self, message_number, reminder_number):
super(CheckinReminderMessage, self).__init__(message_number, reminder_number)
self.PhoneModel = Whitelist
def get_message_code(self):
return REMINDER_CHECKIN
def to_exclude(self):
"""Return list of phone numbers to exclude"""
midnight = now().replace(hour=0, minute=0, microsecond=0)
return CenterOpen.objects.filter(
creation_date__gte=midnight,
).values_list('phone_number', flat=True)
class PollingDayReportReminderMessage(PollingReportReminderMessage):
"""
Message telling user to send in polling day statistics report
"""
def __init__(self, message_number, reminder_number):
super(PollingDayReportReminderMessage, self).__init__(message_number, reminder_number)
self.PhoneModel = StaffPhone
def get_message_code(self):
return {
4: REMINDER_REPORT,
5: REMINDER_REPORT,
6: REMINDER_LAST_REPORT,
7: REMINDER_CLOSE,
}[self.message_number]
def to_exclude(self):
"""Return list of phone numbers to exclude"""
reporting_period = self.message_number - 3
one_day_ago = now() - datetime.timedelta(hours=24)
return PollingReport.objects.filter(
period_number=reporting_period,
creation_date__gte=one_day_ago,
).values_list('phone_number', flat=True)
@task
def message_reminder_task(message_number, reminder_number, audience, election):
"""
Make a batch to send out a bunch of reminder messages to a given audience,
if they haven't sent us the expected report yet.
"""
logger.debug("Start message_reminder_task")
if audience not in ('whitelist', 'registered'):
raise ValueError("Unknown audience type %s - expected whitelist or registered" % audience)
# Batches need to be owned by somebody - pick a non-random superuser
user = get_user_model().objects.filter(is_active=True, is_superuser=True)[0]
batch = Batch.objects.create(
name="Reminder %d for message_number %d" % (reminder_number, message_number),
created_by=user,
priority=Batch.PRIORITY_TIME_CRITICAL)
# create the corresponding broadcast object
broadcast = Broadcast.objects.create(
created_by=batch.created_by,
batch=batch,
audience=Broadcast.STAFF_ONLY,
message=batch.name, # this message is only temporary
)
try:
if audience == 'whitelist':
msg = CheckinReminderMessage(message_number, reminder_number)
else:
msg = PollingDayReportReminderMessage(message_number, reminder_number)
batch.add_messages(msg.get_phone_numbers_to_send_to())
batch.status = Batch.APPROVED
batch.reviewed_by = user
batch.save()
# update the message for the broadcast.
broadcast.message = msg.get_message_text()
broadcast.save()
logger.debug("Batch saved")
except Exception:
logger.exception("Error while creating message reminder batch")
# If anything went wrong, don't leave partial batch lying around in unknown state
batch.delete()
broadcast.delete()
raise
@task
def approve_broadcast(broadcast_id):
"""Creates messages for each individual in the audience and
changes batch status to approved."""
broadcast = Broadcast.objects.get(pk=broadcast_id)
messages = broadcast.get_messages()
batch = broadcast.batch
batch.add_messages(messages)
batch.status = Batch.APPROVED
batch.save()
|
SmartElect/SmartElect
|
bulk_sms/tasks.py
|
Python
|
apache-2.0
| 7,112
|
#!/usr/bin/env python
# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import itk
itk.auto_progress(2)
if len(sys.argv) < 3:
print("Usage: " + sys.argv[0] + " <InputFileName> <OutputFileName> [Extension]")
sys.exit(1)
inputFileName = sys.argv[1]
outputFileName = sys.argv[2]
if len(sys.argv) > 3:
extension = sys.argv[3]
else:
extension = ".png"
fileNameFormat = outputFileName + "-%d" + extension
Dimension = 3
PixelType = itk.UC
InputImageType = itk.Image[PixelType, Dimension]
ReaderType = itk.ImageFileReader[InputImageType]
reader = ReaderType.New()
reader.SetFileName(inputFileName)
OutputPixelType = itk.UC
RescaleImageType = itk.Image[OutputPixelType, Dimension]
RescaleFilterType = itk.RescaleIntensityImageFilter[InputImageType, RescaleImageType]
rescale = RescaleFilterType.New()
rescale.SetInput(reader.GetOutput())
rescale.SetOutputMinimum(0)
rescale.SetOutputMaximum(255)
rescale.UpdateLargestPossibleRegion()
region = reader.GetOutput().GetLargestPossibleRegion()
size = region.GetSize()
fnames = itk.NumericSeriesFileNames.New()
fnames.SetStartIndex(0)
fnames.SetEndIndex(size[2] - 1)
fnames.SetIncrementIndex(1)
fnames.SetSeriesFormat(fileNameFormat)
OutputImageType = itk.Image[OutputPixelType, 2]
WriterType = itk.ImageSeriesWriter[RescaleImageType, OutputImageType]
writer = WriterType.New()
writer.SetInput(rescale.GetOutput())
writer.SetFileNames(fnames.GetFileNames())
writer.Update()
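# Usage sketch (illustrative; file names made up): for a 3-D unsigned-char
# volume, e.g.
#   python Code.py inputVolume.mha slice .png
# the script rescales intensities to 0-255 and writes one 2-D file per z-slice,
# named slice-0.png, slice-1.png, ..., up to index size[2] - 1.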
|
InsightSoftwareConsortium/ITKExamples
|
src/IO/ImageBase/GenerateSlicesFromVolume/Code.py
|
Python
|
apache-2.0
| 1,976
|
# -*- coding: utf-8 -*-
import unittest
from datetime import datetime
from unittest.mock import call
from uuid import uuid4, UUID
from mock import patch, MagicMock
from alamo_worker.alerter.backend.cassandra import (
CassandraDriver, SELECT_QUERY, INSERT_QUERY, INSERT_SERVICE_QUERY,
AsyncCassandraDriver
)
from alamo_worker.alerter.utils import CachedResult
from tests.base import run_async
class CassandraDriverTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.check_uuid = str(uuid4())
cls.trigger_uuid = str(uuid4())
@patch('alamo_worker.alerter.backend.cassandra.Cluster')
@patch('alamo_worker.alerter.backend.cassandra.PlainTextAuthProvider')
def setUp(self, auth_mock, cluster_mock):
session_mock = MagicMock()
cluster_mock.connect = MagicMock(return_value=session_mock)
self.trigger = dict(
id=self.trigger_uuid,
uuid=str(uuid4()),
severity='WARNING',
enabled=True,
result=dict(status=0, message='')
)
self.driver = CassandraDriver(
contact_points=[''],
username='username',
password='password',
keyspace='test'
)
def test_prepared_statements(self):
expected_calls = [call(SELECT_QUERY), call(INSERT_QUERY),
call(INSERT_SERVICE_QUERY)]
self.driver._connect()
self.driver._session.prepare.assert_has_calls(
expected_calls, any_order=True
)
def test_get_result_method(self):
self.driver.get_result(self.check_uuid, self.trigger_uuid, 10)
self.driver._connect()
self.driver._session.execute.assert_called_once_with(
self.driver._save_query_stmt,
(UUID(self.check_uuid), UUID(self.trigger_uuid), 10),
execution_profile='normal'
)
def test_save_result_method(self):
_now = datetime.now()
self.driver._connect()
self.driver.save_result(
self.check_uuid, self.trigger, _now, _now, _now, False, '999'
)
self.assertTrue(self.driver._session.execute.called)
class DummyRow(object):
def __init__(self, *, status, alert_sent, message):
self.status = status
self.alert_sent = alert_sent
self.message = message
class AsyncCassandraDriverTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.check_uuid = uuid4()
cls.trigger_uuid = uuid4()
@patch.object(AsyncCassandraDriver, '_connect')
def setUp(self, mock_connect):
async def _execute(*args, **kwargs):
return [
DummyRow(status=0, alert_sent=False, message='')
]
driver = AsyncCassandraDriver(
contact_points=[''],
username='username',
password='password',
keyspace='test')
driver._session = MagicMock()
driver._session.execute_future.side_effect = _execute
self.driver = driver
self.trigger = dict(
id=self.trigger_uuid,
uuid=str(uuid4()),
severity='WARNING',
enabled=True,
result=dict(status=0, message='')
)
def test_get_result(self):
result = run_async(
self.driver.get_result(
str(self.check_uuid), str(self.trigger_uuid), 10)
)
self.driver._session.execute_future.assert_called_once_with(
self.driver._retrieve_query_stmt, (
self.check_uuid,
self.trigger_uuid,
10
),
execution_profile='normal'
)
expected = CachedResult(0, 0, False, '')
self.assertEqual(result, [expected])
def test_save_result_method(self):
_now = datetime.now()
self.driver._save_query_stmt = MagicMock()
self.driver._save_service_query_stmt = MagicMock()
run_async(self.driver.save_result(
str(self.check_uuid), self.trigger, _now, _now, _now, False, '999'
))
self.assertTrue(self.driver._session.execute_future.called)
|
RulersOfAsgard/ALAMO-worker
|
alamo_worker/alerter/tests/test_cassandra_backend.py
|
Python
|
apache-2.0
| 4,182
|
import numpy as np
import tvm
import topi
import topi.testing
from topi.util import get_const_tuple
from tvm.contrib.pickle_memoize import memoize
def generate_quantized_np(shape, bits, out_dtype):
min_val = 0
max_val = 1 << bits
return np.random.randint(min_val, max_val, size=shape).astype(out_dtype)
def verify_bitserial_conv2d_nchw(batch, in_size, in_channel, num_filter, kernel, stride, padding,
activation_bits, weight_bits, dorefa):
in_height = in_width = in_size
input_type = 'uint32'
out_dtype = 'int32'
with tvm.target.create('llvm'):
A = tvm.placeholder((batch, in_channel, in_height, in_width), dtype=input_type, name='A')
W = tvm.placeholder((num_filter, in_channel, kernel, kernel), dtype=input_type, name='W')
B = topi.nn.bitserial_conv2d(A, W, stride, padding, activation_bits, weight_bits,
out_dtype=out_dtype, layout="NCHW", dorefa=dorefa)
s = topi.generic.schedule_bitserial_conv2d_nchw([B])
a_shape = get_const_tuple(A.shape)
w_shape = get_const_tuple(W.shape)
@memoize("topi.tests.test_topi_bitseral_conv2d_nchw")
def get_ref_data():
a_np = generate_quantized_np(get_const_tuple(a_shape), activation_bits, input_type)
w_np = generate_quantized_np(get_const_tuple(w_shape), weight_bits, input_type)
if dorefa:
w_ = np.copy(w_np).astype(out_dtype)
for x in np.nditer(w_, op_flags=['readwrite']):
x[...] = 1 if x == 1 else -1
b_np = topi.testing.conv2d_nchw_python(a_np.astype(out_dtype), w_, stride, padding)
else:
b_np = topi.testing.conv2d_nchw_python(a_np, w_np, stride, padding)
return a_np, w_np, b_np
a_np, w_np, b_np = get_ref_data()
ctx = tvm.cpu(0)
a = tvm.nd.array(a_np, ctx)
w = tvm.nd.array(w_np, ctx)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), ctx)
func = tvm.build(s, [A, W, B], "llvm")
func(a, w, b)
np.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5)
def verify_bitserial_conv2d_nhwc(batch, in_size, in_channel, num_filter, kernel, stride, padding,
activation_bits, weight_bits, dorefa):
in_height = in_width = in_size
input_type='uint32'
out_dtype='int32'
with tvm.target.create('llvm'):
A = tvm.placeholder((batch, in_height, in_width, in_channel), dtype=input_type, name='A')
W = tvm.placeholder((kernel, kernel, in_channel, num_filter), dtype=input_type, name='W')
B = topi.nn.bitserial_conv2d(A, W, stride, padding, activation_bits, weight_bits, out_dtype=out_dtype,
layout="NHWC", dorefa=dorefa)
s = topi.generic.schedule_bitserial_conv2d_nhwc([B])
a_shape = get_const_tuple(A.shape)
w_shape = get_const_tuple(W.shape)
@memoize("topi.tests.test_topi_bitseral_conv2d_nhwc")
def get_ref_data():
a_np = generate_quantized_np(get_const_tuple(a_shape), activation_bits, input_type)
w_np = generate_quantized_np(get_const_tuple(w_shape), weight_bits, input_type)
if dorefa:
w_ = np.copy(w_np).astype(out_dtype)
for x in np.nditer(w_, op_flags=['readwrite']):
x[...] = 1 if x == 1 else -1
b_np = topi.testing.conv2d_nhwc_python(a_np, w_, stride, padding).astype(out_dtype)
else:
b_np = topi.testing.conv2d_nhwc_python(a_np, w_np, stride, padding).astype(out_dtype)
return a_np, w_np, b_np
a_np, w_np, b_np = get_ref_data()
ctx = tvm.cpu(0)
a = tvm.nd.array(a_np, ctx)
w = tvm.nd.array(w_np, ctx)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), ctx)
func = tvm.build(s, [A, W, B], 'llvm')
func(a, w, b)
np.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5)
def test_bitserial_conv2d():
in_size = 56
ic, oc = 64, 64
k = 3
stride = 1
pad = 1
verify_bitserial_conv2d_nchw(1, in_size, ic, oc, k, stride, pad, 1, 1, True)
verify_bitserial_conv2d_nchw(1, in_size, ic, oc, k, stride, pad, 2, 1, True)
verify_bitserial_conv2d_nchw(1, in_size, ic, oc, k, stride, pad, 1, 1, False)
verify_bitserial_conv2d_nchw(1, in_size, ic, oc, k, stride, pad, 2, 1, False)
verify_bitserial_conv2d_nchw(1, in_size, ic, oc, k, stride, pad, 2, 2, False)
verify_bitserial_conv2d_nhwc(1, in_size, ic, oc, k, stride, pad, 1, 1, True)
verify_bitserial_conv2d_nhwc(1, in_size, ic, oc, k, stride, pad, 2, 1, True)
verify_bitserial_conv2d_nhwc(1, in_size, ic, oc, k, stride, pad, 1, 1, False)
verify_bitserial_conv2d_nhwc(1, in_size, ic, oc, k, stride, pad, 2, 1, False)
verify_bitserial_conv2d_nhwc(1, in_size, ic, oc, k, stride, pad, 2, 2, False)
if __name__ == "__main__":
test_bitserial_conv2d()
|
mlperf/training_results_v0.6
|
Fujitsu/benchmarks/resnet/implementations/mxnet/3rdparty/tvm/topi/tests/python/test_topi_bitserial_conv2d.py
|
Python
|
apache-2.0
| 4,850
|
from flask import Flask, redirect, abort, url_for
app = Flask(__name__)
app.debug = True
@app.route('/')
def index():
return redirect(url_for('login'))
@app.route('/login')
def login():
abort(401)
this_is_never_executed()
if __name__ == '__main__':
app.run()
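# Behaviour sketch (illustrative, not part of the original app): a request to
# "/" returns a 302 redirect to "/login", and "/login" aborts with
# 401 Unauthorized, so this_is_never_executed() is indeed never reached.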
|
schanezon/webapp
|
test.py
|
Python
|
apache-2.0
| 278
|
#!/usr/bin/env python3
# IBM_PROLOG_BEGIN_TAG
# This is an automatically generated prolog.
#
# $Source: op-test-framework/testcases/OpTestPCI.py $
#
# OpenPOWER Automated Test Project
#
# Contributors Listed Below - COPYRIGHT 2015,2017
# [+] International Business Machines Corp.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# IBM_PROLOG_END_TAG
'''
OpTestPCI: PCI checks
-------------------------------
Perform various PCI validations and checks
--run-suite BasicPCI (includes skiroot_suite and host_suite)
--run-suite pci-regression
Sample naming conventions below, see each test method for
the applicable options per method.
--run testcases.OpTestPCI.PCISkiroot.pcie_link_errors
^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^ ^^^^^^^^^^^^^^^^
module name subclass test method
--run testcases.OpTestPCI.PCIHost.pcie_link_errors
^^^^^^^^^^^^^^^^^^^ ^^^^^^^ ^^^^^^^^^^^^^^^^
module name subclass test method
'''
import unittest
import logging
import pexpect
import time
import re
import difflib
from distutils.version import LooseVersion
import OpTestConfiguration
import OpTestLogger
from common.OpTestSystem import OpSystemState
from common.Exceptions import CommandFailed, UnexpectedCase
log = OpTestLogger.optest_logger_glob.get_logger(__name__)
skiroot_done = 0
host_done = 0
skiroot_lspci = None
host_lspci = None
reset_console = 0
class OpClassPCI(unittest.TestCase):
'''
Main Parent class
We cannot guarantee a soft boot entry, so need to force to PS or OS
'''
@classmethod
def setUpClass(cls, desired=None, power_cycle=0):
'''
Main setUpClass, this is shared across all subclasses.
This is called once when the subclass is instantiated.
'''
if desired is None:
cls.desired = OpSystemState.PETITBOOT_SHELL
else:
cls.desired = desired
cls.power_cycle = power_cycle
cls.conf = OpTestConfiguration.conf
cls.cv_SYSTEM = cls.conf.system()
cls.cv_HOST = cls.conf.host()
cls.my_connect = None
if cls.power_cycle == 1:
cls.cv_SYSTEM.goto_state(OpSystemState.OFF)
cls.power_cycle = 0
try:
if cls.desired == OpSystemState.OS:
# set bootdev for reboot cases
cls.cv_SYSTEM.sys_set_bootdev_no_override()
cls.cv_SYSTEM.goto_state(OpSystemState.OS)
cls.c = cls.cv_SYSTEM.host().get_ssh_connection()
else:
cls.cv_SYSTEM.sys_set_bootdev_setup()
cls.cv_SYSTEM.goto_state(OpSystemState.PETITBOOT_SHELL)
cls.c = cls.cv_SYSTEM.console
cls.pty = cls.cv_SYSTEM.console.get_console()
except Exception as e:
log.debug("Unable to find cls.desired, probably a test code problem")
cls.cv_SYSTEM.goto_state(OpSystemState.OS)
@classmethod
def tearDownClass(cls):
'''
Main tearDownClass, this is shared across all subclasses.
This is called once when the subclass is taken down.
'''
global skiroot_done
global host_done
global skiroot_lspci
global host_lspci
global reset_console
if reset_console == 1:
cls.refresh_console()
@classmethod
def set_console(cls):
'''
This method allows setting the shared class console to the real
console when needed, i.e. driver_bind tests which unbind the
ethernet drivers.
'''
cls.c = cls.cv_SYSTEM.console
@classmethod
def refresh_console(cls):
'''
This method is used to set the shared class console back to the proper
object (this gets set to the real console when we unbind the ethernet)
in the driver_bind test as an example.
'''
# this done after a reboot
global reset_console
if cls.cv_SYSTEM.get_state() == OpSystemState.PETITBOOT_SHELL:
cls.c = cls.cv_SYSTEM.console
else:
cls.c = cls.cv_SYSTEM.host().get_ssh_connection()
reset_console = 0
def setUp(self):
'''
All variables common to a subclass need to be defined here since
this method gets called before each subclass test
'''
pass
def tearDown(self):
'''
This is done at the end of each subclass test.
'''
global reset_console
if reset_console == 1:
self.refresh_console()
def get_lspci(self):
'''
Usually used internally, can be run for query of system
Case A --run testcases.OpTestPCI.PCISkiroot.get_lspci
Case B --run testcases.OpTestPCI.PCISkirootSoftboot.get_lspci
Case C --run testcases.OpTestPCI.PCISkirootHardboot.get_lspci
Case D --run testcases.OpTestPCI.PCIHost.get_lspci
Case E --run testcases.OpTestPCI.PCIHostSoftboot.get_lspci
Case F --run testcases.OpTestPCI.PCIHostHardboot.get_lspci
'''
lspci_data = self.c.run_command("lspci -mm -n")
return lspci_data
def check_commands(self):
'''
Checks for general capability to run commands
Case A --run testcases.OpTestPCI.PCISkiroot.check_commands
Case B --run testcases.OpTestPCI.PCISkirootSoftboot.check_commands
Case C --run testcases.OpTestPCI.PCISkirootHardboot.check_commands
Case D --run testcases.OpTestPCI.PCIHost.check_commands
Case E --run testcases.OpTestPCI.PCIHostSoftboot.check_commands
Case F --run testcases.OpTestPCI.PCIHostHardboot.check_commands
'''
list_pci_devices_commands = ["lspci -mm -n",
"lspci -m",
"lspci -t",
"lspci -n",
"lspci -nn",
"cat /proc/bus/pci/devices",
"ls --color=never /sys/bus/pci/devices/ -l",
"lspci -vvxxx",
]
for cmd in list_pci_devices_commands:
self.c.run_command(cmd, timeout=300)
list_usb_devices_commands = ["lsusb",
"lsusb -t",
"lsusb -v",
]
for cmd in list_usb_devices_commands:
self.c.run_command(cmd)
# Test that we do not EEH on reading all config space
self.c.run_command(
"hexdump -C /sys/bus/pci/devices/*/config", timeout=600)
def get_lspci_file(self):
'''
Usually used internally, can be run for query of system
Case A --run testcases.OpTestPCI.PCISkiroot.get_lspci_file
Case B --run testcases.OpTestPCI.PCISkirootSoftboot.get_lspci_file
Case C --run testcases.OpTestPCI.PCISkirootHardboot.get_lspci_file
Case D --run testcases.OpTestPCI.PCIHost.get_lspci_file
Case E --run testcases.OpTestPCI.PCIHostSoftboot.get_lspci_file
Case F --run testcases.OpTestPCI.PCIHostHardboot.get_lspci_file
'''
if self.conf.lspci_file():
with open(self.conf.lspci_file(), 'r') as f:
file_content = f.read().splitlines()
log.debug("file_content={}".format(file_content))
return file_content
def _diff_my_devices(self,
listA=None,
listA_name=None,
listB=None,
listB_name=None):
'''
Performs unified diff of two lists
'''
unified_output = difflib.unified_diff(
[_f for _f in listA if _f],
[_f for _f in listB if _f],
fromfile=listA_name,
tofile=listB_name,
lineterm="")
unified_list = list(unified_output)
log.debug("unified_list={}".format(unified_list))
return unified_list
def compare_boot_devices(self):
'''
This is best leveraged in the suite pci-regression,
where both the skiroot/host softboot and the
skiroot/host hardboot get done in the same wave,
so that the global variables carry over to compare.
If both skiroot and host lspci completed, will
compare lspci results.
If you want to compare against an input file, use
compare_live_devices.
Case A --run testcases.OpTestPCI.PCISkiroot.compare_boot_devices
Case B --run testcases.OpTestPCI.PCISkirootSoftboot.compare_boot_devices
Case C --run testcases.OpTestPCI.PCISkirootHardboot.compare_boot_devices
Case D --run testcases.OpTestPCI.PCIHost.compare_boot_devices
Case E --run testcases.OpTestPCI.PCIHostSoftboot.compare_boot_devices
Case F --run testcases.OpTestPCI.PCIHostHardboot.compare_boot_devices
'''
global skiroot_done
global host_done
global skiroot_lspci
global host_lspci
lspci_output = self.get_lspci()
if self.cv_SYSTEM.get_state() == OpSystemState.PETITBOOT_SHELL:
skiroot_lspci = lspci_output
skiroot_done = 1
else:
host_lspci = lspci_output
host_done = 1
if host_done and skiroot_done:
compare_results = self._diff_my_devices(listA=skiroot_lspci,
listA_name="skiroot_lspci",
listB=host_lspci,
listB_name="host_lspci")
if len(compare_results):
self.assertEqual(len(compare_results), 0,
"skiroot_lspci and host_lspci devices differ:\n{}"
.format('\n'.join(i for i in compare_results)))
# refresh so next pair can be matched up, i.e. soft or hard
skiroot_done = 0
host_done = 0
skiroot_lspci = None
host_lspci = None
def compare_live_devices(self):
'''
        Compares the live system lspci output against an input file
        (host-lspci), provided either in the conf file or on the command line.
"ssh user@host lspci -mm -n > host-lspci.txt"
--host-lspci host-lspci.txt on command line
or
host_lspci=host-lspci.txt in conf file
Case A --run testcases.OpTestPCI.PCISkiroot.compare_live_devices
Case B --run testcases.OpTestPCI.PCISkirootSoftboot.compare_live_devices
Case C --run testcases.OpTestPCI.PCISkirootHardboot.compare_live_devices
Case D --run testcases.OpTestPCI.PCIHost.compare_live_devices
Case E --run testcases.OpTestPCI.PCIHostSoftboot.compare_live_devices
Case F --run testcases.OpTestPCI.PCIHostHardboot.compare_live_devices
'''
active_lspci = self.get_lspci()
file_lspci = self.get_lspci_file()
if file_lspci:
compare_results = self._diff_my_devices(listA=file_lspci,
listA_name=self.conf.lspci_file(),
listB=active_lspci,
listB_name="Live System")
log.debug("compare_results={}".format(compare_results))
if len(compare_results):
self.assertEqual(len(compare_results), 0,
"Stored ({}) and Active PCI devices differ:\n{}"
.format(self.conf.lspci_file(), ('\n'.join(i for i in compare_results))))
def pcie_link_errors(self):
'''
Checks for link errors
Case A --run testcases.OpTestPCI.PCISkiroot.pcie_link_errors
Case B --run testcases.OpTestPCI.PCISkirootSoftboot.pcie_link_errors
Case C --run testcases.OpTestPCI.PCISkirootHardboot.pcie_link_errors
Case D --run testcases.OpTestPCI.PCIHost.pcie_link_errors
Case E --run testcases.OpTestPCI.PCIHostSoftboot.pcie_link_errors
Case F --run testcases.OpTestPCI.PCIHostHardboot.pcie_link_errors
'''
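        # Scan the OPAL msglog for PHB "Link down" and link-training timeout
        # messages (log levels 2-4), then filter out entries that are
        # expected noise on platforms without slot presence detect (p9dsu).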
        # separate lists; a chained assignment would alias all three names
        total_entries = []
        link_down_entries = []
        timeout_entries = []
try:
link_down_entries = self.c.run_command(
"grep ',[432]\].*PHB#.* Link down' /sys/firmware/opal/msglog")
except CommandFailed as cf:
pass
if link_down_entries:
log.debug("link_down_entries={}".format(link_down_entries))
total_entries = total_entries + link_down_entries
log.debug(
"total_entries with link_down_entries={}".format(total_entries))
try:
timeout_entries = self.c.run_command(
"grep ',[432]\].*Timeout waiting for' /sys/firmware/opal/msglog")
except CommandFailed as cf:
pass
if timeout_entries:
log.debug("timeout_entries={}".format(timeout_entries))
total_entries = total_entries + timeout_entries
log.debug(
"total_entries with timeout_entries={}".format(total_entries))
platform = self.c.run_command("cat /proc/device-tree/compatible")
filter_out = [
'PHB#00(00|30|33|34)\[(0|8):(0|4|3)\]: LINK: Timeout waiting for link up',
'Timeout waiting for downstream link',
]
log.debug("STARTING total_entries={}".format(total_entries))
if re.search(r'p9dsu', platform[0]):
# No presence detect on some p9dsu slots :/
for f in filter_out:
fre = re.compile(f)
total_entries = [l for l in total_entries if not fre.search(l)]
log.debug("P9DSU FILTERED OUT total_entries={}".format(total_entries))
msg = '\n'.join([_f for _f in total_entries if _f])
log.debug("total_entries={}".format(total_entries))
self.assertTrue(len(total_entries) == 0,
"pcie link down/timeout Errors in OPAL log:\n{}".format(msg))
def _get_list_of_pci_devices(self):
cmd = "ls --color=never /sys/bus/pci/devices/ | awk {'print $1'}"
res = self.c.run_command(cmd)
return res
def _get_driver(self, pe):
cmd = "lspci -ks {}".format(pe)
output = self.c.run_command(cmd, timeout=120)
if output:
for line in output:
if 'Kernel driver in use:' in line:
return (line.rsplit(":")[1]).strip(" ")
return None
def _get_list_of_slots(self):
cmd = "ls --color=never /sys/bus/pci/slots/ -1"
res = self.c.run_command(cmd)
return res
def _get_root_pe_address(self):
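        # Locate the block device backing /boot, then map it through
        # /dev/disk/by-path to extract the PCI address of the root PE
        # (so bind/unbind and hotplug tests can skip the boot device).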
cmd = "df -h /boot | awk 'END {print $1}'"
res = self.c.run_command(cmd)
boot_disk = ''.join(res).split("/dev/")[1]
boot_disk = boot_disk.replace("\r\n", "")
awk_string = "awk '{print $(NF-2)}'"
pre_cmd = "ls --color=never -l /dev/disk/by-path/ | grep {} | ".format(
boot_disk)
cmd = pre_cmd + awk_string
res = self.c.run_command(cmd)
root_pe = res[0].split("-")[1]
return root_pe
def _gather_errors(self):
# Gather all errors from kernel and opal logs
try:
self.c.run_command("dmesg -r|grep '<[4321]>'")
except CommandFailed:
pass
try:
self.c.run_command("grep ',[0-4]\]' /sys/firmware/opal/msglog")
except CommandFailed:
pass
def driver_bind(self):
'''
Unbind and then bind the devices
Case A --run testcases.OpTestPCI.PCISkiroot.driver_bind
Case B --run testcases.OpTestPCI.PCISkirootSoftboot.driver_bind
Case C --run testcases.OpTestPCI.PCISkirootHardboot.driver_bind
Case D --run testcases.OpTestPCI.PCIHost.driver_bind
Case E --run testcases.OpTestPCI.PCIHostSoftboot.driver_bind
Case F --run testcases.OpTestPCI.PCIHostHardboot.driver_bind
        Note: use caution when unbinding shared BMC ethernet ports.
'''
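        # For every PCI device except the one backing the boot disk (the
        # root PE), unbind its kernel driver via sysfs, check the device
        # disappears from the driver directory, then bind it back and check
        # it reappears; failures are collected and asserted at the end.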
# since we will be unbinding ethernet drivers, override the console
global reset_console
reset_console = 1
self.set_console()
if self.cv_SYSTEM.get_state() == OpSystemState.PETITBOOT_SHELL:
root_pe = "xxxx"
else:
root_pe = self._get_root_pe_address()
self.c.run_command("dmesg -D")
list = self._get_list_of_pci_devices()
failure_list = {}
for slot in list:
rc = 0
driver = self._get_driver(slot)
if root_pe in slot:
continue
if driver is None:
continue
index = "{}_{}".format(driver, slot)
cmd = "echo -n {} > /sys/bus/pci/drivers/{}/unbind".format(
slot, driver)
log.debug("unbind driver={} slot={} cmd={}".format(
driver, slot, cmd))
try:
self.c.run_command(cmd)
except CommandFailed as cf:
msg = "Driver unbind operation failed for driver {}, slot {}".format(
slot, driver)
failure_list[index] = msg
time.sleep(5)
cmd = 'ls --color=never /sys/bus/pci/drivers/{}'.format(driver)
self.c.run_command(cmd)
path = "/sys/bus/pci/drivers/{}/{}".format(driver, slot)
try:
self.c.run_command("test -d {}".format(path))
rc = 1
except CommandFailed as cf:
pass
cmd = "echo -n {} > /sys/bus/pci/drivers/{}/bind".format(
slot, driver)
log.debug("bind driver={} slot={} cmd={}".format(driver, slot, cmd))
try:
self.c.run_command(cmd)
except CommandFailed as cf:
msg = "Driver bind operation failed for driver {}, slot {}".format(
slot, driver)
failure_list[index] = msg
time.sleep(5)
cmd = 'ls --color=never /sys/bus/pci/drivers/{}'.format(driver)
self.c.run_command(cmd)
try:
self.c.run_command("test -d {}".format(path))
except CommandFailed as cf:
rc = 2
self._gather_errors()
if rc == 1:
msg = "{} not unbound for driver {}".format(slot, driver)
failure_list[index] = msg
if rc == 2:
msg = "{} not bound back for driver {}".format(slot, driver)
failure_list[index] = msg
self.assertEqual(failure_list, {},
"Driver bind/unbind failures {}".format(failure_list))
def hot_plug_host(self):
'''
NEEDS TESTING
Case A --run testcases.OpTestPCI.PCIHost.hot_plug_host
Case B --run testcases.OpTestPCI.PCIHostSoftboot.hot_plug_host
Case C --run testcases.OpTestPCI.PCIHostHardboot.hot_plug_host
'''
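        # Power each hotplug-capable slot off and back on through
        # /sys/bus/pci/slots/<slot>/power and verify the device detaches
        # and reattaches; requires the pnv_php module and kernel >= 4.10.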
        # Currently this feature is enabled only on FSP-based systems
if "FSP" not in self.conf.args.bmc_type:
log.debug(
"Skipping test, currently only OPAL FSP Platform supported for hot_plug_host")
self.skipTest(
"Skipping test, currently only OPAL FSP Platform supported for hot_plug_host")
res = self.c.run_command("uname -r")[-1].split("-")[0]
if LooseVersion(res) < LooseVersion("4.10.0"):
log.debug(
"Skipping test, Kernel does not support hotplug {}".format(res))
self.skipTest(
"Skipping test, Kernel does not support hotplug={}".format(res))
self.cv_HOST.host_load_module("pnv_php")
device_list = self._get_list_of_pci_devices()
root_pe = self._get_root_pe_address()
slot_list = self._get_list_of_slots()
self.c.run_command("dmesg -D")
pair = {} # Pair of device vs slot location code
for device in device_list:
cmd = "lspci -k -s {} -vmm".format(device)
res = self.c.run_command(cmd)
for line in res:
# if "PhySlot:\t" in line:
obj = re.match('PhySlot:\t(.*)', line)
if obj:
pair[device] = obj.group(1)
failure_list = {}
for device, phy_slot in list(pair.items()):
if root_pe in device:
continue
index = "{}_{}".format(device, phy_slot)
path = "/sys/bus/pci/slots/{}/power".format(phy_slot)
try:
self.c.run_command("test -f {}".format(path))
except CommandFailed as cf:
log.debug("Slot {} does not support hotplug".format(phy_slot))
continue # slot does not support hotplug
try:
self.c.run_command("echo 0 > {}".format(path))
except CommandFailed as cf:
msg = "PCI device/slot power off operation failed"
failure_list[index] = msg
time.sleep(5)
cmd = "lspci -k -s {}".format(device)
res = self.c.run_command(cmd)
if device in "\n".join(res):
msg = "PCI device failed to remove after power off operation"
failure_list[index] = msg
try:
self.c.run_command("echo 1 > {}".format(path))
except CommandFailed as cf:
msg = "PCI device/slot power on operation failed"
failure_list[index] = msg
res = self.c.run_command(cmd)
if device not in "\n".join(res):
msg = "PCI device failed to attach back after power on operation"
failure_list[index] = msg
self._gather_errors()
self.assertEqual(failure_list, {},
"PCI Hotplug failures {}".format(failure_list))
def pci_link_check(self):
'''
PCI link checks
Case A --run testcases.OpTestPCI.PCISkiroot.pci_link_check
Case B --run testcases.OpTestPCI.PCISkirootSoftboot.pci_link_check
Case C --run testcases.OpTestPCI.PCISkirootHardboot.pci_link_check
Case D --run testcases.OpTestPCI.PCIHost.pci_link_check
Case E --run testcases.OpTestPCI.PCIHostSoftboot.pci_link_check
Case F --run testcases.OpTestPCI.PCIHostHardboot.pci_link_check
'''
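        # Parse "lspci -vv" for every device, pair upstream bridges/root
        # ports with their downstream devices, and flag links whose
        # negotiated speed or width (LnkSta) is below what both ends
        # advertise as capable (LnkCap).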
lspci_output = self.c.run_command("lspci")
# List of devices that won't be checked
blacklist = [
"Broadcom Limited NetXtreme BCM5719 Gigabit Ethernet PCIe (rev 01)"]
# Populating device id list
device_ids = []
for line in lspci_output:
if line:
line = line.strip().split(' ')
device_ids.append(line[0])
class Device:
def __init__(self, device_info):
self.domain = ""
self.primary = ""
self.slotfunc = ""
self.name = ""
self.secondary = ""
self.capability = ""
self.capspeed = 0
self.capwidth = 0
self.staspeed = 0
self.stawidth = 0
# 0000:00:00.0 PCI bridge: IBM Device 03dc
id_components = device_info[0].split(":")
self.domain = id_components[0]
self.primary = id_components[1]
self.slotfunc = id_components[2].split()[0]
self.name = id_components[-1].strip()
for line in device_info[1:]:
if line:
line = line.strip()
if "Bus:" in line:
line = line.split("secondary=")
self.secondary = line[1][:2]
if "Express (v" in line:
self.capability = "Endpoint"
if "Root Port" in line:
self.capability = "Root"
if "Upstream" in line:
self.capability = "Upstream"
if "Downstream" in line:
self.capability = "Downstream"
if "LnkCap:" in line:
# LnkCap: Port #0, Speed 8GT/s, Width x16, ASPM L0s, Exit Latency L0s unlimited, L1 unlimited
line = line.split("GT/s, Width x")
self.capspeed = float(line[0].split()[-1])
self.capwidth = float(line[1].split(",")[0])
if "LnkSta:" in line:
# LnkSta: Speed 8GT/s, Width x8, TrErr- Train- SlotClk+ DLActive+ BWMgmt- ABWMgmt+
line = line.split("GT/s, Width x")
self.staspeed = float(line[0].split()[-1])
self.stawidth = float(line[1].split(",")[0])
def get_details(self):
msg = ("{}, capability={}, secondary={} \n"
.format(self.get_id(), self.capability, self.secondary))
msg += ("capspeed={}, capwidth={}, staspeed={}, stawidth={}"
.format(self.capspeed, self.capwidth, self.staspeed, self.stawidth))
return msg
def get_id(self):
return "{}:{}:{}".format(self.domain, self.primary, self.slotfunc)
# Checking if two devices are linked together
def devicesLinked(upstream, downstream):
if upstream.domain == downstream.domain:
if upstream.secondary == downstream.primary:
if upstream.capability == "Root":
if downstream.capability == "Upstream":
return True
if downstream.capability == "Endpoint":
return True
if upstream.capability == "Downstream":
if downstream.capability == "Endpoint":
return True
return False
# Checking if LnkSta matches LnkCap - speed
def optimalSpeed(upstream, downstream):
if upstream.capspeed > downstream.capspeed:
optimal_speed = downstream.capspeed
else:
optimal_speed = upstream.capspeed
if optimal_speed > upstream.staspeed:
return False
return True
# Checking if LnkSta matches LnkCap - width
def optimalWidth(upstream, downstream):
if upstream.capwidth > downstream.capwidth:
optimal_width = downstream.capwidth
else:
optimal_width = upstream.capwidth
if optimal_width > upstream.stawidth:
return False
return True
device_list = []
# Filling device objects' details
for device in device_ids:
device_info = self.c.run_command("lspci -s {} -vv".format(device))
device_list.append(Device(device_info))
checked_devices = []
suboptimal_links = ""
blacklist_links = ""
# Returns a string containing details of the suboptimal link
def subLinkInfo(upstream, downstream):
msg = "\nSuboptimal link between {} and {} - ".format(
upstream.get_id(), downstream.get_id())
if not optimalSpeed(upstream, downstream):
if upstream.capspeed > downstream.capspeed:
optimal_speed = downstream.capspeed
else:
optimal_speed = upstream.capspeed
actual_speed = upstream.staspeed
msg += "Link speed capability is {}GT/s but status was {}GT/s. ".format(
optimal_speed, actual_speed)
if not optimalWidth(upstream, downstream):
if upstream.capwidth > downstream.capwidth:
optimal_width = downstream.capwidth
else:
optimal_width = upstream.capwidth
actual_width = upstream.stawidth
msg += "Link width capability is x{} but status was x{}. ".format(
optimal_width, actual_width)
return msg
# Searching through devices to check for links and testing to see if they're optimal
for device in device_list:
if device not in checked_devices:
checked_devices.append(device)
for endpoint in device_list:
if endpoint not in checked_devices:
if devicesLinked(device, endpoint):
checked_devices.append(endpoint)
log.debug("checking link between {} and {}".format(
device.get_id(), endpoint.get_id()))
log.debug(device.get_details())
log.debug(endpoint.get_details())
if endpoint.name in blacklist:
no_check_msg = ("Link between {} and {} not checked as {} is in the list of blacklisted devices"
.format(device.get_id(), endpoint.get_id(), endpoint.get_id()))
log.info(no_check_msg)
blacklist_links += "{}\n".format(no_check_msg)
else:
if(not optimalSpeed(device, endpoint)) or (not optimalWidth(device, endpoint)):
suboptimal_links += subLinkInfo(
device, endpoint)
log.debug("")
log.debug("Finished testing links")
log.debug("blacklist_links={}".format(blacklist_links))
log.debug("suboptimal_links={}".format(suboptimal_links))
# Assert suboptimal list is empty
self.assertEqual(len(suboptimal_links), 0, suboptimal_links)
class PCISkirootSoftboot(OpClassPCI, unittest.TestCase):
'''
    Runs the parent class test cases with this class's unique setup.
'''
@classmethod
def setUpClass(cls):
super(PCISkirootSoftboot, cls).setUpClass()
cls.pty.sendline("reboot")
cls.cv_SYSTEM.set_state(OpSystemState.IPLing)
# clear the states since we rebooted outside the state machine
cls.cv_SYSTEM.util.clear_state(cls.cv_SYSTEM)
cls.cv_SYSTEM.goto_state(OpSystemState.PETITBOOT_SHELL)
@classmethod
def tearDownClass(cls):
super(PCISkirootSoftboot, cls).tearDownClass()
def setUp(self):
        # this is left as a placeholder for per-test setUp
super(PCISkirootSoftboot, self).setUp()
class PCISkirootHardboot(OpClassPCI, unittest.TestCase):
'''
    Runs the parent class test cases with this class's unique setup.
'''
@classmethod
def setUpClass(cls):
super(PCISkirootHardboot, cls).setUpClass(power_cycle=1)
@classmethod
def tearDownClass(cls):
super(PCISkirootHardboot, cls).tearDownClass()
def setUp(self):
        # this is left as a placeholder for per-test setUp
super(PCISkirootHardboot, self).setUp()
class PCISkiroot(OpClassPCI, unittest.TestCase):
'''
    Runs the parent class test cases with this class's unique setup.
'''
def setUp(self):
        # this is left as a placeholder for per-test setUp
super(PCISkiroot, self).setUp()
class PCIHostSoftboot(OpClassPCI, unittest.TestCase):
'''
    Runs the parent class test cases with this class's unique setup.
'''
@classmethod
def setUpClass(cls):
super(PCIHostSoftboot, cls).setUpClass(desired=OpSystemState.OS)
cls.pty.sendline("reboot")
cls.cv_SYSTEM.set_state(OpSystemState.BOOTING)
# clear the states since we rebooted outside the state machine
cls.cv_SYSTEM.util.clear_state(cls.cv_SYSTEM)
cls.cv_SYSTEM.goto_state(OpSystemState.OS)
@classmethod
def tearDownClass(cls):
super(PCIHostSoftboot, cls).tearDownClass()
def setUp(self):
        # this is left as a placeholder for per-test setUp
super(PCIHostSoftboot, self).setUp()
class PCIHostHardboot(OpClassPCI, unittest.TestCase):
'''
    Runs the parent class test cases with this class's unique setup.
'''
@classmethod
def setUpClass(cls):
super(PCIHostHardboot, cls).setUpClass(
desired=OpSystemState.OS, power_cycle=1)
@classmethod
def tearDownClass(cls):
super(PCIHostHardboot, cls).tearDownClass()
def setUp(self):
        # this is left as a placeholder for per-test setUp
super(PCIHostHardboot, self).setUp()
class PCIHost(OpClassPCI, unittest.TestCase):
'''
    Runs the parent class test cases with this class's unique setup.
'''
@classmethod
def setUpClass(cls):
super(PCIHost, cls).setUpClass(desired=OpSystemState.OS)
def setUp(self):
        # this is left as a placeholder for per-test setUp
super(PCIHost, self).setUp()
def skiroot_softboot_suite():
'''
Function used to prepare a test suite (see op-test)
--run-suite pci-regression
--run testcases.OpTestPCI.skiroot_softboot_suite
'''
tests = ['pcie_link_errors', 'compare_live_devices',
'pci_link_check', 'compare_boot_devices']
return unittest.TestSuite(list(map(PCISkirootSoftboot, tests)))
def skiroot_hardboot_suite():
'''
Function used to prepare a test suite (see op-test)
--run-suite pci-regression
--run testcases.OpTestPCI.skiroot_hardboot_suite
'''
tests = ['pcie_link_errors', 'compare_live_devices',
'pci_link_check', 'compare_boot_devices']
return unittest.TestSuite(list(map(PCISkirootHardboot, tests)))
def skiroot_suite():
'''
Function used to prepare a test suite (see op-test)
--run-suite BasicPCI
--run testcases.OpTestPCI.skiroot_suite
    This suite does not depend on soft vs hard boot
'''
tests = ['pcie_link_errors', 'compare_live_devices']
return unittest.TestSuite(list(map(PCISkiroot, tests)))
def skiroot_full_suite():
'''
Function used to prepare a test suite (see op-test)
--run testcases.OpTestPCI.skiroot_full_suite
    This suite does not depend on soft vs hard boot
'''
tests = ['pcie_link_errors', 'compare_live_devices',
'pci_link_check', 'driver_bind']
return unittest.TestSuite(list(map(PCISkiroot, tests)))
def host_softboot_suite():
'''
Function used to prepare a test suite (see op-test)
--run-suite pci-regression
--run testcases.OpTestPCI.host_softboot_suite
'''
tests = ['pcie_link_errors', 'compare_live_devices', 'pci_link_check',
'compare_boot_devices', 'driver_bind', 'hot_plug_host']
return unittest.TestSuite(list(map(PCIHostSoftboot, tests)))
def host_hardboot_suite():
'''
Function used to prepare a test suite (see op-test)
--run-suite pci-regression
--run testcases.OpTestPCI.host_hardboot_suite
'''
tests = ['pcie_link_errors', 'compare_live_devices', 'pci_link_check',
'compare_boot_devices', 'driver_bind', 'hot_plug_host']
return unittest.TestSuite(list(map(PCIHostHardboot, tests)))
def host_suite():
'''
Function used to prepare a test suite (see op-test)
--run-suite BasicPCI
--run testcases.OpTestPCI.host_suite
    This suite does not depend on soft vs hard boot
'''
tests = ['pcie_link_errors', 'compare_live_devices']
return unittest.TestSuite(list(map(PCIHost, tests)))
def host_full_suite():
'''
Function used to prepare a test suite (see op-test)
--run testcases.OpTestPCI.host_full_suite
    This suite does not depend on soft vs hard boot
'''
tests = ['pcie_link_errors', 'compare_live_devices',
'pci_link_check', 'driver_bind', 'hot_plug_host']
return unittest.TestSuite(list(map(PCIHost, tests)))
|
open-power/op-test-framework
|
testcases/OpTestPCI.py
|
Python
|
apache-2.0
| 36,744
|
from decimal import Decimal
from voluptuous import MultipleInvalid
from minfraud.validation import validate_transaction, validate_report
import unittest
class ValidationBase:
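    # Mixin with shared fixtures and assertion helpers; the TestCase
    # subclasses below combine it with unittest.TestCase per schema section.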
def setup_transaction(self, transaction):
if "device" not in transaction:
transaction["device"] = {}
if "ip_address" not in transaction["device"]:
transaction["device"]["ip_address"] = "1.1.1.1"
def check_invalid_transaction(self, transaction):
self.setup_transaction(transaction)
with self.assertRaises(MultipleInvalid, msg=f"{transaction} is invalid"):
validate_transaction(transaction)
def check_transaction(self, transaction):
self.setup_transaction(transaction)
try:
validate_transaction(transaction)
except MultipleInvalid as e:
self.fail(f"MultipleInvalid {e.msg} thrown for {transaction}")
def check_transaction_str_type(self, object, key):
self.check_transaction({object: {key: "string"}})
self.check_invalid_transaction({object: {key: 12}})
def check_positive_number(self, f):
for good in (1, 1.1, Decimal("1.1")):
self.check_transaction(f(good))
for bad in ("1.2", "1", -1, -1.1, 0):
self.check_invalid_transaction(f(bad))
def check_bool(self, object, key):
for good in (True, False):
self.check_transaction({object: {key: good}})
for bad in ("", 0, "True"):
self.check_invalid_transaction({object: {key: bad}})
def setup_report(self, report):
if "ip_address" not in report:
report["ip_address"] = "1.2.3.4"
if "tag" not in report:
report["tag"] = "chargeback"
def check_invalid_report(self, report):
self.setup_report(report)
with self.assertRaises(MultipleInvalid, msg=f"{report} is invalid"):
validate_report(report)
def check_report(self, report):
self.setup_report(report)
try:
validate_report(report)
except MultipleInvalid as e:
self.fail(f"MultipleInvalid {e.msg} thrown for {report}")
def check_report_str_type(self, key):
self.check_report({key: "string"})
self.check_invalid_report({key: 12})
class TestTransaction(unittest.TestCase, ValidationBase):
def test_transaction_without_device(self):
transaction = {
"account": {
"user_id": "usr",
}
}
validate_transaction(transaction)
class TestAccount(unittest.TestCase, ValidationBase):
def test_account_user_id(self):
self.check_transaction({"account": {"user_id": "usr"}})
def test_account_username_md5(self):
self.check_transaction(
{"account": {"username_md5": "14c4b06b824ec593239362517f538b29"}}
)
def test_invalid_account_username_md5s(self):
self.check_invalid_transaction(
{"account": {"username_md5": "14c4b06b824ec593239362517f538b2"}}
)
self.check_invalid_transaction(
{"account": {"username_md5": "14c4b06b824ec593239362517f538b29a"}}
)
class AddressBase(ValidationBase):
def test_strings(self):
for key in (
"first_name",
"last_name",
"company",
"address",
"address_2",
"city",
"postal",
"phone_number",
):
self.check_transaction_str_type(self.type, key)
def test_region(self):
for region in ("A", "AA", "AAA", "ZZZZ"):
self.check_transaction({self.type: {"region": region}})
for invalid in ("", "AAAAA", 1, "aaa"):
self.check_invalid_transaction({self.type: {"region": invalid}})
def test_country(self):
for country in ("US", "CA", "GB"):
self.check_transaction({self.type: {"country": country}})
for invalid in ("", "U1", "USA", 1, "11", "us"):
self.check_invalid_transaction({self.type: {"country": invalid}})
def test_phone_country_code(self):
for code in (1, "1", "2341"):
self.check_transaction({self.type: {"phone_country_code": code}})
for invalid in ("", "12345", "U"):
self.check_invalid_transaction({self.type: {"phone_country_code": invalid}})
class TestBillingAddress(unittest.TestCase, AddressBase):
type = "billing"
class TestShippingAddress(unittest.TestCase, AddressBase):
type = "shipping"
def test_delivery_speed(self):
for speed in ("same_day", "overnight", "expedited", "standard"):
self.check_transaction({self.type: {"delivery_speed": speed}})
for invalid in ("fast", "slow", ""):
self.check_invalid_transaction({self.type: {"delivery_speed": invalid}})
class TestCreditCard(ValidationBase, unittest.TestCase):
def test_country(self):
for code in ("CA", "US"):
self.check_transaction({"credit_card": {"country": code}})
for invalid in (1, None, "", "A1", "Canada"):
self.check_invalid_transaction({"credit_card": {"country": invalid}})
def test_issuer_id_number(self):
for iin in ("123456", "532313", "88888888"):
self.check_transaction({"credit_card": {"issuer_id_number": iin}})
for invalid in ("12345", "1234567", 123456, "12345a"):
self.check_invalid_transaction(
{"credit_card": {"issuer_id_number": invalid}}
)
def test_last_digits(self):
for last_digits in ("1234", "9323", "34"):
self.check_transaction({"credit_card": {"last_digits": last_digits}})
for invalid in ("12345", "123", 1234, "123a"):
self.check_invalid_transaction({"credit_card": {"last_digits": invalid}})
self.check_transaction(
{"credit_card": {"issuer_id_number": "88888888", "last_digits": "12"}}
)
self.check_transaction(
{"credit_card": {"issuer_id_number": "88888888", "last_digits": "1234"}}
)
self.check_transaction(
{"credit_card": {"issuer_id_number": "666666", "last_digits": "1234"}}
)
self.check_transaction(
{"credit_card": {"issuer_id_number": "666666", "last_digits": "34"}}
)
def test_last_4_digits(self):
for last_digits in ("1234", "9323", "34"):
self.check_transaction({"credit_card": {"last_4_digits": last_digits}})
for invalid in ("12345", "123", 1234, "123a"):
self.check_invalid_transaction({"credit_card": {"last_4_digits": invalid}})
def test_bank_name(self):
self.check_transaction_str_type("credit_card", "bank_name")
def test_bank_phone_number(self):
self.check_transaction_str_type("credit_card", "bank_phone_number")
def test_phone_country_code(self):
for code in (1, "1", "2341"):
self.check_transaction({"credit_card": {"bank_phone_country_code": code}})
for invalid in ("", "12345", "U"):
self.check_invalid_transaction(
{"credit_card": {"bank_phone_country_code": invalid}}
)
def test_avs_and_cvv(self):
for key in ("avs_result", "cvv_result"):
for code in ("1", "A"):
self.check_transaction({"credit_card": {key: code}})
for invalid in ("", "12"):
self.check_invalid_transaction(
{"credit_card": {"credit_card": invalid}}
)
def test_token(self):
for token in ("123456abc1245", "\x21", "1" * 20):
self.check_transaction({"credit_card": {"token": token}})
for invalid in ("\x20", "123456", "x" * 256):
self.check_invalid_transaction({"credit_card": {"token": invalid}})
def test_was_3d_secure_successful(self):
self.check_bool("credit_card", "was_3d_secure_successful")
class TestCustomInputs(ValidationBase, unittest.TestCase):
def test_valid_inputs(self):
self.check_transaction(
{
"custom_inputs": {
"string_input_1": "test string",
"int_input": 19,
"float_input": 3.2,
"bool_input": True,
}
}
)
def test_invalid(self):
for invalid in (
{"InvalidKey": 1},
{"too_long": "x" * 256},
{"has_newline": "test\n"},
{"too_big": 1e13},
{"too_small": -1e13},
{"too_big_float": float(1e13)},
):
self.check_invalid_transaction({"custom_inputs": invalid})
class TestDevice(ValidationBase, unittest.TestCase):
def test_ip_address(self):
for ip in ("1.2.3.4", "2001:db8:0:0:1:0:0:1", "::FFFF:1.2.3.4"):
self.check_transaction({"device": {"ip_address": ip}})
for invalid in ("1.2.3.", "299.1.1.1", "::AF123"):
self.check_invalid_transaction({"device": {"ip_address": invalid}})
def test_missing_ip(self):
validate_transaction({"device": {}})
validate_transaction(
{
"device": {
"user_agent": "foo",
}
}
)
def test_missing_device(self):
validate_transaction({})
def test_user_agent(self):
self.check_transaction_str_type("device", "user_agent")
def test_accept_language(self):
self.check_transaction_str_type("device", "accept_language")
def test_session_id(self):
self.check_transaction_str_type("device", "session_id")
def test_session_age(self):
for valid in (3600, 0, 25.5):
self.check_transaction(
{"device": {"ip_address": "4.4.4.4", "session_age": valid}}
)
for invalid in ("foo", -1):
self.check_invalid_transaction(
{"device": {"ip_address": "4.4.4.4", "session_age": invalid}}
)
class TestEmail(ValidationBase, unittest.TestCase):
def test_address(self):
for good in ("test@maxmind.com", "977577b140bfb7c516e4746204fbdb01"):
self.check_transaction({"email": {"address": good}})
for bad in (
"not.email",
"977577b140bfb7c516e4746204fbdb0",
"977577b140bfb7c516e4746204fbdb012",
):
self.check_invalid_transaction({"email": {"address": bad}})
def test_domain(self):
for good in ("maxmind.com", "www.bbc.co.uk"):
self.check_transaction({"email": {"domain": good}})
for bad in ("bad ", " bad.com"):
self.check_invalid_transaction({"email": {"domain": bad}})
class TestEvent(ValidationBase, unittest.TestCase):
def test_transaction(self):
self.check_transaction_str_type("event", "transaction_id")
def test_shop_id(self):
self.check_transaction_str_type("event", "shop_id")
def test_time(self):
for good in ("2015-05-08T16:07:56+00:00", "2015-05-08T16:07:56Z"):
self.check_transaction({"event": {"time": good}})
for bad in ("2015-05-08T16:07:56", "2015-05-08 16:07:56Z"):
self.check_invalid_transaction({"event": {"time": bad}})
def test_type(self):
for good in (
"account_creation",
"account_login",
"email_change",
"password_reset",
"payout_change",
"purchase",
"recurring_purchase",
"referral",
"survey",
):
self.check_transaction({"event": {"type": good}})
for bad in ("bad", 1, ""):
self.check_invalid_transaction({"event": {"type": bad}})
class TestOrder(ValidationBase, unittest.TestCase):
def test_amount(self):
self.check_positive_number(lambda v: {"order": {"amount": v}})
def test_currency(self):
for good in ("USD", "GBP"):
self.check_transaction({"order": {"currency": good}})
for bad in ("US", "US1", "USDD", "usd"):
self.check_invalid_transaction({"order": {"currency": bad}})
def test_discount_code(self):
self.check_transaction_str_type("order", "discount_code")
def test_affiliate_id(self):
self.check_transaction_str_type("order", "affiliate_id")
def test_subaffiliate_id(self):
self.check_transaction_str_type("order", "subaffiliate_id")
def test_is_gift(self):
self.check_bool("order", "is_gift")
def test_has_gift_message(self):
self.check_bool("order", "has_gift_message")
def test_referrer_uri(self):
for good in ("http://www.mm.com/fadsf", "https://x.org/"):
self.check_transaction({"order": {"referrer_uri": good}})
for bad in ("ftp://a.com/", "www.mm.com"):
self.check_invalid_transaction({"order": {"referrer_uri": bad}})
class TestPayment(ValidationBase, unittest.TestCase):
def test_processor(self):
for good in ("adyen", "stripe"):
self.check_transaction({"payment": {"processor": good}})
for bad in ("notvalid", " stripe"):
self.check_invalid_transaction({"payment": {"processor": bad}})
def test_was_authorized(self):
self.check_bool("payment", "was_authorized")
def test_decline_code(self):
self.check_transaction_str_type("payment", "decline_code")
class TestShoppingCart(ValidationBase, unittest.TestCase):
def test_category(self):
self.check_transaction({"shopping_cart": [{"category": "cat"}]})
def test_item_id(self):
self.check_transaction({"shopping_cart": [{"item_id": "cat"}]})
def test_amount(self):
self.check_positive_number(lambda v: {"shopping_cart": [{"price": v}]})
def test_quantity(self):
for good in (1, 1000):
self.check_transaction({"shopping_cart": [{"quantity": good}]})
for bad in (1.1, -1, 0):
self.check_invalid_transaction({"shopping_cart": [{"quantity": bad}]})
class TestReport(unittest.TestCase, ValidationBase):
def test_ip_address(self):
for good in ("182.193.2.1", "a74:777f:3acd:57a0:4e7e:e999:7fe6:1b5b"):
self.check_report({"ip_address": good})
for bad in ("1.2.3.", "299.1.1.1", "::AF123", "", None):
self.check_invalid_report({"ip_address": bad})
def test_maxmind_id(self):
for good in ("12345678", "abcdefgh"):
self.check_report({"maxmind_id": good})
for bad in ("1234567", "123456789", "", None):
self.check_invalid_report({"maxmind_id": bad})
def test_minfraud_id(self):
for good in (
"12345678-1234-1234-1234-123456789012",
"1234-5678-1234-1234-1234-1234-5678-9012",
"12345678901234567890123456789012",
):
self.check_report({"minfraud_id": good})
for bad in (
"1234567812341234123412345678901",
"12345678-123412341234-12345678901",
"12345678-1234-1234-1234-1234567890123",
"12345678-1234-1234-1234-12345678901g",
"",
):
self.check_invalid_report({"minfraud_id": bad})
def test_strings(self):
for key in (
"chargeback_code",
"notes",
"transaction_id",
):
self.check_report_str_type(key)
def test_tag(self):
for good in ("chargeback", "not_fraud", "spam_or_abuse", "suspected_fraud"):
self.check_report({"tag": good})
for bad in ("risky_business", "", None):
self.check_invalid_report({"tag": bad})
|
maxmind/minfraud-api-python
|
tests/test_validation.py
|
Python
|
apache-2.0
| 15,656
|
"""Support for Subaru sensors."""
import subarulink.const as sc
from homeassistant.components.sensor import DEVICE_CLASSES
from homeassistant.const import (
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_TEMPERATURE,
DEVICE_CLASS_VOLTAGE,
LENGTH_KILOMETERS,
LENGTH_MILES,
PERCENTAGE,
PRESSURE_HPA,
TEMP_CELSIUS,
TIME_MINUTES,
VOLT,
VOLUME_GALLONS,
VOLUME_LITERS,
)
from homeassistant.util.distance import convert as dist_convert
from homeassistant.util.unit_system import (
IMPERIAL_SYSTEM,
LENGTH_UNITS,
PRESSURE_UNITS,
TEMPERATURE_UNITS,
)
from homeassistant.util.volume import convert as vol_convert
from .const import (
API_GEN_2,
DOMAIN,
ENTRY_COORDINATOR,
ENTRY_VEHICLES,
VEHICLE_API_GEN,
VEHICLE_HAS_EV,
VEHICLE_HAS_SAFETY_SERVICE,
VEHICLE_STATUS,
)
from .entity import SubaruEntity
L_PER_GAL = vol_convert(1, VOLUME_GALLONS, VOLUME_LITERS)
KM_PER_MI = dist_convert(1, LENGTH_MILES, LENGTH_KILOMETERS)
# Fuel Economy Constants
FUEL_CONSUMPTION_L_PER_100KM = "L/100km"
FUEL_CONSUMPTION_MPG = "mi/gal"
FUEL_CONSUMPTION_UNITS = [FUEL_CONSUMPTION_L_PER_100KM, FUEL_CONSUMPTION_MPG]
SENSOR_TYPE = "type"
SENSOR_CLASS = "class"
SENSOR_FIELD = "field"
SENSOR_UNITS = "units"
# Sensor data available to "Subaru Safety Plus" subscribers with Gen1 or Gen2 vehicles
SAFETY_SENSORS = [
{
SENSOR_TYPE: "Odometer",
SENSOR_CLASS: None,
SENSOR_FIELD: sc.ODOMETER,
SENSOR_UNITS: LENGTH_KILOMETERS,
},
]
# Sensor data available to "Subaru Safety Plus" subscribers with Gen2 vehicles
API_GEN_2_SENSORS = [
{
SENSOR_TYPE: "Avg Fuel Consumption",
SENSOR_CLASS: None,
SENSOR_FIELD: sc.AVG_FUEL_CONSUMPTION,
SENSOR_UNITS: FUEL_CONSUMPTION_L_PER_100KM,
},
{
SENSOR_TYPE: "Range",
SENSOR_CLASS: None,
SENSOR_FIELD: sc.DIST_TO_EMPTY,
SENSOR_UNITS: LENGTH_KILOMETERS,
},
{
SENSOR_TYPE: "Tire Pressure FL",
SENSOR_CLASS: None,
SENSOR_FIELD: sc.TIRE_PRESSURE_FL,
SENSOR_UNITS: PRESSURE_HPA,
},
{
SENSOR_TYPE: "Tire Pressure FR",
SENSOR_CLASS: None,
SENSOR_FIELD: sc.TIRE_PRESSURE_FR,
SENSOR_UNITS: PRESSURE_HPA,
},
{
SENSOR_TYPE: "Tire Pressure RL",
SENSOR_CLASS: None,
SENSOR_FIELD: sc.TIRE_PRESSURE_RL,
SENSOR_UNITS: PRESSURE_HPA,
},
{
SENSOR_TYPE: "Tire Pressure RR",
SENSOR_CLASS: None,
SENSOR_FIELD: sc.TIRE_PRESSURE_RR,
SENSOR_UNITS: PRESSURE_HPA,
},
{
SENSOR_TYPE: "External Temp",
SENSOR_CLASS: DEVICE_CLASS_TEMPERATURE,
SENSOR_FIELD: sc.EXTERNAL_TEMP,
SENSOR_UNITS: TEMP_CELSIUS,
},
{
SENSOR_TYPE: "12V Battery Voltage",
SENSOR_CLASS: DEVICE_CLASS_VOLTAGE,
SENSOR_FIELD: sc.BATTERY_VOLTAGE,
SENSOR_UNITS: VOLT,
},
]
# Sensor data available to "Subaru Safety Plus" subscribers with PHEV vehicles
EV_SENSORS = [
{
SENSOR_TYPE: "EV Range",
SENSOR_CLASS: None,
SENSOR_FIELD: sc.EV_DISTANCE_TO_EMPTY,
SENSOR_UNITS: LENGTH_MILES,
},
{
SENSOR_TYPE: "EV Battery Level",
SENSOR_CLASS: DEVICE_CLASS_BATTERY,
SENSOR_FIELD: sc.EV_STATE_OF_CHARGE_PERCENT,
SENSOR_UNITS: PERCENTAGE,
},
{
SENSOR_TYPE: "EV Time to Full Charge",
SENSOR_CLASS: None,
SENSOR_FIELD: sc.EV_TIME_TO_FULLY_CHARGED,
SENSOR_UNITS: TIME_MINUTES,
},
]
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Subaru sensors by config_entry."""
coordinator = hass.data[DOMAIN][config_entry.entry_id][ENTRY_COORDINATOR]
vehicle_info = hass.data[DOMAIN][config_entry.entry_id][ENTRY_VEHICLES]
entities = []
for vin in vehicle_info.keys():
entities.extend(create_vehicle_sensors(vehicle_info[vin], coordinator))
async_add_entities(entities, True)
def create_vehicle_sensors(vehicle_info, coordinator):
"""Instantiate all available sensors for the vehicle."""
sensors_to_add = []
if vehicle_info[VEHICLE_HAS_SAFETY_SERVICE]:
sensors_to_add.extend(SAFETY_SENSORS)
if vehicle_info[VEHICLE_API_GEN] == API_GEN_2:
sensors_to_add.extend(API_GEN_2_SENSORS)
if vehicle_info[VEHICLE_HAS_EV]:
sensors_to_add.extend(EV_SENSORS)
return [
SubaruSensor(
vehicle_info,
coordinator,
s[SENSOR_TYPE],
s[SENSOR_CLASS],
s[SENSOR_FIELD],
s[SENSOR_UNITS],
)
for s in sensors_to_add
]
class SubaruSensor(SubaruEntity):
"""Class for Subaru sensors."""
def __init__(
self, vehicle_info, coordinator, entity_type, sensor_class, data_field, api_unit
):
"""Initialize the sensor."""
super().__init__(vehicle_info, coordinator)
self.hass_type = "sensor"
self.current_value = None
self.entity_type = entity_type
self.sensor_class = sensor_class
self.data_field = data_field
self.api_unit = api_unit
@property
def device_class(self):
"""Return the class of this device, from component DEVICE_CLASSES."""
if self.sensor_class in DEVICE_CLASSES:
return self.sensor_class
return super().device_class
@property
def state(self):
"""Return the state of the sensor."""
self.current_value = self.get_current_value()
if self.current_value is None:
return None
if self.api_unit in TEMPERATURE_UNITS:
return round(
self.hass.config.units.temperature(self.current_value, self.api_unit), 1
)
if self.api_unit in LENGTH_UNITS:
return round(
self.hass.config.units.length(self.current_value, self.api_unit), 1
)
if self.api_unit in PRESSURE_UNITS:
if self.hass.config.units == IMPERIAL_SYSTEM:
return round(
self.hass.config.units.pressure(self.current_value, self.api_unit),
1,
)
if self.api_unit in FUEL_CONSUMPTION_UNITS:
if self.hass.config.units == IMPERIAL_SYSTEM:
return round((100.0 * L_PER_GAL) / (KM_PER_MI * self.current_value), 1)
return self.current_value
@property
def unit_of_measurement(self):
"""Return the unit_of_measurement of the device."""
if self.api_unit in TEMPERATURE_UNITS:
return self.hass.config.units.temperature_unit
if self.api_unit in LENGTH_UNITS:
return self.hass.config.units.length_unit
if self.api_unit in PRESSURE_UNITS:
if self.hass.config.units == IMPERIAL_SYSTEM:
return self.hass.config.units.pressure_unit
return PRESSURE_HPA
if self.api_unit in FUEL_CONSUMPTION_UNITS:
if self.hass.config.units == IMPERIAL_SYSTEM:
return FUEL_CONSUMPTION_MPG
return FUEL_CONSUMPTION_L_PER_100KM
return self.api_unit
@property
def available(self):
"""Return if entity is available."""
last_update_success = super().available
if last_update_success and self.vin not in self.coordinator.data:
return False
return last_update_success
def get_current_value(self):
"""Get raw value from the coordinator."""
value = self.coordinator.data[self.vin][VEHICLE_STATUS].get(self.data_field)
if value in sc.BAD_SENSOR_VALUES:
value = None
if isinstance(value, str):
if "." in value:
value = float(value)
else:
value = int(value)
return value
|
partofthething/home-assistant
|
homeassistant/components/subaru/sensor.py
|
Python
|
apache-2.0
| 7,898
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for AppendRows
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-bigquery-storage
# [START bigquerystorage_v1beta2_generated_BigQueryWrite_AppendRows_sync]
from google.cloud import bigquery_storage_v1beta2
def sample_append_rows():
# Create a client
client = bigquery_storage_v1beta2.BigQueryWriteClient()
# Initialize request argument(s)
request = bigquery_storage_v1beta2.AppendRowsRequest(
write_stream="write_stream_value",
)
# This method expects an iterator which contains
# 'bigquery_storage_v1beta2.AppendRowsRequest' objects
# Here we create a generator that yields a single `request` for
# demonstrative purposes.
requests = [request]
def request_generator():
for request in requests:
yield request
# Make the request
stream = client.append_rows(requests=request_generator())
# Handle the response
for response in stream:
print(response)
# [END bigquerystorage_v1beta2_generated_BigQueryWrite_AppendRows_sync]
|
googleapis/python-bigquery-storage
|
samples/generated_samples/bigquerystorage_v1beta2_generated_big_query_write_append_rows_sync.py
|
Python
|
apache-2.0
| 1,884
|
from os import makedirs
from os.path import join, dirname, exists
from string import Template
from galaxy.util.bunch import Bunch
from galaxy.objectstore import build_object_store_from_config
from .test_utils import TempDirectoryTestCase
from .test_objectstore import MockDataset
class PulsarObjectStoreTest(TempDirectoryTestCase):
def __write(self, contents, name):
path = join(self.temp_directory, name)
directory = dirname(path)
if not exists(directory):
makedirs(directory)
open(path, "wb").write(contents)
return path
def test_pulsar_objectstore(self):
# Define real object store used by Pulsar server.
object_store_config_file = join(self.temp_directory, "object_store_conf.xml")
with open(object_store_config_file, "w") as configf:
config_template = Template("""<?xml version="1.0"?>
<object_store type="disk">
<files_dir path="${temp_directory}"/>
<extra_dir type="temp" path="${temp_directory}"/>
<extra_dir type="job_work" path="${temp_directory}"/>
</object_store>
""")
config_contents = config_template.safe_substitute(temp_directory=self.temp_directory)
configf.write(config_contents)
app_conf = dict(
object_store_config_file=object_store_config_file,
private_token="12345",
)
from .test_utils import test_pulsar_server
with test_pulsar_server(app_conf=app_conf) as server:
url = server.application_url
# Define a proxy Pulsar object store.
proxy_object_store_config_file = join(self.temp_directory, "proxy_object_store_conf.xml")
with open(proxy_object_store_config_file, "w") as configf:
config_template = Template("""<?xml version="1.0"?>
<object_store type="pulsar" url="$url" private_token="12345" transport="urllib">
<!-- private_token is optional - see Pulsar documentation for more information. -->
<!-- transport is optional, set to curl to use libcurl instead of urllib for communication with Pulsar. -->
</object_store>
""")
contents = config_template.safe_substitute(url=url)
configf.write(contents)
config = Bunch(object_store_config_file=proxy_object_store_config_file)
object_store = build_object_store_from_config(config=config)
# Test no dataset with id 1 exists.
absent_dataset = MockDataset(1)
assert not object_store.exists(absent_dataset)
# Write empty dataset 2 in second backend, ensure it is empty and
# exists.
empty_dataset = MockDataset(2)
self.__write(b"", "000/dataset_2.dat")
assert object_store.exists(empty_dataset)
assert object_store.empty(empty_dataset)
            # Write non-empty dataset in backend 1, test it is not empty and exists.
hello_world_dataset = MockDataset(3)
self.__write(b"Hello World!", "000/dataset_3.dat")
assert object_store.exists(hello_world_dataset)
assert not object_store.empty(hello_world_dataset)
# Test get_data
data = object_store.get_data(hello_world_dataset)
assert data == "Hello World!"
data = object_store.get_data(hello_world_dataset, start=1, count=6)
assert data == "ello W"
# Test Size
# Test absent and empty datasets yield size of 0.
assert object_store.size(absent_dataset) == 0
assert object_store.size(empty_dataset) == 0
# Elsewise
assert object_store.size(hello_world_dataset) > 0 # Should this always be the number of bytes?
# Test percent used (to some degree)
percent_store_used = object_store.get_store_usage_percent()
assert percent_store_used > 0.0
assert percent_store_used < 100.0
# Test update_from_file test
output_dataset = MockDataset(4)
output_real_path = join(self.temp_directory, "000", "dataset_4.dat")
assert not exists(output_real_path)
output_working_path = self.__write(b"NEW CONTENTS", "job_working_directory1/example_output")
object_store.update_from_file(output_dataset, file_name=output_working_path, create=True)
assert exists(output_real_path)
# Test delete
to_delete_dataset = MockDataset(5)
to_delete_real_path = self.__write(b"content to be deleted!", "000/dataset_5.dat")
assert object_store.exists(to_delete_dataset)
assert object_store.delete(to_delete_dataset)
assert not object_store.exists(to_delete_dataset)
assert not exists(to_delete_real_path)
# Test json content.
complex_contents_dataset = MockDataset(6)
complex_content = b'{"a":6}'
self.__write(complex_content, "000/dataset_6.dat")
assert object_store.exists(complex_contents_dataset)
            data = object_store.get_data(complex_contents_dataset)
            assert data == complex_content.decode("utf-8")
|
ssorgatem/pulsar
|
test/pulsar_objectstore_test.py
|
Python
|
apache-2.0
| 5,154
|
from tornado.process import cpu_count, _reseed_random
from tornado import ioloop
import logging
import os
import signal
import sys
from tornado.util import errno_from_exception
import errno
logger = logging.getLogger(__name__)
_task_id = None
exiting = False
def fork_processes(num_processes, max_restarts=100):
"""Starts multiple worker processes.
If ``num_processes`` is None or <= 0, we detect the number of cores
available on this machine and fork that number of child
processes. If ``num_processes`` is given and > 0, we fork that
specific number of sub-processes.
Since we use processes and not threads, there is no shared memory
between any server code.
Note that multiple processes are not compatible with the autoreload
module (or the ``autoreload=True`` option to `tornado.web.Application`
which defaults to True when ``debug=True``).
When using multiple processes, no IOLoops can be created or
referenced until after the call to ``fork_processes``.
In each child process, ``fork_processes`` returns its *task id*, a
number between 0 and ``num_processes``. Processes that exit
abnormally (due to a signal or non-zero exit status) are restarted
with the same id (up to ``max_restarts`` times). In the parent
process, ``fork_processes`` returns None if all child processes
have exited normally, but will otherwise only exit by throwing an
exception.
"""
global _task_id
assert _task_id is None
if num_processes is None or num_processes <= 0:
num_processes = cpu_count()
logger.info("Starting %d processes", num_processes)
children = {}
def start_child(i):
pid = os.fork()
if pid == 0:
# child process
_reseed_random()
global _task_id
_task_id = i
return i
else:
children[pid] = i
return None
for i in range(num_processes):
id = start_child(i)
if id is not None:
return id
global exiting
exiting = False
def receive_signal(sig, frame):
logger.debug('Received signal')
global exiting
exiting = True
for pid, taskid in children.items():
os.kill(pid, signal.SIGTERM)
signal.signal(signal.SIGTERM, receive_signal)
signal.signal(signal.SIGINT, receive_signal)
num_restarts = 0
while children and not exiting:
logger.debug('Exiting : %s' % exiting)
try:
pid, status = os.wait()
except OSError as e:
if errno_from_exception(e) == errno.EINTR:
continue
raise
if pid not in children:
continue
id = children.pop(pid)
if os.WIFSIGNALED(status):
logger.warning("child %d (pid %d) killed by signal %d, restarting",
id, pid, os.WTERMSIG(status))
elif os.WEXITSTATUS(status) != 0:
logger.warning("child %d (pid %d) exited with status %d, restarting",
id, pid, os.WEXITSTATUS(status))
else:
logger.info("child %d (pid %d) exited normally", id, pid)
continue
num_restarts += 1
if num_restarts > max_restarts:
raise RuntimeError("Too many child restarts, giving up")
new_id = start_child(id)
if new_id is not None:
return new_id
# All child processes exited cleanly, so exit the master process
# instead of just returning to right after the call to
# fork_processes (which will probably just start up another IOLoop
# unless the caller checks the return value).
sys.exit(0)
|
kevenli/scrapydd
|
scrapydd/process.py
|
Python
|
apache-2.0
| 3,812
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
import binascii
import glob
import gzip
import io
import logging
import os
import pickle
import random
import re
import sys
import unittest
import zlib
from builtins import range
import crcmod
# patches unittest.TestCase to be python3 compatible
import future.tests.base # pylint: disable=unused-import
import apache_beam as beam
from apache_beam import Create
from apache_beam import coders
from apache_beam.io.filesystem import CompressionTypes
from apache_beam.io.tfrecordio import ReadAllFromTFRecord
from apache_beam.io.tfrecordio import ReadFromTFRecord
from apache_beam.io.tfrecordio import WriteToTFRecord
from apache_beam.io.tfrecordio import _TFRecordSink
from apache_beam.io.tfrecordio import _TFRecordUtil
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.test_utils import TempDir
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
try:
import tensorflow.compat.v1 as tf # pylint: disable=import-error
except ImportError:
try:
import tensorflow as tf # pylint: disable=import-error
except ImportError:
tf = None # pylint: disable=invalid-name
logging.warning('Tensorflow is not installed, so skipping some tests.')
# Created by running following code in python:
# >>> import tensorflow as tf
# >>> import base64
# >>> writer = tf.python_io.TFRecordWriter('/tmp/python_foo.tfrecord')
# >>> writer.write(b'foo')
# >>> writer.close()
# >>> with open('/tmp/python_foo.tfrecord', 'rb') as f:
# ... data = base64.b64encode(f.read())
# ... print(data)
FOO_RECORD_BASE64 = b'AwAAAAAAAACwmUkOZm9vYYq+/g=='
# Same as above but containing two records [b'foo', b'bar']
FOO_BAR_RECORD_BASE64 = b'AwAAAAAAAACwmUkOZm9vYYq+/gMAAAAAAAAAsJlJDmJhckYA5cg='
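# On-disk TFRecord framing (what _TFRecordUtil reads and writes below): an
# 8-byte little-endian payload length, a 4-byte masked CRC32C of that length,
# the payload bytes, and finally a 4-byte masked CRC32C of the payload.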
def _write_file(path, base64_records):
record = binascii.a2b_base64(base64_records)
with open(path, 'wb') as f:
f.write(record)
def _write_file_deflate(path, base64_records):
record = binascii.a2b_base64(base64_records)
with open(path, 'wb') as f:
f.write(zlib.compress(record))
def _write_file_gzip(path, base64_records):
record = binascii.a2b_base64(base64_records)
with gzip.GzipFile(path, 'wb') as f:
f.write(record)
class TestTFRecordUtil(unittest.TestCase):
def setUp(self):
self.record = binascii.a2b_base64(FOO_RECORD_BASE64)
def _as_file_handle(self, contents):
result = io.BytesIO()
result.write(contents)
result.seek(0)
return result
def _increment_value_at_index(self, value, index):
l = list(value)
if sys.version_info[0] <= 2:
l[index] = bytes(ord(l[index]) + 1)
return b"".join(l)
else:
l[index] = l[index] + 1
return bytes(l)
def _test_error(self, record, error_text):
with self.assertRaisesRegex(ValueError, re.escape(error_text)):
_TFRecordUtil.read_record(self._as_file_handle(record))
def test_masked_crc32c(self):
self.assertEqual(0xfd7fffa, _TFRecordUtil._masked_crc32c(b'\x00' * 32))
self.assertEqual(0xf909b029, _TFRecordUtil._masked_crc32c(b'\xff' * 32))
self.assertEqual(0xfebe8a61, _TFRecordUtil._masked_crc32c(b'foo'))
self.assertEqual(
0xe4999b0,
_TFRecordUtil._masked_crc32c(b'\x03\x00\x00\x00\x00\x00\x00\x00'))
def test_masked_crc32c_crcmod(self):
crc32c_fn = crcmod.predefined.mkPredefinedCrcFun('crc-32c')
self.assertEqual(
0xfd7fffa,
_TFRecordUtil._masked_crc32c(
b'\x00' * 32, crc32c_fn=crc32c_fn))
self.assertEqual(
0xf909b029,
_TFRecordUtil._masked_crc32c(
b'\xff' * 32, crc32c_fn=crc32c_fn))
self.assertEqual(
0xfebe8a61, _TFRecordUtil._masked_crc32c(
b'foo', crc32c_fn=crc32c_fn))
self.assertEqual(
0xe4999b0,
_TFRecordUtil._masked_crc32c(
b'\x03\x00\x00\x00\x00\x00\x00\x00', crc32c_fn=crc32c_fn))
def test_write_record(self):
file_handle = io.BytesIO()
_TFRecordUtil.write_record(file_handle, b'foo')
self.assertEqual(self.record, file_handle.getvalue())
def test_read_record(self):
actual = _TFRecordUtil.read_record(self._as_file_handle(self.record))
self.assertEqual(b'foo', actual)
def test_read_record_invalid_record(self):
self._test_error(b'bar', 'Not a valid TFRecord. Fewer than 12 bytes')
def test_read_record_invalid_length_mask(self):
record = self._increment_value_at_index(self.record, 9)
self._test_error(record, 'Mismatch of length mask')
def test_read_record_invalid_data_mask(self):
record = self._increment_value_at_index(self.record, 16)
self._test_error(record, 'Mismatch of data mask')
def test_compatibility_read_write(self):
for record in [b'', b'blah', b'another blah']:
file_handle = io.BytesIO()
_TFRecordUtil.write_record(file_handle, record)
file_handle.seek(0)
actual = _TFRecordUtil.read_record(file_handle)
self.assertEqual(record, actual)
class TestTFRecordSink(unittest.TestCase):
def _write_lines(self, sink, path, lines):
f = sink.open(path)
for l in lines:
sink.write_record(f, l)
sink.close(f)
def test_write_record_single(self):
with TempDir() as temp_dir:
path = temp_dir.create_temp_file('result')
record = binascii.a2b_base64(FOO_RECORD_BASE64)
sink = _TFRecordSink(
path,
coder=coders.BytesCoder(),
file_name_suffix='',
num_shards=0,
shard_name_template=None,
compression_type=CompressionTypes.UNCOMPRESSED)
self._write_lines(sink, path, [b'foo'])
with open(path, 'rb') as f:
self.assertEqual(f.read(), record)
def test_write_record_multiple(self):
with TempDir() as temp_dir:
path = temp_dir.create_temp_file('result')
record = binascii.a2b_base64(FOO_BAR_RECORD_BASE64)
sink = _TFRecordSink(
path,
coder=coders.BytesCoder(),
file_name_suffix='',
num_shards=0,
shard_name_template=None,
compression_type=CompressionTypes.UNCOMPRESSED)
self._write_lines(sink, path, [b'foo', b'bar'])
with open(path, 'rb') as f:
self.assertEqual(f.read(), record)
@unittest.skipIf(tf is None, 'tensorflow not installed.')
class TestWriteToTFRecord(TestTFRecordSink):
def test_write_record_gzip(self):
with TempDir() as temp_dir:
file_path_prefix = temp_dir.create_temp_file('result')
with TestPipeline() as p:
input_data = [b'foo', b'bar']
_ = p | beam.Create(input_data) | WriteToTFRecord(
file_path_prefix, compression_type=CompressionTypes.GZIP)
actual = []
file_name = glob.glob(file_path_prefix + '-*')[0]
for r in tf.python_io.tf_record_iterator(
file_name, options=tf.python_io.TFRecordOptions(
tf.python_io.TFRecordCompressionType.GZIP)):
actual.append(r)
self.assertEqual(sorted(actual), sorted(input_data))
def test_write_record_auto(self):
with TempDir() as temp_dir:
file_path_prefix = temp_dir.create_temp_file('result')
with TestPipeline() as p:
input_data = [b'foo', b'bar']
_ = p | beam.Create(input_data) | WriteToTFRecord(
file_path_prefix, file_name_suffix='.gz')
actual = []
file_name = glob.glob(file_path_prefix + '-*.gz')[0]
for r in tf.python_io.tf_record_iterator(
file_name, options=tf.python_io.TFRecordOptions(
tf.python_io.TFRecordCompressionType.GZIP)):
actual.append(r)
self.assertEqual(sorted(actual), sorted(input_data))
class TestReadFromTFRecord(unittest.TestCase):
def test_process_single(self):
with TempDir() as temp_dir:
path = temp_dir.create_temp_file('result')
_write_file(path, FOO_RECORD_BASE64)
with TestPipeline() as p:
result = (p
| ReadFromTFRecord(
path,
coder=coders.BytesCoder(),
compression_type=CompressionTypes.AUTO,
validate=True))
assert_that(result, equal_to([b'foo']))
def test_process_multiple(self):
with TempDir() as temp_dir:
path = temp_dir.create_temp_file('result')
_write_file(path, FOO_BAR_RECORD_BASE64)
with TestPipeline() as p:
result = (p
| ReadFromTFRecord(
path,
coder=coders.BytesCoder(),
compression_type=CompressionTypes.AUTO,
validate=True))
assert_that(result, equal_to([b'foo', b'bar']))
def test_process_deflate(self):
with TempDir() as temp_dir:
path = temp_dir.create_temp_file('result')
_write_file_deflate(path, FOO_BAR_RECORD_BASE64)
with TestPipeline() as p:
result = (p
| ReadFromTFRecord(
path,
coder=coders.BytesCoder(),
compression_type=CompressionTypes.DEFLATE,
validate=True))
assert_that(result, equal_to([b'foo', b'bar']))
def test_process_gzip(self):
with TempDir() as temp_dir:
path = temp_dir.create_temp_file('result')
_write_file_gzip(path, FOO_BAR_RECORD_BASE64)
with TestPipeline() as p:
result = (p
| ReadFromTFRecord(
path,
coder=coders.BytesCoder(),
compression_type=CompressionTypes.GZIP,
validate=True))
assert_that(result, equal_to([b'foo', b'bar']))
def test_process_auto(self):
with TempDir() as temp_dir:
path = temp_dir.create_temp_file('result.gz')
_write_file_gzip(path, FOO_BAR_RECORD_BASE64)
with TestPipeline() as p:
result = (p
| ReadFromTFRecord(
path,
coder=coders.BytesCoder(),
compression_type=CompressionTypes.AUTO,
validate=True))
assert_that(result, equal_to([b'foo', b'bar']))
  def test_process_gzip_default_coder(self):
    # Same as test_process_gzip above, but relying on the default coder.
with TempDir() as temp_dir:
path = temp_dir.create_temp_file('result')
_write_file_gzip(path, FOO_BAR_RECORD_BASE64)
with TestPipeline() as p:
result = (p
| ReadFromTFRecord(
path, compression_type=CompressionTypes.GZIP))
assert_that(result, equal_to([b'foo', b'bar']))
def test_process_gzip_auto(self):
with TempDir() as temp_dir:
path = temp_dir.create_temp_file('result.gz')
_write_file_gzip(path, FOO_BAR_RECORD_BASE64)
with TestPipeline() as p:
result = (p
| ReadFromTFRecord(
path, compression_type=CompressionTypes.AUTO))
assert_that(result, equal_to([b'foo', b'bar']))
class TestReadAllFromTFRecord(unittest.TestCase):
def _write_glob(self, temp_dir, suffix):
for _ in range(3):
path = temp_dir.create_temp_file(suffix)
_write_file(path, FOO_BAR_RECORD_BASE64)
def test_process_single(self):
with TempDir() as temp_dir:
path = temp_dir.create_temp_file('result')
_write_file(path, FOO_RECORD_BASE64)
with TestPipeline() as p:
result = (p
| Create([path])
| ReadAllFromTFRecord(
coder=coders.BytesCoder(),
compression_type=CompressionTypes.AUTO))
assert_that(result, equal_to([b'foo']))
def test_process_multiple(self):
with TempDir() as temp_dir:
path = temp_dir.create_temp_file('result')
_write_file(path, FOO_BAR_RECORD_BASE64)
with TestPipeline() as p:
result = (p
| Create([path])
| ReadAllFromTFRecord(
coder=coders.BytesCoder(),
compression_type=CompressionTypes.AUTO))
assert_that(result, equal_to([b'foo', b'bar']))
def test_process_glob(self):
with TempDir() as temp_dir:
self._write_glob(temp_dir, 'result')
glob = temp_dir.get_path() + os.path.sep + '*result'
with TestPipeline() as p:
result = (p
| Create([glob])
| ReadAllFromTFRecord(
coder=coders.BytesCoder(),
compression_type=CompressionTypes.AUTO))
assert_that(result, equal_to([b'foo', b'bar'] * 3))
def test_process_multiple_globs(self):
with TempDir() as temp_dir:
globs = []
for i in range(3):
suffix = 'result' + str(i)
self._write_glob(temp_dir, suffix)
globs.append(temp_dir.get_path() + os.path.sep + '*' + suffix)
with TestPipeline() as p:
result = (p
| Create(globs)
| ReadAllFromTFRecord(
coder=coders.BytesCoder(),
compression_type=CompressionTypes.AUTO))
assert_that(result, equal_to([b'foo', b'bar'] * 9))
def test_process_deflate(self):
with TempDir() as temp_dir:
path = temp_dir.create_temp_file('result')
_write_file_deflate(path, FOO_BAR_RECORD_BASE64)
with TestPipeline() as p:
result = (p
| Create([path])
| ReadAllFromTFRecord(
coder=coders.BytesCoder(),
compression_type=CompressionTypes.DEFLATE))
assert_that(result, equal_to([b'foo', b'bar']))
def test_process_gzip(self):
with TempDir() as temp_dir:
path = temp_dir.create_temp_file('result')
_write_file_gzip(path, FOO_BAR_RECORD_BASE64)
with TestPipeline() as p:
result = (p
| Create([path])
| ReadAllFromTFRecord(
coder=coders.BytesCoder(),
compression_type=CompressionTypes.GZIP))
assert_that(result, equal_to([b'foo', b'bar']))
def test_process_auto(self):
with TempDir() as temp_dir:
path = temp_dir.create_temp_file('result.gz')
_write_file_gzip(path, FOO_BAR_RECORD_BASE64)
with TestPipeline() as p:
result = (p
| Create([path])
| ReadAllFromTFRecord(
coder=coders.BytesCoder(),
compression_type=CompressionTypes.AUTO))
assert_that(result, equal_to([b'foo', b'bar']))
class TestEnd2EndWriteAndRead(unittest.TestCase):
def create_inputs(self):
input_array = [[random.random() - 0.5 for _ in range(15)]
for _ in range(12)]
memfile = io.BytesIO()
pickle.dump(input_array, memfile)
return memfile.getvalue()
def test_end2end(self):
with TempDir() as temp_dir:
file_path_prefix = temp_dir.create_temp_file('result')
# Generate a TFRecord file.
with TestPipeline() as p:
expected_data = [self.create_inputs() for _ in range(0, 10)]
_ = p | beam.Create(expected_data) | WriteToTFRecord(file_path_prefix)
# Read the file back and compare.
with TestPipeline() as p:
actual_data = p | ReadFromTFRecord(file_path_prefix + '-*')
assert_that(actual_data, equal_to(expected_data))
def test_end2end_auto_compression(self):
with TempDir() as temp_dir:
file_path_prefix = temp_dir.create_temp_file('result')
# Generate a TFRecord file.
with TestPipeline() as p:
expected_data = [self.create_inputs() for _ in range(0, 10)]
_ = p | beam.Create(expected_data) | WriteToTFRecord(
file_path_prefix, file_name_suffix='.gz')
# Read the file back and compare.
with TestPipeline() as p:
actual_data = p | ReadFromTFRecord(file_path_prefix + '-*')
assert_that(actual_data, equal_to(expected_data))
def test_end2end_auto_compression_unsharded(self):
with TempDir() as temp_dir:
file_path_prefix = temp_dir.create_temp_file('result')
# Generate a TFRecord file.
with TestPipeline() as p:
expected_data = [self.create_inputs() for _ in range(0, 10)]
_ = p | beam.Create(expected_data) | WriteToTFRecord(
file_path_prefix + '.gz', shard_name_template='')
# Read the file back and compare.
with TestPipeline() as p:
actual_data = p | ReadFromTFRecord(file_path_prefix + '.gz')
assert_that(actual_data, equal_to(expected_data))
@unittest.skipIf(tf is None, 'tensorflow not installed.')
def test_end2end_example_proto(self):
with TempDir() as temp_dir:
file_path_prefix = temp_dir.create_temp_file('result')
example = tf.train.Example()
example.features.feature['int'].int64_list.value.extend(list(range(3)))
example.features.feature['bytes'].bytes_list.value.extend(
[b'foo', b'bar'])
with TestPipeline() as p:
_ = p | beam.Create([example]) | WriteToTFRecord(
file_path_prefix, coder=beam.coders.ProtoCoder(example.__class__))
# Read the file back and compare.
with TestPipeline() as p:
actual_data = (p | ReadFromTFRecord(
file_path_prefix + '-*',
coder=beam.coders.ProtoCoder(example.__class__)))
assert_that(actual_data, equal_to([example]))
def test_end2end_read_write_read(self):
with TempDir() as temp_dir:
path = temp_dir.create_temp_file('result')
with TestPipeline() as p:
# Initial read to validate the pipeline doesn't fail before the file is
# created.
_ = p | ReadFromTFRecord(path + '-*', validate=False)
expected_data = [self.create_inputs() for _ in range(0, 10)]
_ = p | beam.Create(expected_data) | WriteToTFRecord(
path, file_name_suffix='.gz')
# Read the file back and compare.
with TestPipeline() as p:
actual_data = p | ReadFromTFRecord(path+'-*', validate=True)
assert_that(actual_data, equal_to(expected_data))
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
|
RyanSkraba/beam
|
sdks/python/apache_beam/io/tfrecordio_test.py
|
Python
|
apache-2.0
| 18,843
|
# Copyright (c) 2014 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from designate.objects.base import BaseObject
class TsigKey(BaseObject):
FIELDS = ['name', 'algorithm', 'secret']
|
richm/designate
|
designate/objects/tsigkey.py
|
Python
|
apache-2.0
| 756
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for saving and loading using tf's saved_model APIs with DS."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import saved_model_test_base as test_base
from tensorflow.python.eager import test
from tensorflow.python.saved_model import saved_model
class SavedModelSaveAndLoadTest(test_base.TestSavedModelBase):
def setUp(self):
self._root_dir = 'saved_model_save_load'
super(SavedModelSaveAndLoadTest, self).setUp()
def _save_model(self, model, saved_dir):
saved_model.save(model, saved_dir)
def _load_and_run_model(self, distribution, saved_dir, predict_dataset,
output_name):
return test_base.load_and_run_with_saved_model_api(distribution, saved_dir,
predict_dataset,
output_name)
@combinations.generate(test_base.simple_models_with_strategies())
def test_save_no_strategy_restore_strategy(self, model_and_input,
distribution):
self.run_test_save_no_strategy_restore_strategy(model_and_input,
distribution)
@combinations.generate(
combinations.times(test_base.simple_models_with_strategies(),
combinations.combine(save_in_scope=[True, False])))
def test_save_strategy_restore_no_strategy(self, model_and_input,
distribution, save_in_scope):
if save_in_scope:
      self.skipTest('Saving model within tf.distribute.Strategy scope is not '
                    'supported.')
self.run_test_save_strategy_restore_no_strategy(model_and_input,
distribution, save_in_scope)
@combinations.generate(
combinations.times(test_base.simple_models_with_strategy_pairs(),
combinations.combine(save_in_scope=[True, False])))
def test_save_strategy_restore_strategy(self, model_and_input,
distribution_for_saving,
distribution_for_restoring,
save_in_scope):
if save_in_scope:
      self.skipTest('Saving model within tf.distribute.Strategy scope is not '
                    'supported.')
self.run_test_save_strategy_restore_strategy(model_and_input,
distribution_for_saving,
distribution_for_restoring,
save_in_scope)
if __name__ == '__main__':
test.main()
|
alsrgv/tensorflow
|
tensorflow/python/distribute/saved_model_save_load_test.py
|
Python
|
apache-2.0
| 3,531
|
# Copyright (C) 2014 Universidad Politecnica de Madrid
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keystoneclient import base
from keystoneclient import exceptions
from keystoneclient.v3.contrib.user_registration.utils import REGISTRATION_PATH
class Activation(base.Resource):
pass
class ActivationManager(base.CrudManager):
"""Manager class for activating user in the USER REGISTRATION extension for Keystone.
For more information about the extension: https://www.github.com/ging/keystone
"""
resource_class = Activation
collection_key = 'activate'
key = 'activation_key'
base_url = REGISTRATION_PATH
def new_activation_key(self, user):
base_url = self.base_url + '/users/{0}/'.format(base.getid(user))
return super(ActivationManager, self).get(base_url)
def activate_user(self, user, activation_key):
base_url = self.base_url + '/users/{0}/'.format(base.getid(user))
return super(ActivationManager, self).update(base_url, activation_key)
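# Illustrative usage sketch (not part of this module; the client wiring and the
# attribute layout of the returned Activation resource are assumptions):
#
#     manager = ActivationManager(client)
#     activation = manager.new_activation_key(user)
#     manager.activate_user(user, activation_key)
#
# Both helpers simply build their URL under REGISTRATION_PATH/users/<user-id>/
# and defer the actual request to keystoneclient's CrudManager.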
|
ging/python-keystoneclient
|
keystoneclient/v3/contrib/activation.py
|
Python
|
apache-2.0
| 1,528
|
import tornado.ioloop
import tornado.web
import tornado.websocket
import os
UPLOAD_FILE_PATH = "./tempfiles/"
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.render("./templates/main.html", title = "Main Page");
class EchoWebSocket(tornado.websocket.WebSocketHandler):
all_client = set()
def open(self):
print("WebSocket opened")
EchoWebSocket.all_client.add(self)
def on_message(self, message):
        print(message)
for client in EchoWebSocket.all_client:
client.write_message(u"You said: " + message)
# self.write_message(u"You said: " + message)
def on_close(self):
print("WebSocket closed")
EchoWebSocket.all_client.remove(self)
class UploadHandler(tornado.web.RequestHandler):
def post(self):
uploadFile = self.request.files['uploadFile'][0]
filename = uploadFile['filename']
        fileObj = open(UPLOAD_FILE_PATH + filename, 'wb')
        fileObj.write(uploadFile['body'])
        fileObj.close()
        # Broadcast the name of the uploaded file to every connected client.
        for client in EchoWebSocket.all_client:
            client.write_message(u"File uploaded: " + filename)
def make_app():
return tornado.web.Application([
(r"/", MainHandler),
(r"/websocket", EchoWebSocket),
(r'/upload', UploadHandler),
(r"/tempfiles/(.*)", tornado.web.StaticFileHandler, {"path": UPLOAD_FILE_PATH}),
], static_path = os.path.join(os.path.dirname(__file__), "static"), debug=True)
if __name__ == "__main__":
app = make_app()
app.listen(8888)
tornado.ioloop.IOLoop.current().start()
|
shj00007/DODO
|
test.py
|
Python
|
apache-2.0
| 1,569
|
import glob
import os
import synapseclient
import uuid
from synapseclient import Project, File, Folder, Evaluation, Wiki
from challenge import *
import challenge
from collections import OrderedDict
syn = synapseclient.Synapse()
syn.login()
CLEANUP = True
## module scope variable to hold project
project = None
evaluation = None
ad_challenge = challenge.ad_challenge_scoring
## point the scoring code at the test files rather than real challenge assets
ad_challenge.robjects.r('DATA_DIR <- "test_data"')
WIKI_TEMPLATE = """\
## Q1
${{supertable?path=%2Fevaluation%2Fsubmission%2Fquery%3Fquery%3Dselect+%2A+from+evaluation_{q1_evaluation_id}+where+status+%3D%3D+%22SCORED%22&paging=true&queryTableResults=true&showIfLoggedInOnly=false&pageSize=25&showRowNumber=false&jsonResultsKeyName=rows&columnConfig0=none%2CID%2CobjectId%3B%2CNONE&columnConfig1=none%2Cname%2Cname%3B%2CNONE&columnConfig2=synapseid%2Centity%2CentityId%3B%2CNONE&columnConfig3=none%2Cteam%2CsubmitterAlias%3B%2CNONE&columnConfig4=none%2CPearson-clin%2Ccorrelation_pearson_clin%3B%2CNONE%2C4&columnConfig5=none%2CPearson-clin-gen%2Ccorrelation_pearson_clin_gen%3B%2CNONE%2C4&columnConfig6=none%2CSpearman-clin%2Ccorrelation_spearman_clin%3B%2CNONE%2C4&columnConfig7=none%2CSpearman-clin-gen%2Ccorrelation_spearman_clin_gen%3B%2CNONE%2C4&columnConfig8=none%2CMean Rank%2Cmean_rank%3B%2CNONE%2C2&columnConfig9=none%2CFinal Rank%2Cfinal_rank%3B%2CNONE%2C2}}
## Q2
${{supertable?path=%2Fevaluation%2Fsubmission%2Fquery%3Fquery%3Dselect+%2A+from+evaluation_{q2_evaluation_id}+where+status+%3D%3D+%22SCORED%22&paging=true&queryTableResults=true&showIfLoggedInOnly=false&pageSize=25&showRowNumber=false&jsonResultsKeyName=rows&columnConfig0=none%2CID%2CobjectId%3B%2CNONE&columnConfig1=none%2Cname%2Cname%3B%2CNONE&columnConfig2=synapseid%2Centity%2CentityId%3B%2CNONE&columnConfig3=none%2Cteam%2CsubmitterAlias%3B%2CNONE&columnConfig4=none%2CAUC%2Cauc%3B%2CNONE%2C4&columnConfig5=none%2CAccuracy%2Caccuracy%3B%2CNONE%2C4}}
## Q3
${{supertable?path=%2Fevaluation%2Fsubmission%2Fquery%3Fquery%3Dselect+%2A+from+evaluation_{q3_evaluation_id}+where+status+%3D%3D+%22SCORED%22&paging=true&queryTableResults=true&showIfLoggedInOnly=false&pageSize=25&showRowNumber=false&jsonResultsKeyName=rows&columnConfig0=none%2CID%2CobjectId%3B%2CNONE&columnConfig1=none%2Cname%2Cname%3B%2CNONE&columnConfig2=synapseid%2Centity%2CentityId%3B%2CNONE&columnConfig3=none%2Cteam%2CsubmitterAlias%3B%2CNONE&columnConfig4=none%2CPercent Correct%2Cpercent_correct_diagnosis%3B%2CNONE%2C4&columnConfig5=none%2CPearson MMSE%2Cpearson_mmse%3B%2CNONE%2C4&columnConfig6=none%2CCCC MMSE%2Cccc_mmse%3B%2CNONE%2C4}}
"""
try:
challenge.syn = syn
project = syn.store(Project("Alzheimers scoring test project" + unicode(uuid.uuid4())))
print "Project:", project.id, project.name
q1_evaluation = syn.store(Evaluation(name=unicode(uuid.uuid4()), description="for testing Q1", contentSource=project.id))
print "Evaluation, Q1:", q1_evaluation.id
q2_evaluation = syn.store(Evaluation(name=unicode(uuid.uuid4()), description="for testing Q2", contentSource=project.id))
print "Evaluation, Q2:", q2_evaluation.id
q3_evaluation = syn.store(Evaluation(name=unicode(uuid.uuid4()), description="for testing Q3", contentSource=project.id))
print "Evaluation, Q3:", q3_evaluation.id
## fix up config_evaluations to refer to these evaluations
ad_challenge.config_evaluations[0]['id'] = int(q1_evaluation.id)
ad_challenge.config_evaluations[1]['id'] = int(q2_evaluation.id)
ad_challenge.config_evaluations[2]['id'] = int(q3_evaluation.id)
ad_challenge.config_evaluations_map = {ev['id']:ev for ev in ad_challenge.config_evaluations}
print "\n\nQ1 --------------------"
for filename in glob.iglob("test_data/q1.0*"):
entity = syn.store(File(filename, parent=project))
submission = syn.submit(q1_evaluation, entity, name=filename, teamName="Mean Squared Error Blues")
## submit one again that will be over quota, since we
## already have 1 good submission
entity = syn.store(File("test_data/q1.0001.txt", parent=project))
submission = syn.submit(q1_evaluation, entity, teamName="Mean Squared Error Blues")
list_submissions(q1_evaluation)
challenge_config = ad_challenge.config_evaluations_map[int(q1_evaluation.id)]
validate(q1_evaluation,
notifications=True,
send_messages=True,
validation_func=ad_challenge.validate_submission,
config=challenge_config,
submission_quota=1)
score(q1_evaluation,
notifications=True,
send_messages=True,
scoring_func=ad_challenge.score_submission,
config=challenge_config)
rank(q1_evaluation, fields=['correlation_pearson_clin',
'correlation_pearson_clin_gen',
'correlation_spearman_clin',
'correlation_spearman_clin_gen'])
print "\n\nQ2 --------------------"
for filename in glob.iglob("test_data/q2.0*"):
entity = syn.store(File(filename, parent=project))
submission = syn.submit(q2_evaluation, entity, name=filename, teamName="Mean Squared Error Blues")
list_submissions(q2_evaluation)
challenge_config = ad_challenge.config_evaluations_map[int(q2_evaluation.id)]
validate(q2_evaluation,
notifications=True,
send_messages=True,
validation_func=ad_challenge.validate_submission,
config=challenge_config)
score(q2_evaluation,
notifications=True,
send_messages=True,
scoring_func=ad_challenge.score_submission,
config=challenge_config)
rank(q2_evaluation, fields=['auc', 'accuracy'])
print "\n\nQ3 --------------------"
for filename in glob.iglob("test_data/q3.0*"):
entity = syn.store(File(filename, parent=project))
submission = syn.submit(q3_evaluation, entity, name=filename, teamName="Mean Squared Error Blues")
list_submissions(q3_evaluation)
challenge_config = ad_challenge.config_evaluations_map[int(q3_evaluation.id)]
validate(q3_evaluation,
notifications=True,
send_messages=True,
validation_func=ad_challenge.validate_submission,
config=challenge_config)
score(q3_evaluation,
notifications=True,
send_messages=True,
scoring_func=ad_challenge.score_submission,
config=challenge_config)
rank(q3_evaluation, fields=['pearson_mmse', 'ccc_mmse'])
wiki = Wiki(title="Leaderboards",
owner=project,
markdown=WIKI_TEMPLATE.format(
q1_evaluation_id=q1_evaluation.id,
q2_evaluation_id=q2_evaluation.id,
q3_evaluation_id=q3_evaluation.id))
wiki = syn.store(wiki)
finally:
if CLEANUP:
if evaluation:
syn.delete(evaluation)
if project:
syn.delete(project)
else:
print "don't clean up"
|
Sage-Bionetworks/DREAM_Alzheimers_Challenge_Scoring
|
test.py
|
Python
|
apache-2.0
| 6,996
|
# Merge sort on a singly linked list: the key step is finding the middle node
# so the list can be split and each half sorted recursively.
class Node:
def __init__(self, val):
self.val = val
self.next = None
class LinkList:
def __init__(self):
self.head = None
def push(self, val):
node = Node(val)
if self.head:
node.next = self.head
self.head = node
else:
self.head = node
def printList(self):
p = self.head
while p:
print p.val,
p = p.next
print
def mergeSort(head):
if not head:
return
if not head.next:
return
    # Advance a slow/fast pointer pair: fast moves two nodes per iteration and
    # slow moves one, so slow stops on the last node of the front half.
    slow = head
    fast = head.next
    while fast:
        fast = fast.next
        if fast:
            slow = slow.next
            fast = fast.next
    # Cut the list into two halves just after the middle node.
    frontHalf = head
    backHalf = slow.next
    slow.next = None
mergeSort(frontHalf)
mergeSort(backHalf)
head = sortedMerge(frontHalf, backHalf)
return head
def sortedMerge(a, b):
if not a:
return b
elif not b:
return a
temp = None
if a.val <= b.val:
temp = a
a.next = sortedMerge(temp.next, b)
return a
else:
temp = b
b.next = sortedMerge(a, temp.next)
return b
ll = LinkList()
ll.push(15)
ll.push(10)
ll.push(5)
ll.push(20)
ll.push(3)
ll.push(2)
ll.printList()
ll.head = mergeSort(ll.head)
ll.printList()
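# Illustrative trace of the demo above:
#   before sort: 2 3 20 5 10 15    (push prepends, so insertion order is reversed)
#   split:       2 3 20 | 5 10 15  (slow stops on the last node of the front half)
#   after sort:  2 3 5 10 15 20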
|
sonymoon/algorithm
|
src/main/python/geeksforgeeks/list/mmerge-sort-for-linked-list.py
|
Python
|
apache-2.0
| 1,384
|
#!/usr/bin/python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittest for instance_setup.py module."""
import subprocess
from google_compute_engine.instance_setup import instance_setup
from google_compute_engine.test_compat import mock
from google_compute_engine.test_compat import unittest
class InstanceSetupTest(unittest.TestCase):
def setUp(self):
self.mock_instance_config = mock.Mock()
self.mock_logger = mock.Mock()
self.mock_setup = mock.create_autospec(instance_setup.InstanceSetup)
self.mock_setup.debug = False
self.mock_setup.instance_config = self.mock_instance_config
self.mock_setup.logger = self.mock_logger
@mock.patch('google_compute_engine.instance_setup.instance_setup.instance_config')
@mock.patch('google_compute_engine.instance_setup.instance_setup.metadata_watcher')
@mock.patch('google_compute_engine.instance_setup.instance_setup.logger')
def testInstanceSetup(self, mock_logger, mock_watcher, mock_config):
mock_setup = mock.create_autospec(instance_setup.InstanceSetup)
mocks = mock.Mock()
mocks.attach_mock(mock_logger, 'logger')
mocks.attach_mock(mock_watcher, 'watcher')
mocks.attach_mock(mock_config, 'config')
mocks.attach_mock(mock_setup, 'setup')
mock_logger_instance = mock.Mock()
mock_logger.Logger.return_value = mock_logger_instance
mock_watcher_instance = mock.Mock()
mock_watcher_instance.GetMetadata.return_value = {'hello': 'world'}
mock_watcher.MetadataWatcher.return_value = mock_watcher_instance
mock_config_instance = mock.Mock()
mock_config_instance.GetOptionBool.return_value = True
mock_config.InstanceConfig.return_value = mock_config_instance
instance_setup.InstanceSetup.__init__(mock_setup)
expected_calls = [
# Setup and reading the configuration file.
mock.call.logger.Logger(
name=mock.ANY, debug=False, facility=mock.ANY),
mock.call.watcher.MetadataWatcher(logger=mock_logger_instance),
mock.call.config.InstanceConfig(),
# Setup for local SSD.
mock.call.config.InstanceConfig().GetOptionBool(
'InstanceSetup', 'optimize_local_ssd'),
mock.call.setup._RunScript('optimize_local_ssd'),
# Setup for multiqueue virtio driver.
mock.call.config.InstanceConfig().GetOptionBool(
'InstanceSetup', 'set_multiqueue'),
mock.call.setup._RunScript('set_multiqueue'),
# Check network access for reaching the metadata server.
mock.call.config.InstanceConfig().GetOptionBool(
'InstanceSetup', 'network_enabled'),
mock.call.watcher.MetadataWatcher().GetMetadata(),
# Setup for SSH host keys if necessary.
mock.call.config.InstanceConfig().GetOptionBool(
'InstanceSetup', 'set_host_keys'),
mock.call.setup._SetSshHostKeys(),
# Setup for the boto config if necessary.
mock.call.config.InstanceConfig().GetOptionBool(
'InstanceSetup', 'set_boto_config'),
mock.call.setup._SetupBotoConfig(),
# Write the updated config file.
mock.call.config.InstanceConfig().WriteConfig(),
]
self.assertEqual(mocks.mock_calls, expected_calls)
self.assertEqual(mock_setup.metadata_dict, {'hello': 'world'})
@mock.patch('google_compute_engine.instance_setup.instance_setup.instance_config')
@mock.patch('google_compute_engine.instance_setup.instance_setup.metadata_watcher')
@mock.patch('google_compute_engine.instance_setup.instance_setup.logger')
def testInstanceSetupException(self, mock_logger, mock_watcher, mock_config):
mock_setup = mock.create_autospec(instance_setup.InstanceSetup)
mocks = mock.Mock()
mocks.attach_mock(mock_logger, 'logger')
mocks.attach_mock(mock_watcher, 'watcher')
mocks.attach_mock(mock_config, 'config')
mocks.attach_mock(mock_setup, 'setup')
mock_logger_instance = mock.Mock()
mock_logger.Logger.return_value = mock_logger_instance
mock_config_instance = mock.Mock()
mock_config_instance.GetOptionBool.return_value = False
mock_config_instance.WriteConfig.side_effect = IOError('Test Error')
mock_config.InstanceConfig.return_value = mock_config_instance
instance_setup.InstanceSetup.__init__(mock_setup)
expected_calls = [
mock.call.logger.Logger(
name=mock.ANY, debug=False, facility=mock.ANY),
mock.call.watcher.MetadataWatcher(logger=mock_logger_instance),
mock.call.config.InstanceConfig(),
mock.call.config.InstanceConfig().GetOptionBool(
'InstanceSetup', 'optimize_local_ssd'),
mock.call.config.InstanceConfig().GetOptionBool(
'InstanceSetup', 'set_multiqueue'),
mock.call.config.InstanceConfig().GetOptionBool(
'InstanceSetup', 'network_enabled'),
mock.call.config.InstanceConfig().WriteConfig(),
mock.call.logger.Logger().warning('Test Error'),
]
self.assertEqual(mocks.mock_calls, expected_calls)
self.assertIsNone(mock_setup.metadata_dict)
@mock.patch('google_compute_engine.instance_setup.instance_setup.subprocess')
def testRunScript(self, mock_subprocess):
mock_readline = mock.Mock()
mock_readline.side_effect = [bytes(b'a\n'), bytes(b'b\n'), bytes(b'')]
mock_stdout = mock.Mock()
mock_stdout.readline = mock_readline
mock_process = mock.Mock()
mock_process.poll.return_value = 0
mock_process.stdout = mock_stdout
mock_subprocess.Popen.return_value = mock_process
script = '/tmp/script.py'
instance_setup.InstanceSetup._RunScript(self.mock_setup, script)
expected_calls = [mock.call('a'), mock.call('b')]
self.assertEqual(self.mock_logger.info.mock_calls, expected_calls)
mock_subprocess.Popen.assert_called_once_with(
script, shell=True, stderr=mock_subprocess.STDOUT,
stdout=mock_subprocess.PIPE)
mock_process.poll.assert_called_once_with()
def testGetInstanceId(self):
self.mock_setup.metadata_dict = {'instance': {'attributes': {}, 'id': 123}}
self.assertEqual(
instance_setup.InstanceSetup._GetInstanceId(self.mock_setup), '123')
self.mock_logger.warning.assert_not_called()
def testGetInstanceIdNotFound(self):
self.mock_setup.metadata_dict = {'instance': {'attributes': {}}}
self.assertIsNone(
instance_setup.InstanceSetup._GetInstanceId(self.mock_setup))
self.assertEqual(self.mock_logger.warning.call_count, 1)
@mock.patch('google_compute_engine.instance_setup.instance_setup.file_utils.SetPermissions')
@mock.patch('google_compute_engine.instance_setup.instance_setup.shutil.move')
@mock.patch('google_compute_engine.instance_setup.instance_setup.subprocess.check_call')
@mock.patch('google_compute_engine.instance_setup.instance_setup.tempfile.NamedTemporaryFile')
def testGenerateSshKey(
self, mock_tempfile, mock_call, mock_move, mock_permissions):
mocks = mock.Mock()
mocks.attach_mock(mock_tempfile, 'tempfile')
mocks.attach_mock(mock_call, 'call')
mocks.attach_mock(mock_move, 'move')
mocks.attach_mock(mock_permissions, 'permissions')
mocks.attach_mock(self.mock_logger, 'logger')
key_type = 'key-type'
key_dest = '/key/dest'
temp_dest = '/tmp/dest'
mock_tempfile.return_value = mock_tempfile
mock_tempfile.__enter__.return_value.name = temp_dest
instance_setup.InstanceSetup._GenerateSshKey(
self.mock_setup, key_type, key_dest)
expected_calls = [
mock.call.tempfile(prefix=key_type, delete=True),
mock.call.tempfile.__enter__(),
mock.call.tempfile.__exit__(None, None, None),
mock.call.logger.info(mock.ANY, key_dest),
mock.call.call(
['ssh-keygen', '-t', key_type, '-f', temp_dest, '-N', '', '-q']),
mock.call.move(temp_dest, key_dest),
mock.call.move('%s.pub' % temp_dest, '%s.pub' % key_dest),
mock.call.permissions(key_dest, mode=0o600),
mock.call.permissions('%s.pub' % key_dest, mode=0o644),
]
self.assertEqual(mocks.mock_calls, expected_calls)
@mock.patch('google_compute_engine.instance_setup.instance_setup.subprocess.check_call')
def testGenerateSshKeyProcessError(self, mock_call):
key_type = 'key-type'
key_dest = '/key/dest'
mock_call.side_effect = subprocess.CalledProcessError(1, 'Test')
instance_setup.InstanceSetup._GenerateSshKey(
self.mock_setup, key_type, key_dest)
self.mock_logger.info.assert_called_once_with(mock.ANY, key_dest)
self.mock_logger.warning.assert_called_once_with(mock.ANY, key_dest)
@mock.patch('google_compute_engine.instance_setup.instance_setup.subprocess.call')
@mock.patch('google_compute_engine.instance_setup.instance_setup.os.path.exists')
def testStartSshdSysVinit(self, mock_exists, mock_call):
mocks = mock.Mock()
mocks.attach_mock(mock_exists, 'exists')
mocks.attach_mock(mock_call, 'call')
mock_exists.side_effect = [False, False, True]
instance_setup.InstanceSetup._StartSshd(self.mock_setup)
expected_calls = [
mock.call.exists('/bin/systemctl'),
mock.call.exists('/etc/init.d/ssh'),
mock.call.exists('/etc/init/ssh.conf'),
mock.call.call(['service', 'ssh', 'start']),
mock.call.call(['service', 'ssh', 'reload']),
]
self.assertEqual(mocks.mock_calls, expected_calls)
@mock.patch('google_compute_engine.instance_setup.instance_setup.subprocess.call')
@mock.patch('google_compute_engine.instance_setup.instance_setup.os.path.exists')
def testStartSshdUpstart(self, mock_exists, mock_call):
mocks = mock.Mock()
mocks.attach_mock(mock_exists, 'exists')
mocks.attach_mock(mock_call, 'call')
mock_exists.side_effect = [False, False, False, False, True]
instance_setup.InstanceSetup._StartSshd(self.mock_setup)
expected_calls = [
mock.call.exists('/bin/systemctl'),
mock.call.exists('/etc/init.d/ssh'),
mock.call.exists('/etc/init/ssh.conf'),
mock.call.exists('/etc/init.d/sshd'),
mock.call.exists('/etc/init/sshd.conf'),
mock.call.call(['service', 'sshd', 'start']),
mock.call.call(['service', 'sshd', 'reload']),
]
self.assertEqual(mocks.mock_calls, expected_calls)
@mock.patch('google_compute_engine.instance_setup.instance_setup.subprocess.call')
@mock.patch('google_compute_engine.instance_setup.instance_setup.os.path.exists')
def testStartSshdSystemd(self, mock_exists, mock_call):
mocks = mock.Mock()
mocks.attach_mock(mock_exists, 'exists')
mocks.attach_mock(mock_call, 'call')
mock_exists.return_value = True
instance_setup.InstanceSetup._StartSshd(self.mock_setup)
expected_calls = [mock.call.exists('/bin/systemctl')]
self.assertEqual(mocks.mock_calls, expected_calls)
def testSetSshHostKeys(self):
self.mock_instance_config.GetOptionString.return_value = '123'
mock_instance_id = mock.Mock()
mock_instance_id.return_value = '123'
self.mock_setup._GetInstanceId = mock_instance_id
instance_setup.InstanceSetup._SetSshHostKeys(self.mock_setup)
self.mock_instance_config.SetOption.assert_not_called()
@mock.patch('google_compute_engine.instance_setup.instance_setup.os.listdir')
def testSetSshHostKeysFirstBoot(self, mock_listdir):
self.mock_instance_config.GetOptionString.return_value = None
mock_instance_id = mock.Mock()
mock_instance_id.return_value = '123'
self.mock_setup._GetInstanceId = mock_instance_id
mock_generate_key = mock.Mock()
self.mock_setup._GenerateSshKey = mock_generate_key
mock_listdir.return_value = [
'ssh_config',
'ssh_host_rsa_key',
'ssh_host_dsa_key.pub',
'ssh_host_ed25519_key',
'ssh_host_ed25519_key.pub',
'ssh_host_rsa_key',
'ssh_host_rsa_key.pub',
]
instance_setup.InstanceSetup._SetSshHostKeys(self.mock_setup)
expected_calls = [
mock.call('rsa', '/etc/ssh/ssh_host_rsa_key'),
mock.call('ed25519', '/etc/ssh/ssh_host_ed25519_key'),
mock.call('rsa', '/etc/ssh/ssh_host_rsa_key'),
]
self.assertEqual(mock_generate_key.mock_calls, expected_calls)
self.mock_instance_config.SetOption.assert_called_once_with(
'Instance', 'instance_id', '123')
def testGetNumericProjectId(self):
self.mock_setup.metadata_dict = {
'project': {
'attributes': {},
'numericProjectId': 123,
}
}
self.assertEqual(
instance_setup.InstanceSetup._GetNumericProjectId(self.mock_setup),
'123')
self.mock_logger.warning.assert_not_called()
def testGetNumericProjectIdNotFound(self):
self.mock_setup.metadata_dict = {'project': {'attributes': {}}}
self.assertIsNone(
instance_setup.InstanceSetup._GetNumericProjectId(self.mock_setup))
self.assertEqual(self.mock_logger.warning.call_count, 1)
@mock.patch('google_compute_engine.instance_setup.instance_setup.boto_config.BotoConfig')
def testSetupBotoConfig(self, mock_boto):
mock_project_id = mock.Mock()
mock_project_id.return_value = '123'
self.mock_setup._GetNumericProjectId = mock_project_id
instance_setup.InstanceSetup._SetupBotoConfig(self.mock_setup)
mock_boto.assert_called_once_with('123', debug=False)
@mock.patch('google_compute_engine.instance_setup.instance_setup.boto_config.BotoConfig')
def testSetupBotoConfigLocked(self, mock_boto):
mock_boto.side_effect = IOError('Test Error')
instance_setup.InstanceSetup._SetupBotoConfig(self.mock_setup)
self.mock_logger.warning.assert_called_once_with('Test Error')
if __name__ == '__main__':
unittest.main()
|
Sarsate/compute-image-packages
|
google_compute_engine/instance_setup/tests/instance_setup_test.py
|
Python
|
apache-2.0
| 14,219
|
from setuptools import setup
setup(
name = "smbios_validation_tool",
author = "Xu Han",
author_email = "",
license = "Apache",
url = "https://github.com/google/smbios-validation-tool",
packages=['smbios_validation_tool', 'dmiparse'],
scripts=['smbios_validation'],
)
|
google/smbios-validation-tool
|
setup.py
|
Python
|
apache-2.0
| 296
|
# Copyright (c) 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for DB API."""
import mock
from oslo_config import cfg
from oslo_utils import importutils
from oslo_db import api
from oslo_db import exception
from oslo_db.tests import utils as test_utils
sqla = importutils.try_import('sqlalchemy')
if not sqla:
raise ImportError("Unable to import module 'sqlalchemy'.")
def get_backend():
return DBAPI()
class DBAPI(object):
def _api_raise(self, *args, **kwargs):
"""Simulate raising a database-has-gone-away error
This method creates a fake OperationalError with an ID matching
a valid MySQL "database has gone away" situation. It also decrements
the error_counter so that we can artificially keep track of
how many times this function is called by the wrapper. When
error_counter reaches zero, this function returns True, simulating
the database becoming available again and the query succeeding.
"""
if self.error_counter > 0:
self.error_counter -= 1
orig = sqla.exc.DBAPIError(False, False, False)
orig.args = [2006, 'Test raise operational error']
e = exception.DBConnectionError(orig)
raise e
else:
return True
def api_raise_default(self, *args, **kwargs):
return self._api_raise(*args, **kwargs)
@api.safe_for_db_retry
def api_raise_enable_retry(self, *args, **kwargs):
return self._api_raise(*args, **kwargs)
def api_class_call1(_self, *args, **kwargs):
return args, kwargs
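# Note (illustrative, not part of the original tests): oslo.db only retries
# callables that opt in, which is what api_raise_enable_retry above simulates.
# Application code typically mirrors that pattern:
#
#     @api.safe_for_db_retry
#     def create_record(*args, **kwargs):
#         ...  # a DBConnectionError raised here may be retried by DBAPI
#
# api_raise_default carries no decorator, so it is not retried even when
# use_db_reconnect is enabled; test_raise_connection_error_enabled below
# checks that the error counter drops by exactly one call.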
class DBAPITestCase(test_utils.BaseTestCase):
def test_dbapi_full_path_module_method(self):
dbapi = api.DBAPI('oslo_db.tests.test_api')
result = dbapi.api_class_call1(1, 2, kwarg1='meow')
expected = ((1, 2), {'kwarg1': 'meow'})
self.assertEqual(expected, result)
def test_dbapi_unknown_invalid_backend(self):
self.assertRaises(ImportError, api.DBAPI, 'tests.unit.db.not_existent')
def test_dbapi_lazy_loading(self):
dbapi = api.DBAPI('oslo_db.tests.test_api', lazy=True)
self.assertIsNone(dbapi._backend)
dbapi.api_class_call1(1, 'abc')
self.assertIsNotNone(dbapi._backend)
def test_dbapi_from_config(self):
conf = cfg.ConfigOpts()
dbapi = api.DBAPI.from_config(conf,
backend_mapping={'sqlalchemy': __name__})
self.assertIsNotNone(dbapi._backend)
class DBReconnectTestCase(DBAPITestCase):
def setUp(self):
super(DBReconnectTestCase, self).setUp()
self.test_db_api = DBAPI()
patcher = mock.patch(__name__ + '.get_backend',
return_value=self.test_db_api)
patcher.start()
self.addCleanup(patcher.stop)
def test_raise_connection_error(self):
self.dbapi = api.DBAPI('sqlalchemy', {'sqlalchemy': __name__})
self.test_db_api.error_counter = 5
self.assertRaises(exception.DBConnectionError, self.dbapi._api_raise)
def test_raise_connection_error_decorated(self):
self.dbapi = api.DBAPI('sqlalchemy', {'sqlalchemy': __name__})
self.test_db_api.error_counter = 5
self.assertRaises(exception.DBConnectionError,
self.dbapi.api_raise_enable_retry)
self.assertEqual(4, self.test_db_api.error_counter, 'Unexpected retry')
def test_raise_connection_error_enabled(self):
self.dbapi = api.DBAPI('sqlalchemy',
{'sqlalchemy': __name__},
use_db_reconnect=True)
self.test_db_api.error_counter = 5
self.assertRaises(exception.DBConnectionError,
self.dbapi.api_raise_default)
self.assertEqual(4, self.test_db_api.error_counter, 'Unexpected retry')
def test_retry_one(self):
self.dbapi = api.DBAPI('sqlalchemy',
{'sqlalchemy': __name__},
use_db_reconnect=True,
retry_interval=1)
try:
func = self.dbapi.api_raise_enable_retry
self.test_db_api.error_counter = 1
self.assertTrue(func(), 'Single retry did not succeed.')
except Exception:
self.fail('Single retry raised an un-wrapped error.')
self.assertEqual(
0, self.test_db_api.error_counter,
'Counter not decremented, retry logic probably failed.')
def test_retry_two(self):
self.dbapi = api.DBAPI('sqlalchemy',
{'sqlalchemy': __name__},
use_db_reconnect=True,
retry_interval=1,
inc_retry_interval=False)
try:
func = self.dbapi.api_raise_enable_retry
self.test_db_api.error_counter = 2
self.assertTrue(func(), 'Multiple retry did not succeed.')
except Exception:
self.fail('Multiple retry raised an un-wrapped error.')
self.assertEqual(
0, self.test_db_api.error_counter,
'Counter not decremented, retry logic probably failed.')
def test_retry_until_failure(self):
self.dbapi = api.DBAPI('sqlalchemy',
{'sqlalchemy': __name__},
use_db_reconnect=True,
retry_interval=1,
inc_retry_interval=False,
max_retries=3)
func = self.dbapi.api_raise_enable_retry
self.test_db_api.error_counter = 5
self.assertRaises(
exception.DBError, func,
'Retry of permanent failure did not throw DBError exception.')
self.assertNotEqual(
0, self.test_db_api.error_counter,
'Retry did not stop after sql_max_retries iterations.')
class DBRetryRequestCase(DBAPITestCase):
def test_retry_wrapper_succeeds(self):
@api.wrap_db_retry(max_retries=10, retry_on_request=True)
def some_method():
pass
some_method()
def test_retry_wrapper_reaches_limit(self):
max_retries = 10
@api.wrap_db_retry(max_retries=10, retry_on_request=True)
def some_method(res):
res['result'] += 1
raise exception.RetryRequest(ValueError())
res = {'result': 0}
self.assertRaises(ValueError, some_method, res)
self.assertEqual(max_retries + 1, res['result'])
def test_retry_wrapper_exception_checker(self):
def exception_checker(exc):
return isinstance(exc, ValueError) and exc.args[0] < 5
@api.wrap_db_retry(max_retries=10, retry_on_request=True,
exception_checker=exception_checker)
def some_method(res):
res['result'] += 1
raise ValueError(res['result'])
res = {'result': 0}
self.assertRaises(ValueError, some_method, res)
# our exception checker should have stopped returning True after 5
self.assertEqual(5, res['result'])
@mock.patch.object(DBAPI, 'api_class_call1')
@mock.patch.object(api, 'wrap_db_retry')
def test_mocked_methods_are_not_wrapped(self, mocked_wrap, mocked_method):
dbapi = api.DBAPI('oslo_db.tests.test_api')
dbapi.api_class_call1()
self.assertFalse(mocked_wrap.called)
|
magic0704/oslo.db
|
oslo_db/tests/test_api.py
|
Python
|
apache-2.0
| 8,075
|
import pytest
from etcdb import OperationalError
from etcdb.lock import WriteLock
def test_insert(cursor):
cursor.execute('CREATE TABLE t1(id INT NOT NULL PRIMARY KEY AUTO_INCREMENT, name VARCHAR(255))')
cursor.execute("INSERT INTO t1(id, name) VALUES (1, 'aaa')")
cursor.execute("INSERT INTO t1(id, name) VALUES (2, 'bbb')")
cursor.execute("INSERT INTO t1(id, name) VALUES (3, 'ccc')")
cursor.execute("SELECT id, name FROM t1")
assert cursor.fetchall() == (
('1', 'aaa'),
('2', 'bbb'),
('3', 'ccc'),
)
def test_insert_wrong_lock_raises(cursor):
cursor.execute('CREATE TABLE t1(id INT NOT NULL PRIMARY KEY AUTO_INCREMENT, name VARCHAR(255))')
cursor.execute("INSERT INTO t1(id, name) VALUES (1, 'aaa')")
with pytest.raises(OperationalError):
cursor.execute("INSERT INTO t1(id, name) VALUES (2, 'bbb') USE LOCK 'foo'")
def test_insert_with_lock(cursor, etcdb_connection):
cursor.execute('CREATE TABLE t1(id INT NOT NULL PRIMARY KEY AUTO_INCREMENT, name VARCHAR(255))')
cursor.execute("INSERT INTO t1(id, name) VALUES (1, 'aaa')")
lock = WriteLock(etcdb_connection.client, 'foo', 't1')
lock.acquire()
cursor.execute("INSERT INTO t1(id, name) VALUES (2, 'bbb') USE LOCK '%s'" % lock.id)
lock.release()
cursor.execute("SELECT id, name FROM t1 WHERE id = 2")
assert cursor.fetchall() == (
('2', 'bbb'),
)
def test_insert_doesnt_release_lock(cursor, etcdb_connection):
cursor.execute('CREATE TABLE t1(id INT NOT NULL PRIMARY KEY AUTO_INCREMENT, name VARCHAR(255))')
cursor.execute("INSERT INTO t1(id, name) VALUES (1, 'aaa')")
lock = WriteLock(etcdb_connection.client, 'foo', 't1')
lock.acquire(ttl=0)
cursor.execute("UPDATE t1 SET name = 'bbb' WHERE id = 1 USE LOCK '%s'" % lock.id)
with pytest.raises(OperationalError):
cursor.execute("UPDATE t1 SET name = 'bbb' WHERE id = 1")
|
box/etcdb
|
tests/functional/dml/test_insert.py
|
Python
|
apache-2.0
| 1,931
|
# Copyright 2018 - Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo_config import cfg
from vitrage.common.constants import DatasourceOpts as DSOpts
from vitrage.common.constants import DatasourceProperties as DSProps
from vitrage.common.constants import UpdateMethod
from vitrage.common.constants import VertexProperties as VProps
from vitrage.datasources.nova.host import NOVA_HOST_DATASOURCE
from vitrage.datasources.nova.host.transformer import HostTransformer
from vitrage.datasources.nova.instance import NOVA_INSTANCE_DATASOURCE
from vitrage.datasources.nova.instance.transformer import InstanceTransformer
from vitrage.datasources.prometheus import PROMETHEUS_DATASOURCE
from vitrage.datasources.prometheus.properties import get_label
from vitrage.datasources.prometheus.properties import PrometheusAlertLabels \
as PLabels
from vitrage.datasources.prometheus.properties import \
PrometheusAlertProperties as PProps
from vitrage.datasources.prometheus.properties import PrometheusAlertStatus \
as PAlertStatus
from vitrage.datasources.prometheus.transformer import PrometheusTransformer
from vitrage.datasources.transformer_base import TransformerBase
from vitrage.tests.mocks import mock_transformer
from vitrage.tests.unit.datasources.test_alarm_transformer_base import \
BaseAlarmTransformerTest
# noinspection PyProtectedMember
class PrometheusTransformerTest(BaseAlarmTransformerTest):
OPTS = [
cfg.StrOpt(DSOpts.UPDATE_METHOD,
default=UpdateMethod.PUSH),
]
def setUp(self):
super(PrometheusTransformerTest, self).setUp()
self.transformers = {}
self.conf_reregister_opts(self.OPTS, group=PROMETHEUS_DATASOURCE)
self.transformers[NOVA_HOST_DATASOURCE] = \
HostTransformer(self.transformers)
self.transformers[NOVA_INSTANCE_DATASOURCE] = \
InstanceTransformer(self.transformers)
self.transformers[PROMETHEUS_DATASOURCE] = \
PrometheusTransformer(self.transformers)
def test_create_update_entity_vertex(self):
# Test setup
host1 = 'host1'
instance_id = uuid.uuid4().hex
event_on_host = self._generate_event_on_host(host1)
event_on_instance = self._generate_event_on_instance(host1,
instance_id)
self.assertIsNotNone(event_on_host)
self.assertIsNotNone(event_on_instance)
# Test action
transformer = self.transformers[PROMETHEUS_DATASOURCE]
wrapper_for_host = transformer.transform(event_on_host)
wrapper_for_instance = transformer.transform(event_on_instance)
# Test assertions
self._validate_vertex_props(wrapper_for_host.vertex, event_on_host)
self._validate_vertex_props(wrapper_for_instance.vertex,
event_on_instance)
# Validate the neighbors: only one valid host neighbor
host_entity_key = transformer._create_entity_key(event_on_host)
host_entity_uuid = \
transformer.uuid_from_deprecated_vitrage_id(host_entity_key)
instance_entity_key = transformer._create_entity_key(event_on_instance)
instance_entity_uuid = \
transformer.uuid_from_deprecated_vitrage_id(instance_entity_key)
self._validate_host_neighbor(wrapper_for_host,
host_entity_uuid,
host1)
self._validate_instance_neighbor(wrapper_for_instance,
instance_entity_uuid,
instance_id)
# Validate the expected action on the graph - update or delete
self._validate_graph_action(wrapper_for_host)
self._validate_graph_action(wrapper_for_instance)
def _validate_vertex_props(self, vertex, event):
self._validate_alarm_vertex_props(
vertex, get_label(event, PLabels.ALERT_NAME),
PROMETHEUS_DATASOURCE, event[DSProps.SAMPLE_DATE])
def _generate_event_on_host(self, hostname):
# fake query result to be used by the transformer for determining
# the neighbor
query_result = [{VProps.VITRAGE_TYPE: NOVA_HOST_DATASOURCE,
VProps.ID: hostname}]
labels = {PLabels.SEVERITY: 'critical',
PLabels.INSTANCE: hostname}
update_vals = {TransformerBase.QUERY_RESULT: query_result,
PProps.LABELS: labels}
return self._generate_event(update_vals)
def _generate_event_on_instance(self, hostname, instance_name):
# fake query result to be used by the transformer for determining
# the neighbor
query_result = [{VProps.VITRAGE_TYPE: NOVA_INSTANCE_DATASOURCE,
VProps.ID: instance_name}]
labels = {PLabels.SEVERITY: 'critical',
PLabels.INSTANCE: hostname,
PLabels.DOMAIN: instance_name}
update_vals = {TransformerBase.QUERY_RESULT: query_result,
PProps.LABELS: labels}
return self._generate_event(update_vals)
@staticmethod
def _generate_event(update_vals):
generators = mock_transformer.simple_prometheus_alarm_generators(
update_vals=update_vals)
return mock_transformer.generate_random_events_list(generators)[0]
def _is_erroneous(self, vertex):
return vertex[PProps.STATUS] == PAlertStatus.FIRING
|
openstack/vitrage
|
vitrage/tests/unit/datasources/prometheus/test_prometheus_transformer.py
|
Python
|
apache-2.0
| 6,042
|
import unittest
import sys
import inspect
from robot.running.handlers import _PythonHandler, _JavaHandler, DynamicHandler
from robot import utils
from robot.utils.asserts import *
from robot.running.testlibraries import TestLibrary
from robot.running.dynamicmethods import (
GetKeywordArguments, GetKeywordDocumentation, RunKeyword)
from robot.errors import DataError
from classes import NameLibrary, DocLibrary, ArgInfoLibrary
from ArgumentsPython import ArgumentsPython
if utils.JYTHON:
import ArgumentsJava
def _get_handler_methods(lib):
attrs = [getattr(lib, a) for a in dir(lib) if not a.startswith('_')]
return [a for a in attrs if inspect.ismethod(a)]
def _get_java_handler_methods(lib):
# This hack assumes that all java handlers used start with 'a_' -- easier
# than excluding 'equals' etc. otherwise
return [a for a in _get_handler_methods(lib) if a.__name__.startswith('a_') ]
class LibraryMock:
def __init__(self, name='MyLibrary', scope='GLOBAL'):
self.name = self.orig_name = name
self.scope = scope
class TestPythonHandler(unittest.TestCase):
def test_name(self):
for method in _get_handler_methods(NameLibrary()):
handler = _PythonHandler(LibraryMock('mylib'), method.__name__, method)
assert_equal(handler.name, method.__doc__)
assert_equal(handler.longname, 'mylib.'+method.__doc__)
def test_docs(self):
for method in _get_handler_methods(DocLibrary()):
handler = _PythonHandler(LibraryMock(), method.__name__, method)
assert_equal(handler.doc, method.expected_doc)
assert_equal(handler.shortdoc, method.expected_shortdoc)
def test_arguments(self):
for method in _get_handler_methods(ArgInfoLibrary()):
handler = _PythonHandler(LibraryMock(), method.__name__, method)
args = handler.arguments
argspec = (args.positional, args.defaults, args.varargs, args.kwargs)
expected = eval(method.__doc__)
assert_equal(argspec, expected, method.__name__)
def test_arg_limits(self):
for method in _get_handler_methods(ArgumentsPython()):
handler = _PythonHandler(LibraryMock(), method.__name__, method)
exp_mina, exp_maxa = eval(method.__doc__)
assert_equal(handler.arguments.minargs, exp_mina)
assert_equal(handler.arguments.maxargs, exp_maxa)
def test_getarginfo_getattr(self):
handlers = TestLibrary('classes.GetattrLibrary').handlers
assert_equal(len(handlers), 3)
for handler in handlers:
assert_true(handler.name in ['Foo','Bar','Zap'])
assert_equal(handler.arguments.minargs, 0)
assert_equal(handler.arguments.maxargs, sys.maxsize)
class TestDynamicHandlerCreation(unittest.TestCase):
def test_none_doc(self):
self._assert_doc(None, '')
def test_empty_doc(self):
self._assert_doc('')
def test_non_empty_doc(self):
self._assert_doc('This is some documentation')
def test_non_ascii_doc(self):
self._assert_doc(u'P\xe4iv\xe4\xe4')
if not utils.IRONPYTHON:
def test_with_utf8_doc(self):
doc = u'P\xe4iv\xe4\xe4'
self._assert_doc(doc.encode('UTF-8'), doc)
def test_invalid_doc_type(self):
self._assert_fails('Return value must be string.', doc=True)
def test_none_argspec(self):
self._assert_spec(None, maxargs=sys.maxsize, vararg='varargs', kwarg=False)
def test_none_argspec_when_kwargs_supported(self):
self._assert_spec(None, maxargs=sys.maxsize, vararg='varargs', kwarg='kwargs')
def test_empty_argspec(self):
self._assert_spec([])
def test_mandatory_args(self):
for argspec in [['arg'], ['arg1', 'arg2', 'arg3']]:
self._assert_spec(argspec, len(argspec), len(argspec), argspec)
def test_only_default_args(self):
self._assert_spec(['defarg1=value', 'defarg2=defvalue'], 0, 2,
['defarg1', 'defarg2'], ['value', 'defvalue'])
def test_default_value_may_contain_equal_sign(self):
self._assert_spec(['d=foo=bar'], 0, 1, ['d'], ['foo=bar'])
def test_varargs(self):
self._assert_spec(['*vararg'], 0, sys.maxsize, vararg='vararg')
def test_kwargs(self):
self._assert_spec(['**kwarg'], 0, 0, kwarg='kwarg')
def test_varargs_and_kwargs(self):
self._assert_spec(['*vararg', '**kwarg'],
0, sys.maxsize, vararg='vararg', kwarg='kwarg')
def test_integration(self):
self._assert_spec(['arg', 'default=value'], 1, 2,
['arg', 'default'], ['value'])
self._assert_spec(['arg', 'default=value', '*var'], 1, sys.maxsize,
['arg', 'default'], ['value'], 'var')
self._assert_spec(['arg', 'default=value', '**kw'], 1, 2,
['arg', 'default'], ['value'], None, 'kw')
self._assert_spec(['arg', 'default=value', '*var', '**kw'], 1, sys.maxsize,
['arg', 'default'], ['value'], 'var', 'kw')
def test_invalid_argspec_type(self):
for argspec in [True, [1, 2]]:
self._assert_fails("Return value must be list of strings.", argspec)
def test_mandatory_arg_after_default_arg(self):
for argspec in [['d=v', 'arg'], ['a', 'b', 'c=v', 'd']]:
self._assert_fails('Invalid argument specification: '
'Non-default argument after default arguments.',
argspec)
def test_positional_after_vararg(self):
for argspec in [['*foo', 'arg'], ['arg', '*var', 'arg'],
['a', 'b=d', '*var', 'c'], ['*var', '*vararg']]:
self._assert_fails('Invalid argument specification: '
'Positional argument after varargs.', argspec)
def test_kwarg_not_last(self):
for argspec in [['**foo', 'arg'], ['arg', '**kw', 'arg'],
['a', 'b=d', '**kw', 'c'], ['**kw', '*vararg'],
['**kw', '**kwarg']]:
self._assert_fails('Invalid argument specification: '
'Only last argument can be kwargs.', argspec)
def test_missing_kwargs_support(self):
self._assert_fails("Too few 'run_keyword' method parameters"
" for **kwargs support.",
['**kwargs'])
def _assert_doc(self, doc, expected=None):
expected = doc if expected is None else expected
assert_equal(self._create_handler(doc=doc).doc, expected)
def _assert_spec(self, argspec, minargs=0, maxargs=0, positional=[],
defaults=[], vararg=None, kwarg=None):
if kwarg is None:
kwargs_support_modes = [True, False]
elif kwarg is False:
kwargs_support_modes = [False]
kwarg = None
else:
kwargs_support_modes = [True]
for kwargs_support in kwargs_support_modes:
arguments = self._create_handler(argspec,
kwargs_support=kwargs_support
).arguments
assert_equal(arguments.minargs, minargs)
assert_equal(arguments.maxargs, maxargs)
assert_equal(arguments.positional, positional)
assert_equal(arguments.defaults, defaults)
assert_equal(arguments.varargs, vararg)
assert_equal(arguments.kwargs, kwarg)
def _assert_fails(self, error, argspec=None, doc=None):
assert_raises_with_msg(DataError, error,
self._create_handler, argspec, doc)
def _create_handler(self, argspec=None, doc=None, kwargs_support=False):
lib = LibraryMock('TEST CASE')
if kwargs_support:
lib.run_keyword = lambda name, args, kwargs: None
else:
lib.run_keyword = lambda name, args: None
lib.run_keyword.__name__ = 'run_keyword'
doc = GetKeywordDocumentation(lib)._handle_return_value(doc)
argspec = GetKeywordArguments(lib)._handle_return_value(argspec)
return DynamicHandler(lib, 'mock', RunKeyword(lib), doc, argspec)
if utils.JYTHON:
handlers = dict((method.__name__, method) for method in
_get_java_handler_methods(ArgumentsJava('Arg', ['varargs'])))
class TestJavaHandler(unittest.TestCase):
def test_arg_limits_no_defaults_or_varargs(self):
for count in [0, 1, 3]:
method = handlers['a_%d' % count]
handler = _JavaHandler(LibraryMock(), method.__name__, method)
assert_equal(handler.arguments.minargs, count)
assert_equal(handler.arguments.maxargs, count)
def test_arg_limits_with_varargs(self):
for count in [0, 1]:
method = handlers['a_%d_n' % count]
handler = _JavaHandler(LibraryMock(), method.__name__, method)
assert_equal(handler.arguments.minargs, count)
assert_equal(handler.arguments.maxargs, sys.maxsize)
def test_arg_limits_with_defaults(self):
# defaults i.e. multiple signatures
for mina, maxa in [(0, 1), (1, 3)]:
method = handlers['a_%d_%d' % (mina, maxa)]
handler = _JavaHandler(LibraryMock(), method.__name__, method)
assert_equal(handler.arguments.minargs, mina)
assert_equal(handler.arguments.maxargs, maxa)
class TestArgumentCoercer(unittest.TestCase):
def setUp(self):
self.lib = TestLibrary('ArgTypeCoercion', ['42', 'true'])
def test_coercion_in_constructor(self):
instance = self.lib.get_instance()
assert_equal(instance.myInt, 42)
assert_equal(instance.myBool, True)
def test_coercing_to_integer(self):
self._test_coercion(self._handler_named('intArgument'),
['1'], [1])
def test_coercing_to_boolean(self):
handler = self._handler_named('booleanArgument')
self._test_coercion(handler, ['True'], [True])
        self._test_coercion(handler, ['FALSE'], [False])
def test_coercing_to_real_number(self):
self._test_coercion(self._handler_named('doubleArgument'),
['1.42'], [1.42])
self._test_coercion(self._handler_named('floatArgument'),
['-9991.098'], [-9991.098])
def test_coercion_with_compatible_types(self):
self._test_coercion(self._handler_named('coercableKeywordWithCompatibleTypes'),
['9999', '-42', 'FaLsE', '31.31'],
[9999, -42, False, 31.31])
def test_arguments_that_are_not_strings_are_not_coerced(self):
self._test_coercion(self._handler_named('intArgument'),
[self.lib], [self.lib])
self._test_coercion(self._handler_named('booleanArgument'),
[42], [42])
def test_coercion_fails_with_reasonable_message(self):
exp_msg = 'Argument at position 1 cannot be coerced to %s.'
self._test_coercion_fails(self._handler_named('intArgument'),
exp_msg % 'integer')
self._test_coercion_fails(self._handler_named('booleanArgument'),
exp_msg % 'boolean')
self._test_coercion_fails(self._handler_named('floatArgument'),
exp_msg % 'floating point number')
def test_no_arg_no_coercion(self):
self._test_coercion(self._handler_named('noArgument'), [], [])
def test_coercing_multiple_arguments(self):
self._test_coercion(self._handler_named('coercableKeyword'),
['10.0', '42', 'tRUe'], [10.0, 42, True])
def test_coercion_is_not_done_with_conflicting_signatures(self):
self._test_coercion(self._handler_named('unCoercableKeyword'),
['True', '42'], ['True', '42'])
def test_coercable_and_uncoercable_args_in_same_kw(self):
self._test_coercion(self._handler_named('coercableAndUnCoercableArgs'),
['1', 'False', '-23', '0'], ['1', False, -23, '0'])
def _handler_named(self, name):
return self.lib.handlers[name]
def _test_coercion(self, handler, args, expected):
assert_equal(handler._arg_coercer.coerce(args, {}), expected)
def _test_coercion_fails(self, handler, expected_message):
assert_raises_with_msg(ValueError, expected_message,
handler._arg_coercer.coerce, ['invalid'], {})
if __name__ == '__main__':
unittest.main()
|
synsun/robotframework
|
utest/running/test_handlers.py
|
Python
|
apache-2.0
| 13,048
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Property classes for building wrapper classes for Pikov nodes.
We want to wrap our semantic graph with Python classes. This allows us to
interact with Python objects to modify the guid_map.
These classes encode the core types used in the semantic graph. When classes
use these properties, the guid_map is updated with the correct serialization
of the property.
"""
from .core import Int64Node, StringNode
class AbstractSemanticGraphProperty(object):
def __init__(self, label):
self._label = label
def from_node(self, obj, value):
raise NotImplementedError()
def to_node(self, value):
raise NotImplementedError()
def __get__(self, obj, type=None):
return self.from_node(obj, obj[self._label])
def __set__(self, obj, value):
obj[self._label] = self.to_node(value)
class UnspecifiedProperty(AbstractSemanticGraphProperty):
def from_node(self, obj, value):
        return obj._graph.get_value(obj, self._label)
def to_node(self, value):
        # Value should already be a Node.
return value
class GuidProperty(AbstractSemanticGraphProperty):
def __init__(self, label, cls):
super().__init__(label)
self._cls = cls
def from_node(self, obj, value):
if value is None:
return None
return self._cls(obj._graph, guid=value.guid)
def to_node(self, value):
        # Value should already be a GuidNode.
return value
def make_guid_property(wrapped):
def __init__(self, label):
GuidProperty.__init__(self, label, wrapped)
return type(
wrapped.__name__ + "Property",
(GuidProperty,),
{
"__init__": __init__,
}
)
class ScalarProperty(AbstractSemanticGraphProperty):
def from_node(self, obj, value):
if value is None:
return None
return value.value
class Int64Property(ScalarProperty):
def to_node(self, value):
if value is None:
return None
return Int64Node(value)
class StringProperty(ScalarProperty):
def to_node(self, value):
if value is None:
return None
return StringNode(value)
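# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): it shows how these
# descriptors are meant to be attached to wrapper classes. `FakeNode` below is
# a stand-in invented for this example; real wrapper classes are backed by
# guid-map nodes that support __getitem__/__setitem__ with label keys, and the
# sketch assumes Int64Node/StringNode expose a `.value` attribute, as
# ScalarProperty.from_node implies.
# ---------------------------------------------------------------------------
if __name__ == "__main__":  # pragma: no cover
    class FakeNode:
        """Minimal dict-backed stand-in for a semantic graph node."""
        def __init__(self):
            self._labels = {}
        def __getitem__(self, label):
            return self._labels.get(label)
        def __setitem__(self, label, value):
            self._labels[label] = value
    class Sprite(FakeNode):
        # Writes are serialized to Int64Node/StringNode; reads unwrap them
        # back to plain Python values via ScalarProperty.from_node.
        width = Int64Property("width")
        name = StringProperty("name")
    sprite = Sprite()
    sprite.width = 16
    sprite.name = "hero"
    print(sprite.width, sprite.name)  # -> 16 hero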
|
google/pikov
|
python/pikov/properties.py
|
Python
|
apache-2.0
| 2,766
|
import os
from ruamel import yaml
import great_expectations as ge
from great_expectations.core.batch import BatchRequest, RuntimeBatchRequest
redshift_username = os.environ.get("REDSHIFT_USERNAME")
redshift_password = os.environ.get("REDSHIFT_PASSWORD")
redshift_host = os.environ.get("REDSHIFT_HOST")
redshift_port = os.environ.get("REDSHIFT_PORT")
redshift_database = os.environ.get("REDSHIFT_DATABASE")
redshift_sslmode = os.environ.get("REDSHIFT_SSLMODE")
CONNECTION_STRING = f"postgresql+psycopg2://{redshift_username}:{redshift_password}@{redshift_host}:{redshift_port}/{redshift_database}?sslmode={redshift_sslmode}"
# This utility is not for general use. It is only to support testing.
from tests.test_utils import load_data_into_test_database
load_data_into_test_database(
table_name="taxi_data",
csv_path="./data/yellow_tripdata_sample_2019-01.csv",
connection_string=CONNECTION_STRING,
)
context = ge.get_context()
datasource_yaml = f"""
name: my_redshift_datasource
class_name: Datasource
execution_engine:
class_name: SqlAlchemyExecutionEngine
connection_string: postgresql+psycopg2://<USER_NAME>:<PASSWORD>@<HOST>:<PORT>/<DATABASE>?sslmode=<SSLMODE>
data_connectors:
default_runtime_data_connector_name:
class_name: RuntimeDataConnector
batch_identifiers:
- default_identifier_name
default_inferred_data_connector_name:
class_name: InferredAssetSqlDataConnector
include_schema_name: true
"""
# Please note this override is only to provide good UX for docs and tests.
# In normal usage you'd set your path directly in the yaml above.
datasource_yaml = datasource_yaml.replace(
"postgresql+psycopg2://<USER_NAME>:<PASSWORD>@<HOST>:<PORT>/<DATABASE>?sslmode=<SSLMODE>",
CONNECTION_STRING,
)
context.test_yaml_config(datasource_yaml)
context.add_datasource(**yaml.load(datasource_yaml))
# First test for RuntimeBatchRequest using a query
batch_request = RuntimeBatchRequest(
datasource_name="my_redshift_datasource",
data_connector_name="default_runtime_data_connector_name",
data_asset_name="default_name", # this can be anything that identifies this data
runtime_parameters={"query": "SELECT * from taxi_data LIMIT 10"},
batch_identifiers={"default_identifier_name": "default_identifier"},
)
context.create_expectation_suite(
expectation_suite_name="test_suite", overwrite_existing=True
)
validator = context.get_validator(
batch_request=batch_request, expectation_suite_name="test_suite"
)
print(validator.head())
# NOTE: The following code is only for testing and can be ignored by users.
assert isinstance(validator, ge.validator.validator.Validator)
# Second test for BatchRequest naming a table
batch_request = BatchRequest(
datasource_name="my_redshift_datasource",
data_connector_name="default_inferred_data_connector_name",
data_asset_name="taxi_data", # this is the name of the table you want to retrieve
)
context.create_expectation_suite(
expectation_suite_name="test_suite", overwrite_existing=True
)
validator = context.get_validator(
batch_request=batch_request, expectation_suite_name="test_suite"
)
print(validator.head())
# NOTE: The following code is only for testing and can be ignored by users.
assert isinstance(validator, ge.validator.validator.Validator)
assert [ds["name"] for ds in context.list_datasources()] == ["my_redshift_datasource"]
assert "taxi_data" in set(
context.get_available_data_asset_names()["my_redshift_datasource"][
"default_inferred_data_connector_name"
]
)
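# ---------------------------------------------------------------------------
# Hedged sketch of a typical next step (not part of the original example):
# once a Validator is in hand you would normally add expectations to it and
# persist the suite, e.g.
#
#   validator.expect_column_values_to_not_be_null(column="passenger_count")
#   validator.save_expectation_suite(discard_failed_expectations=False)
#
# The column name "passenger_count" is assumed here for illustration; use a
# column that actually exists in your table.
# ---------------------------------------------------------------------------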
|
great-expectations/great_expectations
|
tests/integration/docusaurus/connecting_to_your_data/database/redshift_yaml_example.py
|
Python
|
apache-2.0
| 3,566
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Data preprocessing and augmentations."""
import functools
from typing import Any, Mapping, Text
import dm_pix as pix
import jax
import jax.numpy as jnp
# typing
JaxBatch = Mapping[Text, jnp.ndarray]
ConfigDict = Mapping[Text, Any]
augment_config = dict(
view1=dict(
random_flip=False, # Random left/right flip
color_transform=dict(
apply_prob=1.0,
# Range of jittering
brightness=0.4,
contrast=0.4,
saturation=0.2,
hue=0.1,
# Probability of applying color jittering
color_jitter_prob=0.8,
# Probability of converting to grayscale
to_grayscale_prob=0.2,
# Shuffle the order of color transforms
shuffle=True),
gaussian_blur=dict(
apply_prob=1.0,
# Kernel size ~ image_size / blur_divider
blur_divider=10.,
# Kernel distribution
sigma_min=0.1,
sigma_max=2.0),
solarize=dict(apply_prob=0.0, threshold=0.5),
),
view2=dict(
random_flip=False,
color_transform=dict(
apply_prob=1.0,
brightness=0.4,
contrast=0.4,
saturation=0.2,
hue=0.1,
color_jitter_prob=0.8,
to_grayscale_prob=0.2,
shuffle=True),
gaussian_blur=dict(
apply_prob=0.1, blur_divider=10., sigma_min=0.1, sigma_max=2.0),
solarize=dict(apply_prob=0.2, threshold=0.5),
))
def postprocess(inputs: JaxBatch, rng: jnp.ndarray):
"""Apply the image augmentations to crops in inputs (view1 and view2)."""
def _postprocess_image(
images: jnp.ndarray,
rng: jnp.ndarray,
presets: ConfigDict,
  ) -> jnp.ndarray:
"""Applies augmentations in post-processing.
Args:
images: an NHWC tensor (with C=3), with float values in [0, 1].
rng: a single PRNGKey.
presets: a dict of presets for the augmentations.
Returns:
      An NHWC tensor of the augmented images for a single view.
"""
flip_rng, color_rng, blur_rng, solarize_rng = jax.random.split(rng, 4)
out = images
if presets['random_flip']:
out = random_flip(out, flip_rng)
if presets['color_transform']['apply_prob'] > 0:
out = color_transform(out, color_rng, **presets['color_transform'])
if presets['gaussian_blur']['apply_prob'] > 0:
out = gaussian_blur(out, blur_rng, **presets['gaussian_blur'])
if presets['solarize']['apply_prob'] > 0:
out = solarize(out, solarize_rng, **presets['solarize'])
out = jnp.clip(out, 0., 1.)
return jax.lax.stop_gradient(out)
rng1, rng2 = jax.random.split(rng, num=2)
view1 = _postprocess_image(inputs['view1'], rng1, augment_config['view1'])
view2 = _postprocess_image(inputs['view2'], rng2, augment_config['view2'])
outputs = dict(view1=view1, view2=view2, labels=inputs['labels'])
for k in ['fh_segmentations1', 'fh_segmentations2',
'gt_segmentations1', 'gt_segmentations2']:
if k in inputs:
outputs[k] = inputs[k]
return outputs
def _maybe_apply(apply_fn, inputs, rng, apply_prob):
should_apply = jax.random.uniform(rng, shape=()) <= apply_prob
return jax.lax.cond(should_apply, inputs, apply_fn, inputs, lambda x: x)
def _random_gaussian_blur(image, rng, kernel_size, padding, sigma_min,
sigma_max, apply_prob):
"""Applies a random gaussian blur."""
apply_rng, transform_rng = jax.random.split(rng)
def _apply(image):
sigma_rng, = jax.random.split(transform_rng, 1)
sigma = jax.random.uniform(
sigma_rng,
shape=(),
minval=sigma_min,
maxval=sigma_max,
dtype=jnp.float32)
return pix.gaussian_blur(image, sigma, kernel_size, padding=padding)
return _maybe_apply(_apply, image, apply_rng, apply_prob)
def _color_transform_single_image(image, rng, brightness, contrast, saturation,
hue, to_grayscale_prob, color_jitter_prob,
apply_prob, shuffle):
"""Applies color jittering to a single image."""
apply_rng, transform_rng = jax.random.split(rng)
perm_rng, b_rng, c_rng, s_rng, h_rng, cj_rng, gs_rng = jax.random.split(
transform_rng, 7)
# Whether the transform should be applied at all.
should_apply = jax.random.uniform(apply_rng, shape=()) <= apply_prob
# Whether to apply grayscale transform.
should_apply_gs = jax.random.uniform(gs_rng, shape=()) <= to_grayscale_prob
# Whether to apply color jittering.
should_apply_color = jax.random.uniform(cj_rng, shape=()) <= color_jitter_prob
  # Helper that wraps fn so it is applied only for a matching index.
def _make_cond(fn, idx):
def identity_fn(unused_rng, x):
return x
def cond_fn(args, i):
def clip(args):
return jax.tree_map(lambda arg: jnp.clip(arg, 0., 1.), args)
out = jax.lax.cond(should_apply & should_apply_color & (i == idx), args,
lambda a: clip(fn(*a)), args,
lambda a: identity_fn(*a))
return jax.lax.stop_gradient(out)
return cond_fn
random_brightness = functools.partial(
pix.random_brightness, max_delta=brightness)
random_contrast = functools.partial(
pix.random_contrast, lower=1-contrast, upper=1+contrast)
random_hue = functools.partial(pix.random_hue, max_delta=hue)
random_saturation = functools.partial(
pix.random_saturation, lower=1-saturation, upper=1+saturation)
to_grayscale = functools.partial(pix.rgb_to_grayscale, keep_dims=True)
random_brightness_cond = _make_cond(random_brightness, idx=0)
random_contrast_cond = _make_cond(random_contrast, idx=1)
random_saturation_cond = _make_cond(random_saturation, idx=2)
random_hue_cond = _make_cond(random_hue, idx=3)
def _color_jitter(x):
if shuffle:
order = jax.random.permutation(perm_rng, jnp.arange(4, dtype=jnp.int32))
else:
order = range(4)
for idx in order:
if brightness > 0:
x = random_brightness_cond((b_rng, x), idx)
if contrast > 0:
x = random_contrast_cond((c_rng, x), idx)
if saturation > 0:
x = random_saturation_cond((s_rng, x), idx)
if hue > 0:
x = random_hue_cond((h_rng, x), idx)
return x
out_apply = _color_jitter(image)
out_apply = jax.lax.cond(should_apply & should_apply_gs, out_apply,
to_grayscale, out_apply, lambda x: x)
return jnp.clip(out_apply, 0., 1.)
def random_flip(images, rng):
rngs = jax.random.split(rng, images.shape[0])
return jax.vmap(pix.random_flip_left_right)(rngs, images)
def color_transform(images,
rng,
brightness=0.8,
contrast=0.8,
saturation=0.8,
hue=0.2,
color_jitter_prob=0.8,
to_grayscale_prob=0.2,
apply_prob=1.0,
shuffle=True):
"""Applies color jittering and/or grayscaling to a batch of images.
Args:
images: an NHWC tensor, with C=3.
rng: a single PRNGKey.
brightness: the range of jitter on brightness.
contrast: the range of jitter on contrast.
saturation: the range of jitter on saturation.
hue: the range of jitter on hue.
color_jitter_prob: the probability of applying color jittering.
to_grayscale_prob: the probability of converting the image to grayscale.
apply_prob: the probability of applying the transform to a batch element.
shuffle: whether to apply the transforms in a random order.
Returns:
A NHWC tensor of the transformed images.
"""
rngs = jax.random.split(rng, images.shape[0])
jitter_fn = functools.partial(
_color_transform_single_image,
brightness=brightness,
contrast=contrast,
saturation=saturation,
hue=hue,
color_jitter_prob=color_jitter_prob,
to_grayscale_prob=to_grayscale_prob,
apply_prob=apply_prob,
shuffle=shuffle)
return jax.vmap(jitter_fn)(images, rngs)
def gaussian_blur(images,
rng,
blur_divider=10.,
sigma_min=0.1,
sigma_max=2.0,
apply_prob=1.0):
"""Applies gaussian blur to a batch of images.
Args:
images: an NHWC tensor, with C=3.
rng: a single PRNGKey.
blur_divider: the blurring kernel will have size H / blur_divider.
sigma_min: the minimum value for sigma in the blurring kernel.
sigma_max: the maximum value for sigma in the blurring kernel.
apply_prob: the probability of applying the transform to a batch element.
Returns:
A NHWC tensor of the blurred images.
"""
rngs = jax.random.split(rng, images.shape[0])
kernel_size = images.shape[1] / blur_divider
blur_fn = functools.partial(
_random_gaussian_blur,
kernel_size=kernel_size,
padding='SAME',
sigma_min=sigma_min,
sigma_max=sigma_max,
apply_prob=apply_prob)
return jax.vmap(blur_fn)(images, rngs)
def _solarize_single_image(image, rng, threshold, apply_prob):
solarize_fn = functools.partial(pix.solarize, threshold=threshold)
return _maybe_apply(solarize_fn, image, rng, apply_prob)
def solarize(images, rng, threshold=0.5, apply_prob=1.0):
"""Applies solarization.
Args:
images: an NHWC tensor (with C=3).
rng: a single PRNGKey.
threshold: the solarization threshold.
apply_prob: the probability of applying the transform to a batch element.
Returns:
A NHWC tensor of the transformed images.
"""
rngs = jax.random.split(rng, images.shape[0])
solarize_fn = functools.partial(
_solarize_single_image, threshold=threshold, apply_prob=apply_prob)
return jax.vmap(solarize_fn)(images, rngs)
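# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). It exercises
# `random_flip`, which relies only on dm_pix and jax.vmap, on a dummy NHWC
# batch. `postprocess` expects a dict with 'view1', 'view2' and 'labels'
# entries of the same layout plus a single PRNGKey; note that the conditional
# transforms in this file use the operand-style `jax.lax.cond` signature of
# the JAX version this code was written against.
# ---------------------------------------------------------------------------
if __name__ == "__main__":  # pragma: no cover
  rng = jax.random.PRNGKey(0)
  images = jnp.full((4, 32, 32, 3), 0.5, dtype=jnp.float32)  # dummy batch
  flipped = random_flip(images, rng)
  print(flipped.shape)  # (4, 32, 32, 3)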
|
deepmind/detcon
|
utils/augmentations.py
|
Python
|
apache-2.0
| 10,529
|
"""
gene.py implements the methods related to system recommendation.
@author: Bowen
"""
from system.models import gene, reaction, compound, reaction_compound, compound_gene, pathway, pathway_compound, organism
from system.fasta_reader import parse_fasta_str
from elasticsearch import Elasticsearch
import traceback
import urllib2
import json
from django.db.models import Q
def search_compound(keyword):
"""
search compound based on the keyword
    @param keyword: the keyword that the user typed, which is used in the search
    @type keyword: str
    @return: a list of matched compounds
@rtype: list
"""
es = Elasticsearch()
result = format_fuzzy_result(fuzzy_search_compound(es, keyword))
return result
def fuzzy_search_compound(es, keyword):
"""
fuzzy search compound based on the keyword with elasticsearch
@param es: the elasticsearch object
@param keyword: the search keyword
@type es: Elasticsearch
@type keyword: str
    @return: a dict generated by Elasticsearch that contains the search result
@rtype: dict
"""
query_body = {
"from" : 0,
"size" : 20,
"query" : {
"fuzzy_like_this" : {
"fields" : ["name"],
"like_text" : keyword,
"max_query_terms" : 20
}
}
}
result = es.search(index="biodesigners", doc_type="compounds", body=query_body)
return result
def format_fuzzy_result(es_result):
"""
format the es search result to front end processable format
@param es_result: the es search result
@type es_result: dict
    @return: the front-end processable format, which will be like this::
[{'compound_id': id, 'name': name},...]
@rtype: list
"""
compound_result = es_result['hits']['hits']
result = list()
if len(compound_result) != 0:
for compound_item in compound_result:
info = compound_item['_source']
compound_info = {
'compound_id': info["compound_id"],
'name': info['name'],
}
result.append(compound_info)
return result
def get_gene_info(gid):
"""
get gene information from the database
@param gid: the gene id
    @type gid: str
    @return: a tuple of (success flag, gene information dict)
    @rtype: tuple
"""
try:
gene_obj = gene.objects.get(gene_id=gid)
result = {
'gene_id': gene_obj.gene_id,
'name': gene_obj.name,
'definition': gene_obj.definition,
'organism_short': gene_obj.organism_short,
'organism': gene_obj.organism
}
return True, result
except:
traceback.print_exc()
return False, None
def get_compound_info(cid):
"""
get a specific compound's information
@param cid: compound id
@type cid: str
    @return: a tuple of (whether the compound could be retrieved, the compound information)
    @rtype: tuple
"""
try:
compound_obj = compound.objects.get(compound_id=cid)
result = {
'compound_id' : compound_obj.compound_id,
'name': compound_obj.name,
'nicknames' : compound_obj.nicknames.replace('_', '\n'),
'formula' : compound_obj.formula,
'exact_mass' : compound_obj.exact_mass,
'mol_weight' : compound_obj.mol_mass
}
return True, result
except:
traceback.print_exc()
return False, None
class gene_graph:
"""
    gene graph, including calculation and generation of the gene & protein relation graph
"""
def __init__(self, cid_list, ogm):
"""
constructor for gene_graph class
@param cid_list: compound id list
@type cid_list: str
@param ogm: organisms
@type ogm:str
"""
if cid_list.startswith('_'):
cid_list = cid_list[1:]
if cid_list.endswith('_'):
cid_list = cid_list[:-1]
self.cid_list = cid_list.split('_')
self.nodes = list()
self.edges = list()
self.index_dict = dict()
self.index = 0
if ogm != None:
if ogm.startswith('_'):
ogm = ogm[1:]
if ogm.endswith('_'):
ogm = ogm[:-1]
self.organisms = ogm.split('_')
else:
self.organisms = None
def get_compound_object(self, cid):
"""
get compound object by compound id
@param cid: compound id
@type cid: str
@return: compound object or none if not found
@rtype: compound
"""
try:
compound_obj = compound.objects.get(compound_id=cid)
return compound_obj
except:
return None
def retrive_gene_detain(self, gid):
"""
        get gene detail data from NCBI
        @param gid: gene id
        @type gid: str
        @return: gene information in a dict, or None on failure
@rtype: dict
"""
#get information from ncbi
baseUrl = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esummary.fcgi?db=gene&retmode=json&version=2.0&id='
try:
req = urllib2.Request(baseUrl + gid)
response = urllib2.urlopen(req)
resStr = response.read()
result = json.loads(resStr)
infos = result['result'][gid]
detail_info = dict()
detail_info['name'] = infos['name']
detail_info['definition'] = infos['description']
detail_info['organism'] = infos['organism']['scientificname']
return detail_info
except:
traceback.print_exc()
return None
def related_compound(self, cid):
"""
        find a compound's related compounds
        @param cid: compound id
        @type cid: str
        @return: list of related compounds
@rtype: list
"""
compound_obj = self.get_compound_object(cid)
if self.organisms != None:
organism_pathway_id_list = pathway.objects.filter(organism_id__in=self.organisms).values_list('pathway_id', flat=True)
else:
organism_pathway_id_list = pathway.objects.all()
valued_pathway_id_list = pathway_compound.objects.filter(pathway_id__in=organism_pathway_id_list, compound=compound_obj)
valued_compound_list = pathway_compound.objects.filter(Q(pathway_id__in=valued_pathway_id_list), ~Q(compound=compound_obj)).values_list('compound', flat=True)
compound_list = compound.objects.filter(compound_id__in=valued_compound_list)
return compound_list
def create_node(self, name, id):
"""
create a node (gene or compound) in the graph
@param name: name for the node
@param id: id for the node
@type name : str
@type id : str
"""
node_info = {
'name': name,
'id': id
}
self.nodes.append(node_info)
if id in self.index_dict.keys():
return True
self.index_dict[id] = self.index
self.index += 1
return True
def create_n_link(self, center_node, compound_obj):
"""
create nodes and link them
        @param center_node: id of the source (center) node
        @type center_node: str
@param compound_obj: compound object
@type compound_obj: compound
"""
gene_list = self.search_gene(compound_obj)
for gene_id in gene_list:
try:
gene_obj = gene.objects.get(gene_id=gene_id)
if self.create_node(gene_obj.name, gene_obj.gene_id):
edge_info = {
'source' : self.index_dict[center_node],
'target' : self.index_dict[gene_obj.gene_id],
'relation' : compound_obj.name
}
self.edges.append(edge_info)
except:
traceback.print_exc()
pass
return gene_list[0]
def get_or_create_gene(self, gid):
"""
        look up a gene in the database; if found, return it, otherwise fetch it from NCBI
        @param gid: gene id
        @type gid: str
        @return: gene object, or None on failure
@rtype: gene
"""
#get in database
try:
gene_obj = gene.objects.get(gene_id=gid)
return gene_obj
except:
#get from ncbi
baseUrl = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=nuccore&rettype=fasta&id='
req = urllib2.Request(baseUrl + gid)
response = urllib2.urlopen(req)
resStr = response.read()
gene_dict = parse_fasta_str(resStr)
for gn in gene_dict.keys():
gid = gn.split('|')[1]
#get detail information
new_gene_obj = gene(gene_id=gid)
detail_info = self.retrive_gene_detain(gid)
if detail_info == None:
continue
new_gene_obj.name = detail_info['name']
new_gene_obj.definition = detail_info['definition']
new_gene_obj.organism = detail_info['organism']
new_gene_obj.ntseq = gene_dict[gn]
new_gene_obj.ntseq_length = len(gene_dict[gn])
try:
new_gene_obj.save()
return new_gene_obj
except:
pass
return None
def save_relation_to_db(self, geneIdList, compound_obj):
"""
save relation between compound_obj and gene to database
@param geneIdList: gene id in a list
@type geneIdList: list
@param compound_obj: compound object
@type compound_obj: compound
"""
#create new obj
for gid in geneIdList:
new_rela_obj = compound_gene(compound=compound_obj)
gene_obj = self.get_or_create_gene(gid)
if gene_obj == None:
continue
new_rela_obj.gene = gene_obj
try:
new_rela_obj.save()
except:
pass
def search_gene(self, compound_obj):
"""
        find genes related to a compound
        @param compound_obj: the compound object
        @type compound_obj: compound
        @return: ids of related genes
@rtype: list
"""
#search in database
obj_list = compound_gene.objects.filter(compound=compound_obj)
if len(obj_list) != 0:
geneIdList = list()
for obj in obj_list:
geneIdList.append(obj.gene.gene_id)
return geneIdList[:2]
else:
baseGeneFindUrl = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db=gene&retmode=json&term='
try:
req = urllib2.Request(baseGeneFindUrl + compound_obj.name)
response = urllib2.urlopen(req)
resStr = response.read()
except:
traceback.print_exc()
return None
if len(resStr) == 0:
return None
result = json.loads(resStr)
geneIdList = result['esearchresult']['idlist']
self.save_relation_to_db(geneIdList, compound_obj)
return geneIdList[:2]
def cal_graph(self):
"""
calculate the relation graph
"""
for cid in self.cid_list:
center_compound_obj = self.get_compound_object(cid)
if center_compound_obj == None:
continue
self.create_node(center_compound_obj.name, center_compound_obj.compound_id)
related_list = self.related_compound(center_compound_obj.compound_id)[:5]
for compound_obj in related_list:
new_center = self.create_n_link(center_compound_obj.compound_id, compound_obj)
self.create_node(compound_obj.name, compound_obj.compound_id)
                edge_info = {
                    'source': self.index_dict[center_compound_obj.compound_id],
                    'target': self.index_dict[compound_obj.compound_id],
                    'relation': compound_obj.name,
                }
                # record the edge between the center compound and this related compound
                self.edges.append(edge_info)
deep_related_list = self.related_compound(compound_obj.compound_id)[:2]
for deep_compound_obj in deep_related_list:
self.create_n_link(compound_obj.compound_id, deep_compound_obj)
def get_graph(self):
"""
get the graph
        @return: the graph
@rtype: dict
"""
result = {
'nodes': self.nodes,
'edges' : self.edges
}
return result
'''
def find_related_compound(cid_str):
"""
find the compound that are related to current compound in reaction
@param cid: list of compound id
@type cid: list
@return: dict of compound that are related to the compound, empty list will be returned if there is no related compound
@rtype: dict
"""
result = dict()
nodes = list()
edges = list()
all_genes = list()
index_dict = dict()
index = 0
if cid_str.endswith('_'):
cid_str = cid_str[:-1]
cid_list = cid_str.split('_')
for cid in cid_list:
try:
compound_obj = compound.objects.get(compound_id=cid)
#get first gene and create new node
cen_gene_id = None
try:
cen_gene_id = search_gene_in_ncbi(compound_obj.name,)[0]
if not cen_gene_id in all_genes:
all_genes.append(cen_gene_id)
gene_obj = gene.objects.get(gene_id=cen_gene_id)
node_info = {
'name': gene_obj.name,
'id': gene_obj.gene_id
}
nodes.append(node_info)
index_dict[cen_gene_id] = index
index += 1
except:
pass
# find related reactions
rid_list = reaction_compound.objects.filter(compound=compound_obj, isReactant=True).values_list('reaction_id', flat=True)
cname_list = list()
for rid in rid_list:
rs = reaction_compound.objects.filter(Q(reaction_id=rid), ~Q(compound=compound_obj))[:5]
for r in rs:
cname_list.append(r.compound.name)
for cname in cname_list:
# find genes
gene_list = search_gene_in_ncbi(cname, expect=cen_gene_id, index=1)
for gene_id in gene_list:
if gene_id in all_genes:
continue
try:
gene_obj = gene.objects.get(gene_id=gene_id)
#create new node
all_genes.append(gene_id)
node_info = {
'name' : gene_obj.name,
'id': gene_obj.gene_id
}
nodes.append(node_info)
index_dict[gene_obj.gene_id] = index
index += 1
# add edge
edge_info = {
'source': index_dict[cen_gene_id],
'target': index_dict[gene_obj.gene_id],
'relation': cname
}
edges.append(edge_info)
except:
traceback.print_exc()
pass
except:
traceback.print_exc()
pass
result = {
'nodes': nodes,
'edges': edges
}
return result
'''
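# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): how a view is
# expected to drive `gene_graph`, based on its constructor and methods. The
# compound and organism ids below are placeholders, and running this requires
# a configured Django environment with the system app's models and an
# Elasticsearch index available.
#
#   graph = gene_graph('C00022_C00031', 'hsa')  # '_'-separated id lists
#   graph.cal_graph()                           # build nodes and edges
#   data = graph.get_graph()                    # {'nodes': [...], 'edges': [...]}
#
# `search_compound('glucose')` similarly returns a list of
# {'compound_id': ..., 'name': ...} dicts suitable for autocomplete responses.
# ---------------------------------------------------------------------------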
|
igemsoftware/HFUT-China_2015
|
system/gene.py
|
Python
|
apache-2.0
| 15,855
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.cloud.compute_v1.services.service_attachments import pagers
from google.cloud.compute_v1.types import compute
from .transports.base import ServiceAttachmentsTransport, DEFAULT_CLIENT_INFO
from .transports.rest import ServiceAttachmentsRestTransport
class ServiceAttachmentsClientMeta(type):
"""Metaclass for the ServiceAttachments client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[ServiceAttachmentsTransport]]
_transport_registry["rest"] = ServiceAttachmentsRestTransport
def get_transport_class(
cls, label: str = None,
) -> Type[ServiceAttachmentsTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class ServiceAttachmentsClient(metaclass=ServiceAttachmentsClientMeta):
"""The ServiceAttachments API."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "compute.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ServiceAttachmentsClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ServiceAttachmentsClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> ServiceAttachmentsTransport:
"""Returns the transport used by the client instance.
Returns:
ServiceAttachmentsTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[client_options_lib.ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
        (1) if `client_options.api_endpoint` is provided, use the provided one.
        (2) if the `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
        default mTLS endpoint; if the environment variable is "never", use the default API
        endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
if client_options is None:
client_options = client_options_lib.ClientOptions()
use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_client_cert not in ("true", "false"):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
if use_mtls_endpoint not in ("auto", "never", "always"):
raise MutualTLSChannelError(
"Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
)
# Figure out the client cert source to use.
client_cert_source = None
if use_client_cert == "true":
if client_options.client_cert_source:
client_cert_source = client_options.client_cert_source
elif mtls.has_default_client_cert_source():
client_cert_source = mtls.default_client_cert_source()
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
elif use_mtls_endpoint == "always" or (
use_mtls_endpoint == "auto" and client_cert_source
):
api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = cls.DEFAULT_ENDPOINT
return api_endpoint, client_cert_source
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, ServiceAttachmentsTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the service attachments client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ServiceAttachmentsTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(
client_options
)
api_key_value = getattr(client_options, "api_key", None)
if api_key_value and credentials:
raise ValueError(
"client_options.api_key and credentials are mutually exclusive"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, ServiceAttachmentsTransport):
# transport is a ServiceAttachmentsTransport instance.
if credentials or client_options.credentials_file or api_key_value:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
import google.auth._default # type: ignore
if api_key_value and hasattr(
google.auth._default, "get_api_key_credentials"
):
credentials = google.auth._default.get_api_key_credentials(
api_key_value
)
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=True,
)
def aggregated_list(
self,
request: Union[compute.AggregatedListServiceAttachmentsRequest, dict] = None,
*,
project: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.AggregatedListPager:
r"""Retrieves the list of all ServiceAttachment
resources, regional and global, available to the
specified project.
Args:
request (Union[google.cloud.compute_v1.types.AggregatedListServiceAttachmentsRequest, dict]):
The request object. A request message for
ServiceAttachments.AggregatedList. See the method
description for details.
project (str):
Name of the project scoping this
request.
This corresponds to the ``project`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.compute_v1.services.service_attachments.pagers.AggregatedListPager:
Contains a list of
ServiceAttachmentsScopedList.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a compute.AggregatedListServiceAttachmentsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, compute.AggregatedListServiceAttachmentsRequest):
request = compute.AggregatedListServiceAttachmentsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project is not None:
request.project = project
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.aggregated_list]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.AggregatedListPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
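    # -----------------------------------------------------------------------
    # Hedged usage sketch (not part of the generated client): the paged
    # methods in this class are typically driven like this, with a
    # placeholder project id:
    #
    #   client = ServiceAttachmentsClient()
    #   for item in client.aggregated_list(project="my-project"):
    #       print(item)
    #
    # The returned pager resolves additional pages transparently while
    # iterating, so no explicit page-token handling is needed.
    # -----------------------------------------------------------------------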
def delete_unary(
self,
request: Union[compute.DeleteServiceAttachmentRequest, dict] = None,
*,
project: str = None,
region: str = None,
service_attachment: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Deletes the specified ServiceAttachment in the given
scope
Args:
request (Union[google.cloud.compute_v1.types.DeleteServiceAttachmentRequest, dict]):
The request object. A request message for
ServiceAttachments.Delete. See the method description
for details.
project (str):
Project ID for this request.
This corresponds to the ``project`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
region (str):
Name of the region of this request.
This corresponds to the ``region`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
service_attachment (str):
Name of the ServiceAttachment
resource to delete.
This corresponds to the ``service_attachment`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.compute_v1.types.Operation:
Represents an Operation resource. Google Compute Engine
has three Operation resources: \*
[Global](/compute/docs/reference/rest/v1/globalOperations)
\*
[Regional](/compute/docs/reference/rest/v1/regionOperations)
\*
[Zonal](/compute/docs/reference/rest/v1/zoneOperations)
You can use an operation resource to manage asynchronous
API requests. For more information, read Handling API
responses. Operations can be global, regional or zonal.
- For global operations, use the globalOperations
resource. - For regional operations, use the
regionOperations resource. - For zonal operations, use
the zonalOperations resource. For more information, read
Global, Regional, and Zonal Resources.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project, region, service_attachment])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a compute.DeleteServiceAttachmentRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, compute.DeleteServiceAttachmentRequest):
request = compute.DeleteServiceAttachmentRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project is not None:
request.project = project
if region is not None:
request.region = region
if service_attachment is not None:
request.service_attachment = service_attachment
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def get(
self,
request: Union[compute.GetServiceAttachmentRequest, dict] = None,
*,
project: str = None,
region: str = None,
service_attachment: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.ServiceAttachment:
r"""Returns the specified ServiceAttachment resource in
the given scope.
Args:
request (Union[google.cloud.compute_v1.types.GetServiceAttachmentRequest, dict]):
The request object. A request message for
ServiceAttachments.Get. See the method description for
details.
project (str):
Project ID for this request.
This corresponds to the ``project`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
region (str):
Name of the region of this request.
This corresponds to the ``region`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
service_attachment (str):
Name of the ServiceAttachment
resource to return.
This corresponds to the ``service_attachment`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.compute_v1.types.ServiceAttachment:
Represents a ServiceAttachment
resource. A service attachment
represents a service that a producer has
exposed. It encapsulates the load
                balancer which fronts the service and
                a list of NAT IP ranges that the
                producer uses to represent the
                consumers connecting to the service.
next tag = 20
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project, region, service_attachment])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a compute.GetServiceAttachmentRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, compute.GetServiceAttachmentRequest):
request = compute.GetServiceAttachmentRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project is not None:
request.project = project
if region is not None:
request.region = region
if service_attachment is not None:
request.service_attachment = service_attachment
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def get_iam_policy(
self,
request: Union[compute.GetIamPolicyServiceAttachmentRequest, dict] = None,
*,
project: str = None,
region: str = None,
resource: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Policy:
r"""Gets the access control policy for a resource. May be
empty if no such policy or resource exists.
Args:
request (Union[google.cloud.compute_v1.types.GetIamPolicyServiceAttachmentRequest, dict]):
The request object. A request message for
ServiceAttachments.GetIamPolicy. See the method
description for details.
project (str):
Project ID for this request.
This corresponds to the ``project`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
region (str):
The name of the region for this
request.
This corresponds to the ``region`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
resource (str):
Name or id of the resource for this
request.
This corresponds to the ``resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.compute_v1.types.Policy:
An Identity and Access Management (IAM) policy, which
specifies access controls for Google Cloud resources. A
Policy is a collection of bindings. A binding binds one
or more members, or principals, to a single role.
Principals can be user accounts, service accounts,
Google groups, and domains (such as G Suite). A role is
a named list of permissions; each role can be an IAM
predefined role or a user-created custom role. For some
types of Google Cloud resources, a binding can also
specify a condition, which is a logical expression that
allows access to a resource only if the expression
evaluates to true. A condition can add constraints based
on attributes of the request, the resource, or both. To
learn which resources support conditions in their IAM
policies, see the [IAM
documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies).
**JSON example:** { "bindings": [ { "role":
"roles/resourcemanager.organizationAdmin", "members": [
"user:mike@example.com", "group:admins@example.com",
"domain:google.com",
"serviceAccount:my-project-id@appspot.gserviceaccount.com"
] }, { "role":
"roles/resourcemanager.organizationViewer", "members": [
"user:eve@example.com" ], "condition": { "title":
"expirable access", "description": "Does not grant
access after Sep 2020", "expression": "request.time <
timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag":
"BwWWja0YfJA=", "version": 3 } **YAML example:**
bindings: - members: - user:\ mike@example.com -
group:\ admins@example.com - domain:google.com -
serviceAccount:\ my-project-id@appspot.gserviceaccount.com
role: roles/resourcemanager.organizationAdmin - members:
- user:\ eve@example.com role:
roles/resourcemanager.organizationViewer condition:
title: expirable access description: Does not grant
access after Sep 2020 expression: request.time <
timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA=
version: 3 For a description of IAM and its features,
see the [IAM
documentation](\ https://cloud.google.com/iam/docs/).
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project, region, resource])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a compute.GetIamPolicyServiceAttachmentRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, compute.GetIamPolicyServiceAttachmentRequest):
request = compute.GetIamPolicyServiceAttachmentRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project is not None:
request.project = project
if region is not None:
request.region = region
if resource is not None:
request.resource = resource
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_iam_policy]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def insert_unary(
self,
request: Union[compute.InsertServiceAttachmentRequest, dict] = None,
*,
project: str = None,
region: str = None,
service_attachment_resource: compute.ServiceAttachment = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Creates a ServiceAttachment in the specified project
in the given scope using the parameters that are
included in the request.
Args:
request (Union[google.cloud.compute_v1.types.InsertServiceAttachmentRequest, dict]):
The request object. A request message for
ServiceAttachments.Insert. See the method description
for details.
project (str):
Project ID for this request.
This corresponds to the ``project`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
region (str):
Name of the region of this request.
This corresponds to the ``region`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
service_attachment_resource (google.cloud.compute_v1.types.ServiceAttachment):
The body resource for this request
This corresponds to the ``service_attachment_resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.compute_v1.types.Operation:
Represents an Operation resource. Google Compute Engine
has three Operation resources: \*
[Global](/compute/docs/reference/rest/v1/globalOperations)
\*
[Regional](/compute/docs/reference/rest/v1/regionOperations)
\*
[Zonal](/compute/docs/reference/rest/v1/zoneOperations)
You can use an operation resource to manage asynchronous
API requests. For more information, read Handling API
responses. Operations can be global, regional or zonal.
- For global operations, use the globalOperations
resource. - For regional operations, use the
regionOperations resource. - For zonal operations, use
the zonalOperations resource. For more information, read
Global, Regional, and Zonal Resources.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project, region, service_attachment_resource])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a compute.InsertServiceAttachmentRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, compute.InsertServiceAttachmentRequest):
request = compute.InsertServiceAttachmentRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project is not None:
request.project = project
if region is not None:
request.region = region
if service_attachment_resource is not None:
request.service_attachment_resource = service_attachment_resource
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.insert]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def list(
self,
request: Union[compute.ListServiceAttachmentsRequest, dict] = None,
*,
project: str = None,
region: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListPager:
r"""Lists the ServiceAttachments for a project in the
given scope.
Args:
request (Union[google.cloud.compute_v1.types.ListServiceAttachmentsRequest, dict]):
The request object. A request message for
ServiceAttachments.List. See the method description for
details.
project (str):
Project ID for this request.
This corresponds to the ``project`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
region (str):
Name of the region of this request.
This corresponds to the ``region`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.compute_v1.services.service_attachments.pagers.ListPager:
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project, region])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a compute.ListServiceAttachmentsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, compute.ListServiceAttachmentsRequest):
request = compute.ListServiceAttachmentsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project is not None:
request.project = project
if region is not None:
request.region = region
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def patch_unary(
self,
request: Union[compute.PatchServiceAttachmentRequest, dict] = None,
*,
project: str = None,
region: str = None,
service_attachment: str = None,
service_attachment_resource: compute.ServiceAttachment = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Patches the specified ServiceAttachment resource with
the data included in the request. This method supports
PATCH semantics and uses JSON merge patch format and
processing rules.
Args:
request (Union[google.cloud.compute_v1.types.PatchServiceAttachmentRequest, dict]):
The request object. A request message for
ServiceAttachments.Patch. See the method description for
details.
project (str):
Project ID for this request.
This corresponds to the ``project`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
region (str):
                The region scoping this request. It
                should conform to RFC1035.
This corresponds to the ``region`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
service_attachment (str):
The resource id of the
ServiceAttachment to patch. It should
conform to RFC1035 resource name or be a
                string form of an unsigned long number.
This corresponds to the ``service_attachment`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
service_attachment_resource (google.cloud.compute_v1.types.ServiceAttachment):
The body resource for this request
This corresponds to the ``service_attachment_resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.compute_v1.types.Operation:
Represents an Operation resource. Google Compute Engine
has three Operation resources: \*
[Global](/compute/docs/reference/rest/v1/globalOperations)
\*
[Regional](/compute/docs/reference/rest/v1/regionOperations)
\*
[Zonal](/compute/docs/reference/rest/v1/zoneOperations)
You can use an operation resource to manage asynchronous
API requests. For more information, read Handling API
responses. Operations can be global, regional or zonal.
- For global operations, use the globalOperations
resource. - For regional operations, use the
regionOperations resource. - For zonal operations, use
the zonalOperations resource. For more information, read
Global, Regional, and Zonal Resources.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any(
[project, region, service_attachment, service_attachment_resource]
)
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a compute.PatchServiceAttachmentRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, compute.PatchServiceAttachmentRequest):
request = compute.PatchServiceAttachmentRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project is not None:
request.project = project
if region is not None:
request.region = region
if service_attachment is not None:
request.service_attachment = service_attachment
if service_attachment_resource is not None:
request.service_attachment_resource = service_attachment_resource
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.patch]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def set_iam_policy(
self,
request: Union[compute.SetIamPolicyServiceAttachmentRequest, dict] = None,
*,
project: str = None,
region: str = None,
resource: str = None,
region_set_policy_request_resource: compute.RegionSetPolicyRequest = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Policy:
r"""Sets the access control policy on the specified
resource. Replaces any existing policy.
Args:
request (Union[google.cloud.compute_v1.types.SetIamPolicyServiceAttachmentRequest, dict]):
The request object. A request message for
ServiceAttachments.SetIamPolicy. See the method
description for details.
project (str):
Project ID for this request.
This corresponds to the ``project`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
region (str):
The name of the region for this
request.
This corresponds to the ``region`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
resource (str):
Name or id of the resource for this
request.
This corresponds to the ``resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
region_set_policy_request_resource (google.cloud.compute_v1.types.RegionSetPolicyRequest):
The body resource for this request
This corresponds to the ``region_set_policy_request_resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.compute_v1.types.Policy:
An Identity and Access Management (IAM) policy, which
specifies access controls for Google Cloud resources. A
Policy is a collection of bindings. A binding binds one
or more members, or principals, to a single role.
Principals can be user accounts, service accounts,
Google groups, and domains (such as G Suite). A role is
a named list of permissions; each role can be an IAM
predefined role or a user-created custom role. For some
types of Google Cloud resources, a binding can also
specify a condition, which is a logical expression that
allows access to a resource only if the expression
evaluates to true. A condition can add constraints based
on attributes of the request, the resource, or both. To
learn which resources support conditions in their IAM
policies, see the [IAM
documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies).
**JSON example:** { "bindings": [ { "role":
"roles/resourcemanager.organizationAdmin", "members": [
"user:mike@example.com", "group:admins@example.com",
"domain:google.com",
"serviceAccount:my-project-id@appspot.gserviceaccount.com"
] }, { "role":
"roles/resourcemanager.organizationViewer", "members": [
"user:eve@example.com" ], "condition": { "title":
"expirable access", "description": "Does not grant
access after Sep 2020", "expression": "request.time <
timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag":
"BwWWja0YfJA=", "version": 3 } **YAML example:**
bindings: - members: - user:\ mike@example.com -
group:\ admins@example.com - domain:google.com -
serviceAccount:\ my-project-id@appspot.gserviceaccount.com
role: roles/resourcemanager.organizationAdmin - members:
- user:\ eve@example.com role:
roles/resourcemanager.organizationViewer condition:
title: expirable access description: Does not grant
access after Sep 2020 expression: request.time <
timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA=
version: 3 For a description of IAM and its features,
see the [IAM
documentation](\ https://cloud.google.com/iam/docs/).
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any(
[project, region, resource, region_set_policy_request_resource]
)
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a compute.SetIamPolicyServiceAttachmentRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, compute.SetIamPolicyServiceAttachmentRequest):
request = compute.SetIamPolicyServiceAttachmentRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project is not None:
request.project = project
if region is not None:
request.region = region
if resource is not None:
request.resource = resource
if region_set_policy_request_resource is not None:
request.region_set_policy_request_resource = (
region_set_policy_request_resource
)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.set_iam_policy]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def test_iam_permissions(
self,
request: Union[compute.TestIamPermissionsServiceAttachmentRequest, dict] = None,
*,
project: str = None,
region: str = None,
resource: str = None,
test_permissions_request_resource: compute.TestPermissionsRequest = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.TestPermissionsResponse:
r"""Returns permissions that a caller has on the
specified resource.
Args:
request (Union[google.cloud.compute_v1.types.TestIamPermissionsServiceAttachmentRequest, dict]):
The request object. A request message for
ServiceAttachments.TestIamPermissions. See the method
description for details.
project (str):
Project ID for this request.
This corresponds to the ``project`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
region (str):
The name of the region for this
request.
This corresponds to the ``region`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
resource (str):
Name or id of the resource for this
request.
This corresponds to the ``resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
test_permissions_request_resource (google.cloud.compute_v1.types.TestPermissionsRequest):
The body resource for this request
This corresponds to the ``test_permissions_request_resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.compute_v1.types.TestPermissionsResponse:
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any(
[project, region, resource, test_permissions_request_resource]
)
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a compute.TestIamPermissionsServiceAttachmentRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, compute.TestIamPermissionsServiceAttachmentRequest):
request = compute.TestIamPermissionsServiceAttachmentRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project is not None:
request.project = project
if region is not None:
request.region = region
if resource is not None:
request.resource = resource
if test_permissions_request_resource is not None:
request.test_permissions_request_resource = (
test_permissions_request_resource
)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.test_iam_permissions]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-cloud-compute",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("ServiceAttachmentsClient",)
|
googleapis/python-compute
|
google/cloud/compute_v1/services/service_attachments/client.py
|
Python
|
apache-2.0
| 59,785
|
from json import dumps
import requests
d = {'yolar': 535, 'galaxy': 536, 'solar': 551, 'swordkeeper': 552, 'civilization': 553, 'CurvatureDrive': 554,
'singer': 555, 'hdfragments': 556, 'evans': 557, 'dagon': 558, 'di-foil': 559, 'dimension': 560, 'farmer': 562,
'hibernation': 564, 'huformation': 565, 'mentalseal': 566, 'midas': 567, 'momentum': 568, 'owl': 569,
'shooter': 570, 'sophon': 571, 'bye': 573, 'cms': 575, 'nsk': 576, 'painter': 577, 'redcoast': 578,
'scientificboundary': 579, 'wall-breaker': 580}
head = {
'Accept': 'application/json',
'Authorization': 'eyJhbGciOiJIUzI1NiIsImlhdCI6MTQ5NzU4MDY5NywiZXhwIjoxNTEzMTMyNjk3fQ.eyJpZCI6MjB9.uo8JdyzBBQ-oGxzMyoiFDlycWk-fqagZLVgwrwqTSBM',
'Content-Type': 'application/json',
'Host': 'api.wayknew.com',
'Referer': 'https://wayknew.com/articles/535/edit',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36'
}
def write_to_wayknew(repo_name, str_doc):
if repo_name not in d.keys():
print('article not in wayknew, please create a null article in wayknew!')
exit(-1)
    title = repo_name + ' Online API Documentation'
url = 'https://api.wayknew.com/api/articles/' + str(d[repo_name])
html_content = '<p>' + str_doc + '</p>'
request_data = {'title': title, 'content': str_doc, 'html_content': html_content}
rsp = requests.patch(url, dumps(request_data), headers=head)
if rsp.status_code != 200:
print(rsp.text)
exit(rsp.status_code)
print(repo_name + ' api write to wayknew success')
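# --- Hedged usage sketch (added for illustration; not in the original script).
# write_to_wayknew() patches an existing wayknew article with generated API
# documentation; the repository name must already be a key of `d` above. The
# document string below is a placeholder.
def _example_write():
    sample_doc = 'Generated API documentation for the galaxy service.'
    write_to_wayknew('galaxy', sample_doc)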
|
ljm516/big-challenge
|
chanllenge/util/wayknew.py
|
Python
|
apache-2.0
| 1,625
|
from models.contact import Contacts
from data.data_for_contacts import constant as testdata
import pytest
def test_NewContact(app, db, json_contacts):
contact = json_contacts
old_contacts = db.get_contact_list()
app.contacts.create_contact(contact)
new_contacts = db.get_contact_list()
old_contacts.append(contact)
assert sorted(old_contacts, key=Contacts.contact_id_or_max) == sorted(new_contacts, key=Contacts.contact_id_or_max)
|
ksemish/KseniyaRepository
|
tests/NewContact.py
|
Python
|
apache-2.0
| 457
|
# pylint: disable=redefined-outer-name, missing-docstring
import sys
import pytest
sys.path.append('..')
from batchflow import Config
@pytest.fixture
def config():
_config = dict(key1='val1', key2=dict())
_config['key2']['subkey1'] = 'val21'
return Config(_config)
class TestConfig:
def test_getitem_key(self, config):
assert config['key1'] == config.config['key1']
def test_getitem_missing_key(self, config):
with pytest.raises(KeyError):
_ = config['missing key']
def test_getitem_nested_key(self, config):
assert config['key2/subkey1'] == config.config['key2']['subkey1']
def test_get_key(self, config):
assert config.get('key1') == config.config.get('key1')
def test_get_nested_key(self, config):
assert config.get('key2/subkey1') == config.config['key2']['subkey1']
def test_get_missing_key(self, config):
assert config.get('missing key') is None
def test_get_missing_key_with_default(self, config):
assert config.get('missing key', default=1) == 1
def test_get_nested_missing_key_with_default(self, config):
assert config.get('key2/missing key', default=1) == 1
def test_pop_key(self, config):
val = config.config.get('key1')
assert config.pop('key1') == val
assert 'key1' not in config, 'key should have been deleted'
def test_pop_nested_key(self, config):
val = config.config['key2']['subkey1']
assert config.pop('key2/subkey1') == val
assert 'subkey1' not in config, 'nested key should have been deleted'
assert 'key2' in config, 'outer key should remain'
def test_pop_missing_key(self, config):
with pytest.raises(KeyError):
_ = config.pop('missing key')
def test_pop_missing_key_with_default(self, config):
assert config.pop('missing key', default=1) == 1
def test_pop_nested_missing_key_with_default(self, config):
assert config.pop('key2/missing key', default=1) == 1
def test_setitem_key(self, config):
config['key1'] = 'new_val1'
assert config['key1'] == config.config['key1']
assert config.config['key1'] == 'new_val1'
def test_setitem_nested_key(self, config):
config['key2/subkey1'] = 'new_val21'
assert config['key2/subkey1'] == config.config['key2']['subkey1']
assert config.config['key2']['subkey1'] == 'new_val21'
def test_setitem_new_key(self, config):
config['key0'] = 'new_val0'
assert config['key0'] == config.config['key0']
assert config.config['key0'] == 'new_val0'
def test_setitem_nested_new_key(self, config):
config['key2/subkey2'] = 'new_val22'
assert config['key2/subkey2'] == config.config['key2']['subkey2']
assert config.config['key2']['subkey2'] == 'new_val22'
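# --- Minimal usage sketch (an illustrative addition that mirrors the behaviour
# asserted by the tests above): Config resolves '/'-separated keys as nested
# dictionary lookups for __getitem__, get, pop and __setitem__.
def _example_config_usage():
    config = Config(dict(key1='val1', key2=dict(subkey1='val21')))
    assert config['key2/subkey1'] == 'val21'        # nested read
    config['key2/subkey2'] = 'val22'                # nested write creates the key
    assert config.get('missing', default=0) == 0    # default for absent keys
    assert config.pop('key1') == 'val1'             # pop removes and returns the value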
|
analysiscenter/dataset
|
batchflow/tests/config_test.py
|
Python
|
apache-2.0
| 2,864
|
import json
import time
import pytest
from anchore_engine.auth.common import (
get_creds_by_registry,
get_docker_registry_userpw,
registry_record_matches,
)
_test_username = "tonystark"
_test_password = "potts"
_test_registry_meta = {
"authorizationToken": "{}:{}".format(_test_username, _test_password)
}
_record_ecr = {
"registry_type": "awsecr",
"registry_meta": json.dumps(_test_registry_meta),
}
_record_not_ecr = {
"registry_type": "other-registry",
"registry_user": _test_username,
"registry_pass": _test_password,
}
_record_ecr_inactive = {
"registry": "docker.io",
"record_state_key": "inactive",
"registry_type": "awsecr",
"registry_meta": json.dumps(_test_registry_meta),
"registry_verify": True,
}
_record_ecr_unavailable = {
"registry": "docker.io",
"record_state_key": "inactive",
"record_state_val": time.time(), # note: technically this could yield nondeterministic results
"registry_type": "awsecr",
"registry_meta": json.dumps(_test_registry_meta),
"registry_verify": True,
}
@pytest.mark.parametrize("registry_record", [_record_ecr, _record_not_ecr])
def test_get_docker_registry_userpw(registry_record):
result = get_docker_registry_userpw(registry_record)
assert result == (_test_username, _test_password)
def test_get_docker_registry_userpw_bad_json():
record_ecr_bad_json = {
"registry_type": "awsecr",
"registry_meta": "this-is-not-valid-json!}",
}
with pytest.raises(Exception):
get_docker_registry_userpw(record_ecr_bad_json)
@pytest.mark.parametrize(
"registry,repository,registry_creds,expected",
[
("docker.io", "library/node", None, (None, None, None)),
(
"docker.io",
"library/node",
[_record_ecr_inactive],
(_test_username, _test_password, True),
),
],
)
def test_get_creds_by_registry(registry, repository, registry_creds, expected):
result = get_creds_by_registry(registry, repository, registry_creds)
assert result == expected
def test_get_creds_by_registry_unavailable():
with pytest.raises(Exception):
get_creds_by_registry("docker.io", "library/node", [_record_ecr_unavailable])
@pytest.mark.parametrize(
"registry_record_str,registry,repository",
[
("docker.io/library/centos", "docker.io", "library/centos"),
("docker.io", "docker.io", "centos"),
("docker.io", "docker.io", "myuser/myrepo"),
],
)
def test_registry_record_matches_exact(registry_record_str, registry, repository):
assert registry_record_matches(registry_record_str, registry, repository)
@pytest.mark.parametrize(
"registry_record_str,registry,repository",
[
("docker.io/library/*", "docker.io", "library/centos"),
("docker.io/*", "docker.io", "library/centos"),
("gcr.io/myproject/*", "gcr.io", "myproject/myuser/myrepo"),
],
)
def test_registry_record_matches_wildcard(registry_record_str, registry, repository):
assert registry_record_matches(registry_record_str, registry, repository)
@pytest.mark.parametrize(
"registry_record_str,registry,repository",
[
("docker.io", "gcr.io", "myproject/myuser"),
("docker.io/*", "gcr.io", "myproject/myuser"),
("docker.io/library/*", "docker.io", "myuser/myrepo"),
("docker.io/myuser/myrepo", "docker.io", "myuser/myrepo2"),
],
)
def test_registry_record_matches_non(registry_record_str, registry, repository):
assert not registry_record_matches(registry_record_str, registry, repository)
|
anchore/anchore-engine
|
tests/unit/anchore_engine/auth/test_common.py
|
Python
|
apache-2.0
| 3,612
|
# Copyright 2021, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for trainer_utils.py."""
from absl.testing import parameterized
import tensorflow as tf
import tensorflow_federated as tff
from generalization.utils import eval_metric_distribution
from generalization.utils import trainer_utils
def keras_model_builder_with_zeros():
# Create a simple linear regression model, single output.
# We initialize all weights to zero.
model = tf.keras.Sequential([
tf.keras.layers.Dense(
1,
kernel_initializer='zeros',
bias_initializer='zeros',
input_shape=(1,))
])
return model
def keras_model_builder_with_ones():
model = tf.keras.Sequential([
tf.keras.layers.Dense(
1,
kernel_initializer='ones',
bias_initializer='ones',
input_shape=(1,))
])
return model
def create_dataset():
# Create data satisfying y = 2*x + 1
x = [[1.0], [2.0], [3.0]]
y = [[3.0], [5.0], [7.0]]
return tf.data.Dataset.from_tensor_slices((x, y)).batch(1)
def create_federated_cd():
x1 = [[1.0]]
y1 = [[3.0]]
dataset1 = (x1, y1)
x2 = [[2.0]]
y2 = [[5.0]]
dataset2 = (x2, y2)
x3 = [[3.0]]
y3 = [[7.0]]
dataset3 = (x3, y3)
return tff.simulation.datasets.TestClientData({
1: dataset1,
2: dataset2,
3: dataset3
}).preprocess(lambda ds: ds.batch(1))
def get_input_spec():
return create_dataset().element_spec
def metrics_builder():
return [tf.keras.metrics.MeanSquaredError()]
def tff_model_builder():
return tff.learning.from_keras_model(
keras_model=keras_model_builder_with_zeros(),
input_spec=get_input_spec(),
loss=tf.keras.losses.MeanSquaredError(),
metrics=metrics_builder())
class CreateEvalFnsTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(('with_test_cd', True),
('without_test_cd', False))
def test_create_federated_eval_fns(self, use_test_cd):
"""Test for create_federated_eval_fns."""
(part_train_eval_fn, part_val_fn, unpart_fn,
test_fn) = trainer_utils.create_federated_eval_fns(
tff_model_builder=tff_model_builder,
metrics_builder=metrics_builder,
part_train_eval_cd=create_federated_cd(),
part_val_cd=create_federated_cd(),
unpart_cd=create_federated_cd(),
test_cd=create_federated_cd() if use_test_cd else None,
stat_fns=eval_metric_distribution.ALL_STAT_FNS,
rounds_per_eval=1,
part_clients_per_eval=2,
unpart_clients_per_eval=2,
test_clients_for_eval=3,
resample_eval_clients=False,
eval_clients_random_seed=1)
keras_model = keras_model_builder_with_zeros()
model_weights = tff.learning.ModelWeights.from_model(keras_model)
server_state = tff.learning.framework.ServerState(model_weights, [], [], [])
expected_keys = [
f'mean_squared_error/{s}' for s in eval_metric_distribution.ALL_STAT_FNS
]
# Federated validation fn requires a positional arg round_num.
if use_test_cd:
self.assertIsNotNone(test_fn)
eval_fns_to_test = (part_train_eval_fn, part_val_fn, unpart_fn, test_fn)
else:
self.assertIsNone(test_fn)
eval_fns_to_test = (part_train_eval_fn, part_val_fn, unpart_fn)
for eval_fn in eval_fns_to_test:
metrics_dict = eval_fn(server_state, 0)
self.assertEqual(list(metrics_dict.keys()), expected_keys)
@parameterized.named_parameters(('case1', 3, 4), ('case2', 3, 5),
('case3', 2, 3))
def test_create_federated_eval_fns_skips_rounds(self, rounds_per_eval,
round_num):
"""Test that create_federated_eval_fns skips the appropriate rounds."""
part_train_eval_fn, part_val_fn, unpart_fn, _ = trainer_utils.create_federated_eval_fns(
tff_model_builder=tff_model_builder,
metrics_builder=metrics_builder,
part_train_eval_cd=create_federated_cd(),
part_val_cd=create_federated_cd(),
unpart_cd=create_federated_cd(),
test_cd=create_federated_cd(),
stat_fns=eval_metric_distribution.ALL_STAT_FNS,
rounds_per_eval=rounds_per_eval,
part_clients_per_eval=2,
unpart_clients_per_eval=2,
test_clients_for_eval=3,
resample_eval_clients=False,
eval_clients_random_seed=1)
keras_model = keras_model_builder_with_zeros()
model_weights = tff.learning.ModelWeights.from_model(keras_model)
server_state = tff.learning.framework.ServerState(model_weights, [], [], [])
# Federated validation fn requires a positional arg round_num.
for eval_fn in (part_train_eval_fn, part_val_fn, unpart_fn):
metrics_dict = eval_fn(server_state, round_num)
self.assertEmpty(metrics_dict.keys())
@parameterized.named_parameters(('with_test_cd', True),
('without_test_cd', False))
def test_create_centralized_eval_fns(self, use_test_cd):
"""Test for create_centralized_eval_fns."""
(part_train_eval_fn, part_val_fn, unpart_fn,
test_fn) = trainer_utils.create_centralized_eval_fns(
tff_model_builder=tff_model_builder,
metrics_builder=metrics_builder,
part_train_eval_cd=create_federated_cd(),
part_val_cd=create_federated_cd(),
unpart_cd=create_federated_cd(),
test_cd=create_federated_cd() if use_test_cd else None,
stat_fns=eval_metric_distribution.ALL_STAT_FNS,
part_clients_per_eval=2,
unpart_clients_per_eval=2,
test_clients_for_eval=3,
resample_eval_clients=False,
eval_clients_random_seed=1)
keras_model = keras_model_builder_with_zeros()
expected_keys = [
f'mean_squared_error/{s}' for s in eval_metric_distribution.ALL_STAT_FNS
]
if use_test_cd:
self.assertIsNotNone(test_fn)
eval_fns_to_test = (part_train_eval_fn, part_val_fn, unpart_fn, test_fn)
else:
self.assertIsNone(test_fn)
eval_fns_to_test = (part_train_eval_fn, part_val_fn, unpart_fn)
for eval_fn in eval_fns_to_test:
metrics_dict = eval_fn(keras_model)
self.assertEqual(list(metrics_dict.keys()), expected_keys)
if __name__ == '__main__':
tf.test.main()
|
google-research/federated
|
generalization/utils/trainer_utils_test.py
|
Python
|
apache-2.0
| 6,847
|
# Copyright (c) 2015 SONATA-NFV, Thales Communications & Security
# ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Neither the name of the SONATA-NFV, Thales Communications & Security
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# This work has been performed in the framework of the SONATA project,
# funded by the European Commission under Grant number 671517 through
# the Horizon 2020 and 5G-PPP programmes. The authors would like to
# acknowledge the contributions of their colleagues of the SONATA
# partner consortium (www.sonata-nfv.eu).
"""High level pandas structure for the Sonata prometheus data"""
import datetime
import typing # noqa pylint: disable=unused-import
from typing import Dict
import pandas # type: ignore
from son_analyze.core.prometheus import PrometheusData
def convert_timestamp_to_posix(timestamp: str) -> datetime.datetime:
"""Convert the timestamp into a datetime"""
return datetime.datetime.fromtimestamp(float(timestamp), # type: ignore
tz=datetime.timezone.utc)
# pylint: disable=unsubscriptable-object
def build_sonata_df_by_id(prom_data: PrometheusData) -> Dict[str,
pandas.DataFrame]:
"""Build a dict of dataframe. Each dataframe contains the values matching
the corresponding id"""
    # noqa TODO: find the longest metric and use it as the index. Interpolate the
    # other metrics against it before the merge
result = {}
items_itr = prom_data._by_id.items() # pylint: disable=protected-access
for id_index, all_metrics in items_itr:
acc_ts = []
for elt in all_metrics:
metric_name = elt['metric']['__name__']
index, data = zip(*elt['values'])
index = [convert_timestamp_to_posix(z) for z in index]
this_serie = pandas.Series(data, index=index)
this_serie.name = metric_name
acc_ts.append(this_serie)
dataframe = pandas.concat(acc_ts, join='outer', axis=1)
dataframe.index = pandas.date_range(
start=dataframe.index[0],
periods=len(dataframe.index),
freq='S')
dataframe = dataframe.interpolate(method='index')
# import pdb; pdb.set_trace()
result[id_index] = dataframe
return result
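# --- Illustrative sketch (added; not in the original module). It only exercises
# convert_timestamp_to_posix, which is fully defined above; assembling a real
# PrometheusData object for build_sonata_df_by_id is outside the scope of this
# sketch.
def _example_timestamp_conversion():
    ts = convert_timestamp_to_posix("1464873600")
    # Prometheus timestamps are POSIX seconds; the helper returns a
    # timezone-aware UTC datetime.
    assert ts.tzinfo is datetime.timezone.utc
    return ts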
|
cgeoffroy/son-analyze
|
son-scikit/src/son_scikit/hl_prometheus.py
|
Python
|
apache-2.0
| 2,969
|
# Copyright 2015 Don Drake don@drakeconsulting.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import unittest
import datetime
import time
from copy import deepcopy
src_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(src_dir, '..'))
from pyspark.sql.types import Row, StructType, StructField, IntegerType, StringType, BinaryType, BooleanType, DateType, TimestampType, DoubleType, FloatType, ByteType, LongType, ShortType
from SparkTestCase import SparkTestCase
from smartframes import SmartFrames
class SimpleTable(SmartFrames.SmartFrames):
schema = StructType( sorted(
[
StructField("pk_id", IntegerType()),
StructField("first_name", StringType()),
],
key = lambda x: x.name))
skipSelectedFields = []
class ComplexTable(SmartFrames.SmartFrames):
schema = StructType( sorted(
[
StructField("pk_id", IntegerType()),
StructField("string", StringType()),
StructField("binary", BinaryType()),
StructField("boolean", BooleanType()),
StructField("date", DateType()),
StructField("time", TimestampType()),
StructField("double1", DoubleType()),
StructField("double2", DoubleType()),
StructField("float1", FloatType()),
StructField("float2", FloatType()),
StructField("byte", ByteType()),
StructField("integer", IntegerType()),
StructField("along", LongType()),
StructField("short", ShortType()),
],
key = lambda x: x.name))
skipSelectedFields = []
class TestSmartFrames(SparkTestCase):
def testSimpleTable(self):
simpleTable = SimpleTable()
self.assertEquals(simpleTable.schema, SimpleTable().schema)
s1 = SimpleTable()
s1.pk_id = 1
s1.first_name = 'Don'
s2 = SimpleTable()
s2.pk_id = 2
s2.first_name = 'Dan'
df = self.sqlCtx.createDataFrame(self.sc.parallelize([s1.createRow(), s2.createRow()]), s1.schema)
self.assertEquals(2, df.count())
fileName = self.tempdir + '/simple.table'
df.saveAsParquetFile(fileName)
df2 = self.sqlCtx.parquetFile(fileName)
self.assertEquals(sorted(df.collect()), sorted(df2.collect()))
def testComplexTable(self):
complexTable = ComplexTable()
self.assertEquals(complexTable.schema, ComplexTable().schema)
s1 = ComplexTable()
s1.pk_id = 1
s1.string = 'abcdefghijklmnopqrstuvwxyz'
s1.binary = bytearray(b"0xDEADBEEF")
s1.boolean = True
s1.date = datetime.date(2015, 10, 3)
s1.time = datetime.datetime(2015, 10, 3, 14, 33)
s1.double1 = 1
s1.double2 = 2.2
s1.float1 = 1
s1.float2 = 2.2
s1.byte = 100
s1.integer = 10000
s1.along = 10000
s1.short = 10
df = self.sqlCtx.createDataFrame(self.sc.parallelize([s1.createRow()]), s1.schema)
fileName = self.tempdir + '/complex.table'
df.saveAsParquetFile(fileName)
df2 = self.sqlCtx.parquetFile(fileName)
self.assertEquals(sorted(df.collect()), sorted(df2.collect()))
r1 = df2.collect()[0]
print "r1=", r1
self.assertEquals(r1.pk_id, s1.pk_id)
self.assertEquals(r1.string, s1.string)
self.assertEquals(r1.binary, s1.binary)
self.assertEquals(r1.boolean, s1.boolean)
self.assertEquals(r1.date, s1.date)
self.assertEquals(r1.time, s1.time)
self.assertEquals(r1.double1, s1.double1)
self.assertEquals(r1.double2, s1.double2)
self.assertEquals(r1.float1, s1.float1)
# AssertionError: 2.200000047683716 != 2.2
#self.assertEquals(r1.float2, s1.float2)
self.assertEquals(r1.byte, s1.byte)
self.assertEquals(r1.integer, s1.integer)
self.assertEquals(r1.along, s1.along)
self.assertEquals(r1.short, s1.short)
def testComplexTableTiming(self):
s1 = ComplexTable()
s1.pk_id = 1
s1.string = 'abcdefghijklmnopqrstuvwxyz'
s1.binary = bytearray(b"0xDEADBEEF")
s1.boolean = True
s1.date = datetime.date(2015, 10, 3)
s1.time = datetime.datetime(2015, 10, 3, 14, 33)
s1.double1 = 1
s1.double2 = 2.2
s1.float1 = 1
s1.float2 = 2.2
s1.byte = 100
s1.integer = 10000
s1.along = 10000
s1.short = 10
numRows = 10000
rows = []
start = time.clock()
for n in xrange(numRows):
rows.append(deepcopy(s1.createRow()))
end = time.clock()
print "Duration for ", numRows, " is ", (end - start)
df = self.sqlCtx.createDataFrame(self.sc.parallelize(rows), s1.schema)
# duration for v.1.0.1 ~ 2.4 seconds for 10000 rows
# duration for v.1.1.0 ~ 1.6 seconds for 10000 rows
count = df.count()
self.assertEquals(count, numRows)
if __name__ == '__main__':
unittest.main()
|
dondrake/smartframes
|
tests/test_SmartFrames.py
|
Python
|
apache-2.0
| 5,541
|
#
# Copyright 2013 Nicolas Lamirault <nicolas.lamirault@gmail.com>.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import logging
from cliff import command
logger = logging.getLogger(__name__)
class FreeboxCommand(command.Command):
"""Default Freebox command."""
pass
class FreeboxApiVersion(FreeboxCommand):
"""Retrieve the Freebox OS api version."""
def take_action(self, parsed_args):
logger.info("[FreeboxOS] API_Version")
api_version = self.app.freebox_client.version()
#print "Result: %s" % api_version
logger.info('[FreeboxOS] %s\n' % api_version['api_version'])
class FreeboxLogin(FreeboxCommand):
"""Login to the Freebox OS."""
def take_action(self, parsed_args):
logger.info("[FreeboxOS] Login")
self.app.freebox_client.login()
# self.app.stdout.write('FreeboxOS: %s\n' %
# self.app.freebox_client)
logger.info('[FreeboxOS] Login response: %s' % self.app.freebox_client)
class FreeboxAuthorize(FreeboxCommand):
"""Request authorization for this application."""
def take_action(self, parsed_args):
logger.info("[FreeboxOS] Authorization request")
self.app.freebox_client.ask_authorization()
class FreeboxCheckAuthorization(FreeboxCommand):
"""Request informations about authorization for this application."""
def take_action(self, parsed_args):
logger.info("[FreeboxOS] Check Authorization ")
self.app.freebox_client.check_authorization()
class FreeboxOpenSession(FreeboxCommand):
"""Open a new session to the FreeboxOS."""
def take_action(self, parsed_args):
logger.info("[FreeboxOS] Open sesion")
self.app.freebox_client.open_session()
class FreeboxCloseSession(FreeboxCommand):
"""Close the current session to the FreeboxOS."""
def take_action(self, parsed_args):
logger.info("[FreeboxOS] Close sesion")
self.app.freebox_client.close_session()
class FreeboxWifiStatus(FreeboxCommand):
"""Retrieve the WIFI status."""
def take_action(self, parsed_args):
logger.info("[FreeboxOS] Wifi status")
wifi_status = self.app.freebox_client.get_wifi_status()
logger.info("[FreeboxOS] Wifi status:\n %s" % wifi_status)
class FreeboxWifiConfiguration(FreeboxCommand):
"""Retrieve the current WIFI configuration."""
def take_action(self, parsed_args):
logger.info("[FreeboxOS] Wifi configuration")
wifi_config = self.app.freebox_client.get_wifi_config()
logger.info("[FreeboxOS] Wifi configuration:\n %s" % wifi_config)
class FreeboxWifiStations(FreeboxCommand):
"""Retrieve a list of wifi stations."""
def take_action(self, parsed_args):
logger.info("[FreeboxOS] Wifi stations")
wifi_stations = self.app.freebox_client.get_wifi_stations()
logger.info("[FreefoxOS] Wifi stations:\n %s" % wifi_stations)
|
nlamirault/python-freeboxclient
|
freeboxclient/client.py
|
Python
|
apache-2.0
| 3,447
|
#!/usr/bin/env python
# @author: Martin Siggel <martin.siggel@dlr.de>
#
# This script fixes the cmake exports file by
# removing explicit linking to system libraries
import sys, re
def remove_absolute_paths(line):
"""
Removes libraries from the line that are found under /usr
"""
if sys.platform == 'win32':
return line
elif sys.platform == 'darwin':
return re.sub('/Applications/[-_a-zA-Z0-9/.]+.framework[;]?', '', line)
else:
return re.sub('/usr/[-_a-zA-Z0-9/]+.so[;]?', '', line)
def fix_paths(filename):
with open(filename) as f:
lines = f.readlines()
# just select lines containing string IMPORTED_LINK_INTERFACE_LIBRARIES
for i, line in enumerate(lines):
if "IMPORTED_LINK_INTERFACE_LIBRARIES" in line or "INTERFACE_LINK_LIBRARIES" in line:
lines[i] = remove_absolute_paths(line)
fout = open(filename,'w')
fout.write("".join(lines))
fout.close()
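# --- Illustrative sketch (added; not part of the original script). On a Linux
# host the regex above strips absolute /usr/... shared-library entries from a
# CMake exports line while leaving other entries untouched; the sample line
# below is made up for demonstration.
def _example_strip():
    line = 'IMPORTED_LINK_INTERFACE_LIBRARIES "TKernel;/usr/lib/x86_64-linux-gnu/libz.so;pthread"\n'
    # On Linux this returns the line with the /usr/... entry removed; on other
    # platforms the line passes through the corresponding branch unchanged.
    return remove_absolute_paths(line)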
if __name__ == "__main__":
assert(len(sys.argv) == 2)
filename = sys.argv[1]
fix_paths(filename)
|
DLR-SC/tigl
|
ci/conda/oce-static/remove-system-libs.py
|
Python
|
apache-2.0
| 1,087
|
#! /usr/bin/env python
"""
Parse PER vs. SINR data from trace files.
Revision Info
=============
* $LastChangedBy: mandke $
* $LastChangedDate: 2011-10-19 17:04:02 -0500 (Wed, 19 Oct 2011) $
* $LastChangedRevision: 5220 $
:author: Ketan Mandke <kmandke@mail.utexas.edu>
:copyright:
Copyright 2009-2011 The University of Texas at Austin
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__docformat__ = "restructuredtext en"
from wins import *
from wins.ieee80211 import *
from optparse import OptionParser
import sys
from copy import copy
from numpy import array
def read_trace(options, tracefile):
# load trace from file
tr = Trace()
tr.read(tracefile)
# return trace
return tr
DETECTFAIL1 = "not detected in LISTEN"
HEADERFAIL1 = "header parameters failed"
HEADERFAIL2 = "header decoding failed"
IGNOREFAIL1 = "ignore rxdata in DECODE"
IGNOREFAIL2 = "ignore detect in DECODE"
def parse_per_info(options, trace, fmt='bo', usemodel=False):
# initialize parameters
param, data = {}, []
mcs, rmsdelay = None, []
ncollision = options.ncollision
# parse trace
for e in trace.events:
obj, evt = e['obj'], e['event']
# check for MCS parameter
if ('phy-rate' in e):
rate = int(e['phy-rate'])
hparamfail = ('drop' in e) and (e['drop']==HEADERFAIL1)
if not hparamfail:
if mcs is None: mcs = rate
else: assert (mcs == rate)
# check for 802.11n RCV & DRP events
if (obj=="80211N"):
rcv, drp = (evt=="RCV"), (evt=="DRP")
x, y = None, None
if drp:
drop = e['drop']
notdetected = (drop==DETECTFAIL1)
hparamfail = (drop==HEADERFAIL1)
headerfail = (drop==HEADERFAIL2)
ignorefail = (drop==IGNOREFAIL1) or (drop==IGNOREFAIL2)
assert (notdetected or hparamfail or headerfail or ignorefail), "%s"%(e)
#sinr = float(e['dot11n-sinr'].lower().replace("db","") )
#x, y = sinr, 1.0 # log header drop as a packet error also
elif rcv:
sinr = float(e['dot11n-sinr'].lower().replace("db","") )
err = e['crc']
haserror = (err=="FAIL")
noerror = (err=="OK")
assert (haserror or noerror)
if usemodel:
per = float(e['dot11n-model-per'])
else:
if haserror: per = 1.0
else: per = 0.0
# check if ncollision matches
keepdata = True
if (ncollision is not None):
keepdata = False
if 'cif-collision' in e:
coll = eval(e['cif-collision'])
assert isinstance(coll, list)
keepdata = (len(coll) == ncollision)
if keepdata:
x, y = sinr, per
# log data point
if (x is not None) and (y is not None):
dp = {'x':x, 'y':y, 'ndata': 1}
data.append(dp)
# check for RMS delay
if (rcv or drp):
tau = float(e['dot11n-rmsdelay'])
rmsdelay.append(tau)
# check parameters
assert (rmsdelay)
assert (mcs is not None)
avgdelay = array(rmsdelay).mean()
pertype = "actual"
if usemodel: pertype = "model"
# return param and data
param['mcs'] = mcs
param['rmsdelay'] = avgdelay
param['format'] = fmt
label = "${\\rm PER}_{%s}$ ${\\rm (MCS = %d}$, "%(pertype,mcs)
if ncollision is not None: label +="$N_{coll} = %d$, "%(ncollision)
label += "$\\sigma_{rms} = %.3g ns)$"%(avgdelay*1e9)
param['label'] = label
return param, data
def parse_per():
usage = "%prog [OPTIONS] TRACEFILE1 [TRACEFILE2 ...]\n" + \
" Writes parsed data to standard output."
parser = OptionParser(usage=usage)
parser.add_option("-c", "--ncollision", dest="ncollision", type="int", \
default=None, help="Filter results using number of collisions. [default=%default]")
(options, args) = parser.parse_args()
if len(args)<1:
print "Insufficient number of arguments."
parser.print_help()
raise SystemExit
tracefile = args[0:]
numtraces = len(tracefile)
# set parameters
default_parameters = {'xlabel': "SINR (dB)", \
'ylabel': "PER", \
'title': "PER vs. SINR", \
'label': None, \
'source': None, \
'format': None}
lgd, formats = [], [('ro','r:'), ('bo', 'b:'), ('go', 'g:')]
for k in range(numtraces):
tfile = tracefile[k]
# treat as normal wins trace file
trace = read_trace(options, tfile)
fmt = formats[k%len(formats)]
if not trace: continue
sys.stderr.write("Parsing trace from %s ...\n"%(tfile))
# parse actual PER from trace
param, data = parse_per_info(options, trace)
if data:
parameters = copy(default_parameters)
parameters.update(param)
parameters['source'] = tfile
parameters['format'] = fmt[0]
assert (param['label'] is not None)
parsed_data = {'parameters': parameters, 'data': data}
sys.stdout.write("%s\n"%(parsed_data) )
# parse model PER from trace
param, data = parse_per_info(options, trace, usemodel=True)
if data:
parameters = copy(default_parameters)
parameters.update(param)
parameters['source'] = tfile
parameters['format'] = fmt[1]
assert (param['label'] is not None)
parsed_data = {'parameters': parameters, 'data': data}
sys.stdout.write("%s\n"%(parsed_data) )
if __name__ == '__main__':
parse_per()
|
reidlindsay/wins
|
sandbox/experiments/aloha/infocom/parse-per.py
|
Python
|
apache-2.0
| 6,546
|
import ctypes
class JLinkException(Exception): pass
class JLink(object):
"Jlink api"
revser = 0
def __init__(self, dllpath):
self.jlink = ctypes.cdll.LoadLibrary(dllpath)
self.tif_select(1)
self.set_speed(1000)
self.reset()
def check_err(fn):
def checked_transaction(self,*args):
self.jlink.JLINK_ClrError()
ret = fn(self, *args)
errno = self.jlink.JLINK_HasError()
if errno:
raise JLinkException(errno)
return ret
return checked_transaction
@check_err
def tif_select(self, tif): return self.jlink.JLINKARM_TIF_Select(tif)
@check_err
def set_speed(self, khz): return self.jlink.JLINKARM_SetSpeed(khz)
@check_err
def reset(self): return self.jlink.JLINKARM_Reset()
@check_err
def halt(self): return self.jlink.JLINKARM_Halt()
@check_err
def clear_tck(self): return self.jlink.JLINKARM_ClrTCK()
@check_err
def clear_tms(self): return self.jlink.JLINKARM_ClrTMS()
@check_err
def set_tms(self): return self.jlink.JLINKARM_SetTMS()
@check_err
def read_reg(self,r): return self.jlink.JLINKARM_ReadReg(r)
@check_err
def write_reg(self,r,val): return self.jlink.JLINKARM_WriteReg(r,val)
@check_err
def write_U32(self,r,val): return self.jlink.JLINKARM_WriteU32(r,val)
@check_err
def write_U16(self,r,val): return self.jlink.JLINKARM_WriteU16(r,val)
@check_err
def open(self): return self.jlink.JLINKARM_Open()
@check_err
def close(self): return self.jlink.JLINKARM_Close()
@check_err
def go(self): return self.jlink.JLINKARM_Go()
@check_err
def write_mem(self, startaddress, data):
buf=ctypes.create_string_buffer(data)
return self.jlink.JLINKARM_WriteMem(startaddress,len(data),buf)
@check_err
def read_mem(self, startaddress, length):
buf=ctypes.create_string_buffer(length)
ret=self.jlink.JLINKARM_ReadMem(startaddress,length, buf)
return buf,ret
@check_err
def read_mem_U32(self, startaddress, count):
buftype=ctypes.c_uint32 * int(count)
buf=buftype()
ret=self.jlink.JLINKARM_ReadMemU32(startaddress, count, buf, 0)
return buf,ret
def read_U32(self, startaddress):
buf, ret = self.read_mem_U32(startaddress, 1)
return buf[0]
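# --- Hedged usage sketch (illustration only; not in the original module).
# Running it requires SEGGER's J-Link DLL and an attached probe, so the DLL
# path and the memory address below are placeholders rather than tested values.
def _example_read_word(dllpath='JLinkARM.dll'):
    jlink = JLink(dllpath)              # loads the DLL, selects the interface and resets (see __init__)
    jlink.halt()                        # stop the core before reading memory
    word = jlink.read_U32(0x20000000)   # read one 32-bit word (placeholder address)
    jlink.go()
    return word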
|
xqt2010a/Python_Study
|
python/07_JLink_RTT/JLink.py
|
Python
|
apache-2.0
| 2,491
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the XLSX output module."""
import os
import unittest
import zipfile
from xml.etree import ElementTree
from plaso.containers import events
from plaso.formatters import interface as formatters_interface
from plaso.formatters import manager as formatters_manager
from plaso.lib import eventdata
from plaso.lib import timelib
from plaso.output import xlsx
from tests import test_lib as shared_test_lib
from tests.output import test_lib
class TestEvent(events.EventObject):
"""Event object used for testing."""
DATA_TYPE = u'test:xlsx'
def __init__(self):
"""Initializes an event object used for testing."""
super(TestEvent, self).__init__()
self.timestamp = timelib.Timestamp.CopyFromString(u'2012-06-27 18:17:01')
self.timestamp_desc = eventdata.EventTimestamp.CHANGE_TIME
self.hostname = u'ubuntu'
self.filename = u'log/syslog.1'
self.text = (
u'Reporter <CRON> PID: 8442 (pam_unix(cron:session): session\n '
u'closed for user root) Invalid character -> \ud801')
class TestEventFormatter(formatters_interface.EventFormatter):
"""Event object formatter used for testing."""
DATA_TYPE = u'test:xlsx'
FORMAT_STRING = u'{text}'
SOURCE_SHORT = u'LOG'
SOURCE_LONG = u'Syslog'
class XLSXOutputModuleTest(test_lib.OutputModuleTestCase):
"""Test the XLSX output module."""
_SHARED_STRINGS = u'xl/sharedStrings.xml'
_SHEET1 = u'xl/worksheets/sheet1.xml'
_COLUMN_TAG = u'}c'
_ROW_TAG = u'}row'
_SHARED_STRING_TAG = u'}t'
_SHARED_STRING_TYPE = u's'
_TYPE_ATTRIBUTE = u't'
_VALUE_STRING_TAG = u'}v'
def _GetSheetRows(self, filename):
"""Parses the contents of the first sheet of an XLSX document.
Args:
filename: The file path of the XLSX document to parse.
Returns:
A list of dictionaries representing the rows and columns of the first
sheet.
"""
zip_file = zipfile.ZipFile(filename)
# Fail if we can't find the expected first sheet.
if self._SHEET1 not in zip_file.namelist():
raise ValueError(
u'Unable to locate expected sheet: {0:s}'.format(self._SHEET1))
# Generate a reference table of shared strings if available.
strings = []
if self._SHARED_STRINGS in zip_file.namelist():
zip_file_object = zip_file.open(self._SHARED_STRINGS)
for _, element in ElementTree.iterparse(zip_file_object):
if element.tag.endswith(self._SHARED_STRING_TAG):
strings.append(element.text)
row = []
rows = []
value = u''
zip_file_object = zip_file.open(self._SHEET1)
for _, element in ElementTree.iterparse(zip_file_object):
if (element.tag.endswith(self._VALUE_STRING_TAG) or
element.tag.endswith(self._SHARED_STRING_TAG)):
value = element.text
if element.tag.endswith(self._COLUMN_TAG):
# Grab value from shared string reference table if type shared string.
if (strings and element.attrib.get(self._TYPE_ATTRIBUTE) ==
self._SHARED_STRING_TYPE):
try:
value = strings[int(value)]
except (IndexError, ValueError):
raise ValueError(
u'Unable to successfully dereference shared string.')
row.append(value)
# If we see the end tag of the row, record row in rows and reset.
if element.tag.endswith(self._ROW_TAG):
rows.append(row)
row = []
return rows
def testWriteEventBody(self):
"""Tests the WriteHeader function."""
formatters_manager.FormattersManager.RegisterFormatter(TestEventFormatter)
expected_header = [
u'datetime', u'timestamp_desc', u'source', u'source_long',
u'message', u'parser', u'display_name', u'tag']
expected_event_body = [
u'41087.76181712963', u'Metadata Modification Time', u'LOG', u'Syslog',
u'Reporter <CRON> PID: 8442 (pam_unix(cron:session): session '
u'closed for user root) Invalid character -> \ufffd',
u'-', u'-', u'-']
with shared_test_lib.TempDirectory() as temp_directory:
output_mediator = self._CreateOutputMediator()
output_module = xlsx.XLSXOutputModule(output_mediator)
xslx_file = os.path.join(temp_directory, u'xlsx.out')
output_module.SetFilename(xslx_file)
output_module.Open()
output_module.WriteHeader()
output_module.WriteEvent(TestEvent())
output_module.WriteFooter()
output_module.Close()
try:
rows = self._GetSheetRows(xslx_file)
except ValueError as exception:
self.fail(exception)
self.assertEqual(expected_header, rows[0])
self.assertEqual(len(expected_event_body), len(rows[1]))
self.assertEqual(expected_event_body, rows[1])
def testWriteHeader(self):
"""Tests the WriteHeader function."""
expected_header = [
u'datetime', u'timestamp_desc', u'source', u'source_long',
u'message', u'parser', u'display_name', u'tag']
with shared_test_lib.TempDirectory() as temp_directory:
output_mediator = self._CreateOutputMediator()
output_module = xlsx.XLSXOutputModule(output_mediator)
xlsx_file = os.path.join(temp_directory, u'xlsx.out')
output_module.SetFilename(xlsx_file)
output_module.Open()
output_module.WriteHeader()
output_module.WriteFooter()
output_module.Close()
try:
rows = self._GetSheetRows(xlsx_file)
except ValueError as exception:
self.fail(exception)
self.assertEqual(expected_header, rows[0])
if __name__ == u'__main__':
unittest.main()
|
dc3-plaso/plaso
|
tests/output/xlsx.py
|
Python
|
apache-2.0
| 5,608
|
# -*- coding: utf-8 -*-
import curses
import npyscreen
from vent.helpers.paths import PathDirs
from vent.menu import VentApp
from vent.menus.main import MainForm
npyscreen.TEST_SETTINGS['CONTINUE_AFTER_TEST_INPUT'] = False
def run_menu(test_input):
""" Actually run the menu and process any input """
# initialize tutorial
paths = PathDirs()
first_time = paths.ensure_file(paths.init_file)
assert first_time[0] == True
npyscreen.TEST_SETTINGS['TEST_INPUT'] = test_input
A = VentApp()
try:
A.run(fork=False)
except npyscreen.ExhaustedTestInput as e:
pass
def test_tools_status():
""" Test the staticmethod tools_status """
a, b = MainForm.t_status(True)
assert isinstance(a, str)
assert isinstance(b, tuple)
def test_menu():
""" Run menu tests """
CTRL_Q = '^Q'
CTRL_T = '^T'
CTRL_X = '^X'
CTRL_V = '^V'
ENTER = curses.ascii.CR
TAB = curses.ascii.TAB
LEFT = curses.KEY_LEFT
RIGHT = curses.KEY_RIGHT
DOWN = curses.KEY_DOWN
SPACE = curses.ascii.SP
BACKSPACE = curses.ascii.BS
# go through help menus
run_menu([ENTER, CTRL_T, CTRL_X, 'b', 'm', ENTER, ENTER, CTRL_X, 'b', 'p',
ENTER, ENTER, CTRL_X, 'b', 't', ENTER, ENTER, CTRL_X, 'b', 'f',
ENTER, ENTER, CTRL_X, 'b', 'c', ENTER, ENTER, CTRL_X, 'b', 's',
ENTER, ENTER, CTRL_X, 'p', 'a', ENTER, ENTER, CTRL_X, 'p', 'b',
ENTER, ENTER, ENTER])
# go to help menu and leave again
run_menu([ENTER, CTRL_T, RIGHT, ENTER])
# go through the core tools menus
# install
run_menu([ENTER, CTRL_X, 'c', 'i', ENTER])
# build - ok
run_menu([ENTER, CTRL_X, 'c', 'b', TAB, TAB, TAB, TAB, TAB, TAB, TAB, TAB,
RIGHT, ENTER, ENTER, ENTER])
# build - cancel
run_menu([ENTER, CTRL_X, 'c', 'b', TAB, TAB, TAB, TAB, TAB, TAB, TAB, TAB,
ENTER])
# build - quit back to main
run_menu([ENTER, CTRL_X, 'c', 'b', CTRL_Q])
# build - toggle to main
run_menu([ENTER, CTRL_X, 'c', 'b', CTRL_T])
# start - ok
run_menu([ENTER, CTRL_X, 'c', 's', TAB, TAB, TAB, TAB, TAB, TAB, TAB, TAB,
TAB, TAB, ENTER, ENTER, ENTER, ENTER, ENTER])
# start - cancel
run_menu([ENTER, CTRL_X, 'c', 's', TAB, TAB, TAB, TAB, TAB, TAB, TAB, TAB,
TAB, ENTER])
# start - quit back to main
run_menu([ENTER, CTRL_X, 'c', 's', CTRL_Q])
# start - toggle to main
run_menu([ENTER, CTRL_X, 'c', 's', CTRL_T])
# configure - cancel
run_menu([ENTER, CTRL_X, 'c', 't', TAB, SPACE, TAB, SPACE, TAB, SPACE, TAB,
SPACE, TAB, SPACE, TAB, SPACE, TAB, SPACE, TAB, SPACE, TAB, TAB,
LEFT, ENTER])
# configure - quit back to main
run_menu([ENTER, CTRL_X, 'c', 't', CTRL_Q])
# configure - toggle back to main
run_menu([ENTER, CTRL_X, 'c', 't', CTRL_T])
# configure - ok
run_menu([ENTER, CTRL_X, 'c', 't', TAB, SPACE, TAB, SPACE, TAB, SPACE, TAB,
SPACE, TAB, SPACE, TAB, SPACE, TAB, SPACE, TAB, SPACE, TAB, TAB,
TAB, ENTER, TAB, TAB, ENTER, ENTER, ENTER])
# configure - quit in the middle of add
# run_menu([ENTER, CTRL_X, 'c', 't', SPACE, TAB, SPACE, TAB, SPACE, TAB,
# SPACE, TAB, SPACE, TAB, SPACE, TAB, SPACE, TAB, TAB, SPACE, TAB,
# SPACE, TAB, TAB, ENTER, DOWN, DOWN, DOWN, DOWN, DOWN, DOWN, DOWN,
# DOWN, DOWN, DOWN, DOWN, DOWN, LEFT, BACKSPACE, '3', TAB, TAB,
# ENTER, ENTER, TAB, ENTER, ENTER, TAB, ENTER, CTRL_Q])
# configure - instances add (add an instance of rq_worker)
# run_menu([ENTER, CTRL_X, 'c', 't', SPACE, TAB, SPACE, TAB, SPACE, TAB,
# SPACE, TAB, SPACE, TAB, SPACE, TAB, SPACE, TAB, TAB, SPACE, TAB,
# SPACE, TAB, TAB, ENTER, DOWN, DOWN, DOWN, DOWN, DOWN, DOWN, DOWN,
# DOWN, DOWN, DOWN, DOWN, DOWN, LEFT, BACKSPACE, '3', TAB, TAB,
# ENTER, ENTER, TAB, ENTER, ENTER, TAB, ENTER, TAB, TAB, ENTER])
# configure - quit in the middle of delete
# run_menu([ENTER, CTRL_X, 'c', 't', SPACE, TAB, SPACE, TAB, SPACE, TAB,
# SPACE, TAB, SPACE, TAB, SPACE, TAB, SPACE, TAB, TAB, SPACE, TAB,
# SPACE, TAB, SPACE, TAB, TAB, ENTER, DOWN, DOWN, DOWN, DOWN, DOWN,
# DOWN, DOWN, DOWN, DOWN, DOWN, DOWN, DOWN, LEFT, BACKSPACE, '2',
# TAB, TAB, ENTER, ENTER, TAB, ENTER, CTRL_Q])
# configure - instances delete (delete an instance of file_drop)
# run_menu([ENTER, CTRL_X, 'c', 't', SPACE, TAB, SPACE, TAB, SPACE, TAB,
# SPACE, TAB, SPACE, TAB, SPACE, TAB, SPACE, TAB, TAB, SPACE, TAB,
# SPACE, TAB, SPACE, TAB, TAB, ENTER, DOWN, DOWN, DOWN, DOWN, DOWN,
# DOWN, DOWN, DOWN, DOWN, DOWN, DOWN, DOWN, LEFT, BACKSPACE, '2',
# TAB, TAB, ENTER, ENTER, TAB, ENTER, SPACE, TAB, TAB, ENTER])
# clean - ok
run_menu([ENTER, CTRL_X, 'c', 'c', TAB, TAB, TAB, TAB, TAB, TAB, TAB, TAB,
RIGHT, ENTER, ENTER, ENTER])
# clean - cancel
run_menu([ENTER, CTRL_X, 'c', 'c', TAB, TAB, TAB, TAB, TAB, TAB, TAB, TAB,
ENTER])
# clean - quit back to main
run_menu([ENTER, CTRL_X, 'c', 'c', CTRL_Q])
# clean - toggle to main
run_menu([ENTER, CTRL_X, 'c', 'c', CTRL_T])
# inventory - quit back to main
run_menu([ENTER, CTRL_X, 'c', 'v', CTRL_Q])
# inventory - toggle to main
run_menu([ENTER, CTRL_X, 'c', 'v', CTRL_T])
# inventory - toggle group view
run_menu([ENTER, CTRL_X, 'c', 'v', CTRL_V, CTRL_V, CTRL_V, CTRL_V, CTRL_V,
CTRL_V, CTRL_V, CTRL_V, CTRL_T])
run_menu([ENTER, CTRL_X, 'c', 's', TAB, TAB, TAB, TAB, TAB, TAB, TAB, TAB,
RIGHT, ENTER, ENTER, ENTER, ENTER, ENTER])
run_menu([ENTER, CTRL_X, 'c', 's', TAB, TAB, TAB, TAB, TAB, TAB, TAB, TAB,
ENTER])
run_menu([ENTER, CTRL_X, 'c', 's', CTRL_Q])
run_menu([ENTER, CTRL_X, 'c', 's', CTRL_T])
# services running - core services
run_menu([ENTER, CTRL_X, 's', 'c', CTRL_T])
# services running - external services
run_menu([ENTER, CTRL_X, 's', 'e', CTRL_T])
run_menu([ENTER, CTRL_X, 'c', 'p', TAB, TAB, TAB, TAB, TAB, TAB, TAB, TAB,
RIGHT, ENTER, ENTER, ENTER])
run_menu([ENTER, CTRL_X, 'c', 'p', TAB, TAB, TAB, TAB, TAB, TAB, TAB, TAB,
ENTER])
run_menu([ENTER, CTRL_X, 'c', 'p', CTRL_Q])
run_menu([ENTER, CTRL_X, 'c', 'p', CTRL_T])
run_menu([ENTER, CTRL_X, 'c', 'u', TAB, TAB, TAB, TAB, TAB, TAB, TAB, TAB,
RIGHT, ENTER, ENTER, ENTER])
run_menu([ENTER, CTRL_X, 'c', 'u', TAB, TAB, TAB, TAB, TAB, TAB, TAB, TAB,
ENTER])
run_menu([ENTER, CTRL_X, 'c', 'u', CTRL_Q])
run_menu([ENTER, CTRL_X, 'c', 'u', CTRL_T])
run_menu([ENTER, CTRL_X, 'c', 'r', TAB, TAB, TAB, TAB, TAB, TAB, TAB, TAB,
RIGHT, ENTER, ENTER, ENTER])
run_menu([ENTER, CTRL_X, 'c', 'r', ENTER])
run_menu([ENTER, CTRL_X, 'c', 'r', CTRL_Q])
run_menu([ENTER, CTRL_X, 'c', 'r', CTRL_T])
run_menu([ENTER, CTRL_X, 'c', 't', TAB, ENTER, ENTER, ENTER])
# go through the plugins menus
run_menu([ENTER, CTRL_X, 'p', 'a', TAB, TAB, TAB, TAB, TAB, TAB, TAB, TAB,
RIGHT, ENTER, SPACE, TAB, TAB, TAB, TAB, TAB, TAB, TAB, ENTER,
SPACE, TAB, SPACE, TAB, SPACE, TAB, TAB, SPACE, TAB, SPACE, TAB,
TAB, ENTER, ENTER, ENTER])
cmds = [ENTER, CTRL_X, 'p', 'a', TAB, TAB, TAB, 'alpine', TAB, TAB, TAB,
TAB, TAB, TAB, ENTER, ENTER, ENTER]
cmds += (43 * [BACKSPACE])
cmds += [TAB, TAB, TAB, BACKSPACE, BACKSPACE, BACKSPACE, BACKSPACE,
BACKSPACE, BACKSPACE, TAB, TAB, TAB, TAB, TAB, TAB, ENTER, ENTER,
ENTER, CTRL_Q]
run_menu(cmds)
run_menu([ENTER, CTRL_X, 'p', 'a', TAB, TAB, TAB, 'alpine', TAB, 'alpine',
TAB, TAB, TAB, TAB, TAB, ENTER, ENTER, ENTER, TAB, TAB, ENTER,
ENTER, ENTER])
run_menu([ENTER, CTRL_X, 'p', 'a', CTRL_T, CTRL_T, TAB, TAB, TAB, TAB, TAB,
TAB, TAB, TAB, ENTER])
run_menu([ENTER, CTRL_X, 'p', 'b', TAB, TAB, RIGHT, ENTER, ENTER, ENTER])
run_menu([ENTER, CTRL_X, 'p', 'b', TAB, TAB, ENTER])
run_menu([ENTER, CTRL_X, 'p', 'b', CTRL_Q])
run_menu([ENTER, CTRL_X, 'p', 'b', CTRL_T])
run_menu([ENTER, CTRL_X, 'p', 'c', TAB, TAB, RIGHT, ENTER, ENTER, ENTER])
run_menu([ENTER, CTRL_X, 'p', 'c', TAB, TAB, ENTER])
run_menu([ENTER, CTRL_X, 'p', 'c', CTRL_Q])
run_menu([ENTER, CTRL_X, 'p', 'c', CTRL_T])
run_menu([ENTER, CTRL_X, 'p', 'i', CTRL_T])
run_menu([ENTER, CTRL_X, 'p', 's', TAB, TAB, RIGHT, ENTER, ENTER, ENTER,
ENTER, ENTER])
run_menu([ENTER, CTRL_X, 'p', 's', TAB, TAB, ENTER])
run_menu([ENTER, CTRL_X, 'p', 's', CTRL_Q])
run_menu([ENTER, CTRL_X, 'p', 's', CTRL_T])
# services running - plugin services
run_menu([ENTER, CTRL_X, 's', 'p', CTRL_T])
run_menu([ENTER, CTRL_X, 'p', 'p', RIGHT, ENTER, ENTER, ENTER])
run_menu([ENTER, CTRL_X, 'p', 'p', ENTER])
run_menu([ENTER, CTRL_X, 'p', 'p', CTRL_Q])
run_menu([ENTER, CTRL_X, 'p', 'p', CTRL_T])
run_menu([ENTER, CTRL_X, 'p', 'u', TAB, TAB, RIGHT, ENTER, ENTER, ENTER])
run_menu([ENTER, CTRL_X, 'p', 'u', TAB, TAB, ENTER])
run_menu([ENTER, CTRL_X, 'p', 'u', CTRL_Q])
run_menu([ENTER, CTRL_X, 'p', 'u', CTRL_T])
run_menu([ENTER, CTRL_X, 'p', 'r', TAB, TAB, RIGHT, ENTER, ENTER, ENTER])
run_menu([ENTER, CTRL_X, 'p', 'r', ENTER])
run_menu([ENTER, CTRL_X, 'p', 'r', CTRL_Q])
run_menu([ENTER, CTRL_X, 'p', 'r', CTRL_T])
# go through the logs menus
run_menu([ENTER, CTRL_X, 'l', DOWN, ENTER, CTRL_T])
run_menu([ENTER, CTRL_X, 'l', DOWN, ENTER, CTRL_Q])
# go through the services running menus
run_menu([ENTER, CTRL_X, 's', 'c', CTRL_T])
run_menu([ENTER, CTRL_X, 's', 'e', CTRL_T])
run_menu([ENTER, CTRL_X, 's', 'p', CTRL_T])
# go through the system commands menus
# causes .coverage file to not exist
# run_menu([ENTER, CTRL_X, DOWN, DOWN, DOWN, DOWN, ENTER, 'r', TAB, RIGHT,
# ENTER, ENTER, ENTER])
# system commands - backup
run_menu([ENTER, CTRL_X, 'y', 'b', ENTER, ENTER])
# system commands - configure - cancel
run_menu([ENTER, CTRL_X, 'y', 'c', TAB, ENTER, ENTER, ENTER])
# system commands - configure - ok
run_menu([ENTER, CTRL_X, 'y', 'c', TAB, TAB, ENTER, ENTER, ENTER])
run_menu([ENTER, CTRL_X, DOWN, DOWN, DOWN, DOWN, ENTER, 'g', ENTER, ENTER])
run_menu([ENTER, CTRL_X, DOWN, DOWN, DOWN, DOWN, ENTER, 's'])
run_menu([ENTER, CTRL_X, DOWN, DOWN, DOWN, DOWN, ENTER, 'u'])
run_menu([ENTER, CTRL_X, DOWN, DOWN, DOWN, DOWN, ENTER, 'b', ENTER, ENTER])
run_menu([ENTER, CTRL_X, DOWN, DOWN, DOWN, DOWN, ENTER, 't', SPACE, TAB,
ENTER])
run_menu([ENTER, CTRL_X, DOWN, DOWN, DOWN, DOWN, ENTER, 't', SPACE, TAB,
TAB, ENTER, ENTER, ENTER])
# system commands - network tap interface - create
run_menu([ENTER, CTRL_X, 'y', 'n', 'c', 'lo', TAB, 'foo', TAB, '5', TAB,
TAB, '1', TAB, TAB, ENTER, ENTER, ENTER, TAB, TAB, TAB, TAB, TAB,
ENTER])
# system commands - network tap interface - nics
run_menu([ENTER, CTRL_X, 'y', 'n', 'n', TAB, ENTER])
run_menu([ENTER, CTRL_X, 'y', 'n', 'n', TAB, TAB, ENTER])
run_menu([ENTER, CTRL_X, 'y', 'n', 'n', CTRL_T])
# go through the tutorials menus
run_menu([ENTER, CTRL_X, 't', 'v', 'b', RIGHT, ENTER])
run_menu([ENTER, CTRL_X, 't', 'v', 't', RIGHT, ENTER])
run_menu([ENTER, CTRL_X, 't', 'v', 's', RIGHT, ENTER])
run_menu([ENTER, CTRL_X, 't', 'c', 'b', RIGHT, ENTER])
run_menu([ENTER, CTRL_X, 't', 'c', 'c', RIGHT, ENTER])
run_menu([ENTER, CTRL_X, 't', 'p', 'a', RIGHT, ENTER])
run_menu([ENTER, CTRL_X, 't', 'f', 'a', RIGHT, ENTER])
run_menu([ENTER, CTRL_X, 't', 's', 's', RIGHT, ENTER])
# exit
# causes .coverage file to not exist
# run_menu([ENTER, CTRL_Q])
# extra complete run
run_menu([ENTER, CTRL_X, 'c', 'i', ENTER, ENTER, CTRL_X, 'c', 'b', TAB,
TAB, TAB, TAB, TAB, TAB, TAB, TAB, TAB, TAB, ENTER, ENTER, ENTER,
ENTER, CTRL_X, 'c', 's', ENTER, ENTER, TAB, TAB, TAB, TAB, TAB,
TAB, TAB, TAB, TAB, TAB, ENTER, ENTER, ENTER, ENTER, ENTER,
ENTER, CTRL_X, 'p', 'a', TAB, TAB, TAB, TAB, TAB, TAB, TAB, TAB,
TAB, ENTER, SPACE, TAB, TAB, TAB, TAB, ENTER, TAB, TAB, TAB, TAB,
TAB, TAB, TAB, TAB, ENTER, ENTER, ENTER, CTRL_X, 's', 'c',
CTRL_T])
|
Jeff-Wang93/vent
|
tests/menu/test_menu.py
|
Python
|
apache-2.0
| 12,615
|
#!/usr/bin/env python
# This script will tweet the text that is passed as an argument
# Requires Twython, API credentials set as env vars
# Usage: python status-tweet.py "Hello Everyone, this is my Raspberry Pi tweeting you more nonsense"
import sys
import os
from twython import Twython
import twitter_api_creds
# Set Twitter Credentials from environment variables
CONSUMER_KEY = os.getenv("CONSUMER_KEY")
CONSUMER_SECRET = os.getenv("CONSUMER_SECRET")
ACCESS_KEY = os.getenv("ACCESS_KEY")
ACCESS_SECRET = os.getenv("ACCESS_SECRET")
api = Twython(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_KEY, ACCESS_SECRET)
# Tweet the text passed as the first argument, truncated to Twitter's 140-character limit
api.update_status(status=sys.argv[1][:140])
|
swoodford/twitter
|
status-tweet.py
|
Python
|
apache-2.0
| 663
|
from direct.actor.Actor import Actor
from direct.task.Task import Task
from panda3d.core import *
from panda3d.direct import *
from otp.otpbase.OTPBase import OTPBase
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
from toontown.parties.DistributedPartyActivity import DistributedPartyActivity
from toontown.parties.PartyGlobals import ActivityIds, ActivityTypes, JUKEBOX_TIMEOUT
from toontown.parties.PartyGlobals import getMusicRepeatTimes, MUSIC_PATH, sanitizePhase
from toontown.parties.JukeboxGui import JukeboxGui
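# Client-side jukebox party activity: a collision tube around the jukebox model lets a
# toon claim it, JukeboxGui handles song queueing, and the activity swaps the zone music
# for the queued party tracks while showing the current title on the activity sign.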
class DistributedPartyJukeboxActivityBase(DistributedPartyActivity):
notify = directNotify.newCategory('DistributedPartyJukeboxActivityBase')
def __init__(self, cr, actId, phaseToMusicData):
DistributedPartyActivity.__init__(self, cr, actId, ActivityTypes.Continuous)
self.phaseToMusicData = phaseToMusicData
self.jukebox = None
self.gui = None
self.tunes = []
self.music = None
self.currentSongData = None
self.localQueuedSongInfo = None
self.localQueuedSongListItem = None
return
def generateInit(self):
self.gui = JukeboxGui(self.phaseToMusicData)
def load(self):
DistributedPartyActivity.load(self)
self.jukebox = Actor('phase_13/models/parties/jukebox_model', {'dance': 'phase_13/models/parties/jukebox_dance'})
self.jukebox.reparentTo(self.root)
self.jukebox.loop('dance', fromFrame=0, toFrame=48)
self.jukebox.setBlend(frameBlend = True)
self.collNode = CollisionNode(self.getCollisionName())
self.collNode.setCollideMask(ToontownGlobals.CameraBitmask | ToontownGlobals.WallBitmask)
collTube = CollisionTube(0, 0, 0, 0.0, 0.0, 4.25, 2.25)
collTube.setTangible(1)
self.collNode.addSolid(collTube)
self.collNodePath = self.jukebox.attachNewNode(self.collNode)
self.sign.setPos(-5.0, 0, 0)
self.activate()
def unload(self):
DistributedPartyActivity.unload(self)
self.gui.unload()
if self.music is not None:
self.music.stop()
self.jukebox.stop()
self.jukebox.delete()
self.jukebox = None
self.ignoreAll()
return
def getCollisionName(self):
return self.uniqueName('jukeboxCollision')
def activate(self):
self.accept('enter' + self.getCollisionName(), self.__handleEnterCollision)
def __handleEnterCollision(self, collisionEntry):
if base.cr.playGame.getPlace().fsm.getCurrentState().getName() == 'walk':
base.cr.playGame.getPlace().fsm.request('activity')
self.d_toonJoinRequest()
def joinRequestDenied(self, reason):
DistributedPartyActivity.joinRequestDenied(self, reason)
self.showMessage(TTLocalizer.PartyJukeboxOccupied)
def handleToonJoined(self, toonId):
toon = base.cr.doId2do.get(toonId)
if toon:
self.jukebox.lookAt(base.cr.doId2do[toonId])
self.jukebox.setHpr(self.jukebox.getH() + 180.0, 0, 0)
if toonId == base.localAvatar.doId:
self.__localUseJukebox()
def handleToonExited(self, toonId):
if toonId == base.localAvatar.doId and self.gui.isLoaded():
self.__deactivateGui()
def handleToonDisabled(self, toonId):
self.notify.warning('handleToonDisabled no implementation yet')
def __localUseJukebox(self):
base.localAvatar.disableAvatarControls()
base.localAvatar.stopPosHprBroadcast()
self.__activateGui()
self.accept(JukeboxGui.CLOSE_EVENT, self.__handleGuiClose)
taskMgr.doMethodLater(0.5, self.__localToonWillExitTask, self.uniqueName('toonWillExitJukeboxOnTimeout'), extraArgs=None)
self.accept(JukeboxGui.ADD_SONG_CLICK_EVENT, self.__handleQueueSong)
if self.isUserHost():
self.accept(JukeboxGui.MOVE_TO_TOP_CLICK_EVENT, self.__handleMoveSongToTop)
return
def __localToonWillExitTask(self, task):
self.localToonExiting()
return Task.done
def __activateGui(self):
self.gui.enable(timer=JUKEBOX_TIMEOUT)
self.gui.disableAddSongButton()
if self.currentSongData is not None:
self.gui.setSongCurrentlyPlaying(self.currentSongData[0], self.currentSongData[1])
self.d_queuedSongsRequest()
return
def __deactivateGui(self):
self.ignore(JukeboxGui.CLOSE_EVENT)
self.ignore(JukeboxGui.SONG_SELECT_EVENT)
self.ignore(JukeboxGui.MOVE_TO_TOP_CLICK_EVENT)
base.cr.playGame.getPlace().setState('walk')
base.localAvatar.startPosHprBroadcast()
base.localAvatar.enableAvatarControls()
self.gui.unload()
self.__localClearQueuedSong()
def isUserHost(self):
return self.party.partyInfo.hostId == base.localAvatar.doId
def d_queuedSongsRequest(self):
self.sendUpdate('queuedSongsRequest')
def queuedSongsResponse(self, songInfoList, index):
if self.gui.isLoaded():
for i in range(len(songInfoList)):
songInfo = songInfoList[i]
self.__addSongToQueue(songInfo, isLocalQueue=index >= 0 and i == index)
self.gui.enableAddSongButton()
def __handleGuiClose(self):
self.__deactivateGui()
self.d_toonExitDemand()
def __handleQueueSong(self, name, values):
self.d_setNextSong(values[0], values[1])
def d_setNextSong(self, phase, filename):
self.sendUpdate('setNextSong', [(phase, filename)])
def setSongInQueue(self, songInfo):
if self.gui.isLoaded():
phase = sanitizePhase(songInfo[0])
filename = songInfo[1]
data = self.getMusicData(phase, filename)
if data:
if self.localQueuedSongListItem is not None:
self.localQueuedSongListItem['text'] = data[0]
else:
self.__addSongToQueue(songInfo, isLocalQueue=True)
return
def __addSongToQueue(self, songInfo, isLocalQueue = False):
isHost = isLocalQueue and self.isUserHost()
data = self.getMusicData(sanitizePhase(songInfo[0]), songInfo[1])
if data:
listItem = self.gui.addSongToQueue(data[0], highlight=isLocalQueue, moveToTopButton=isHost)
if isLocalQueue:
self.localQueuedSongInfo = songInfo
self.localQueuedSongListItem = listItem
def __localClearQueuedSong(self):
self.localQueuedSongInfo = None
self.localQueuedSongListItem = None
return
def __play(self, phase, filename, length):
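        # Replace the zone's ambient music with the queued party track and loop it
        # enough times to cover the reported song length.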
self.music = base.loadMusic((MUSIC_PATH + '%s') % (phase, filename))
if self.music:
if self.__checkPartyValidity() and hasattr(base.cr.playGame.getPlace().loader, 'music') and base.cr.playGame.getPlace().loader.music:
base.cr.playGame.getPlace().loader.music.stop()
self.music.setTime(0.0)
self.music.setLoopCount(getMusicRepeatTimes(length))
self.music.play()
self.currentSongData = (phase, filename)
def __stop(self):
self.currentSongData = None
if self.music:
self.music.stop()
if self.gui.isLoaded():
self.gui.clearSongCurrentlyPlaying()
return
def setSongPlaying(self, songInfo, toonId):
phase = sanitizePhase(songInfo[0])
filename = songInfo[1]
if not filename:
self.__stop()
return
data = self.getMusicData(phase, filename)
if data:
self.__play(phase, filename, data[1])
self.setSignNote(data[0])
if self.gui.isLoaded():
item = self.gui.popSongFromQueue()
self.gui.setSongCurrentlyPlaying(phase, filename)
if item == self.localQueuedSongListItem:
self.__localClearQueuedSong()
if toonId == localAvatar.doId:
localAvatar.setSystemMessage(0, TTLocalizer.PartyJukeboxNowPlaying)
def __handleMoveSongToTop(self):
if self.isUserHost() and self.localQueuedSongListItem is not None:
self.d_moveHostSongToTopRequest()
return
def d_moveHostSongToTopRequest(self):
self.notify.debug('d_moveHostSongToTopRequest')
self.sendUpdate('moveHostSongToTopRequest')
def moveHostSongToTop(self):
self.notify.debug('moveHostSongToTop')
if self.gui.isLoaded():
self.gui.pushQueuedItemToTop(self.localQueuedSongListItem)
def getMusicData(self, phase, filename):
data = []
phase = sanitizePhase(phase)
phase = self.phaseToMusicData.get(phase)
if phase:
data = phase.get(filename, [])
return data
def __checkPartyValidity(self):
if hasattr(base.cr.playGame, 'getPlace') and base.cr.playGame.getPlace() and hasattr(base.cr.playGame.getPlace(), 'loader') and base.cr.playGame.getPlace().loader:
return True
else:
return False
|
silly-wacky-3-town-toon/SOURCE-COD
|
toontown/parties/DistributedPartyJukeboxActivityBase.py
|
Python
|
apache-2.0
| 9,133
|
import pickle
import redis
from pod_manager.settings import REDIS_HOST, REDIS_PORT, REDIS_DB
__all__ = [
'get_client',
'cache_object',
'get_object'
]
def get_client():
client = redis.Redis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB)
return client
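# Pickle the object and store it under `key`; a truthy ttl expires it after that many seconds.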
def cache_object(client, key, obj, ttl=60):
pipe = client.pipeline()
data = pickle.dumps(obj)
pipe.set(key, data)
if ttl:
pipe.expire(key, ttl)
pipe.execute()
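# Return the unpickled object stored under `key`, or None when the key is missing or expired.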
def get_object(client, key):
data = client.get(key)
if not data:
return None
obj = pickle.loads(data)
return obj
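# Minimal usage sketch (assumes a reachable Redis configured via pod_manager.settings;
# the key name is purely illustrative):
#   client = get_client()
#   cache_object(client, 'pods:web-1', {'state': 'running'}, ttl=120)
#   pod = get_object(client, 'pods:web-1')  # {'state': 'running'} until the TTL lapses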
|
racker/pod-manager
|
pod_manager/db.py
|
Python
|
apache-2.0
| 603
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=fixme, too-many-arguments, too-many-locals
# pylint: disable=too-many-public-methods, too-many-branches, too-many-lines
"""`BaseModule` defines an API for modules."""
import time
import logging
import warnings
from .. import metric
from .. import ndarray
from ..context import cpu
from ..model import BatchEndParam
from ..initializer import Uniform
from ..io import DataDesc
from ..base import _as_list
def _check_input_names(symbol, names, typename, throw):
"""Check that all input names are in symbol's arguments."""
args = symbol.list_arguments()
for name in names:
if name in args:
continue
candidates = [arg for arg in args if
not arg.endswith('_weight') and
not arg.endswith('_bias') and
not arg.endswith('_gamma') and
not arg.endswith('_beta')]
msg = "\033[91mYou created Module with Module(..., %s_names=%s) but " \
"input with name '%s' is not found in symbol.list_arguments(). " \
"Did you mean one of:\n\t%s\033[0m"%(
typename, str(names), name, '\n\t'.join(candidates))
if throw:
raise ValueError(msg)
else:
warnings.warn(msg)
def _check_names_match(data_names, data_shapes, name, throw):
"""Check that input names matches input data descriptors."""
actual = [x[0] for x in data_shapes]
if sorted(data_names) != sorted(actual):
msg = "Data provided by %s_shapes don't match names specified by %s_names (%s vs. %s)"%(
name, name, str(data_shapes), str(data_names))
if throw:
raise ValueError(msg)
else:
warnings.warn(msg)
def _parse_data_desc(data_names, label_names, data_shapes, label_shapes):
"""parse data_attrs into DataDesc format and check that names match"""
data_shapes = [x if isinstance(x, DataDesc) else DataDesc(*x) for x in data_shapes]
_check_names_match(data_names, data_shapes, 'data', True)
if label_shapes is not None:
label_shapes = [x if isinstance(x, DataDesc) else DataDesc(*x) for x in label_shapes]
_check_names_match(label_names, label_shapes, 'label', False)
else:
_check_names_match(label_names, [], 'label', False)
return data_shapes, label_shapes
class BaseModule(object):
"""The base class of a module.
    A module represents a computation component. One can think of a module as a computation machine.
A module can execute forward and backward passes and update parameters in a model.
We aim to make the APIs easy to use, especially in the case when we need to use the imperative
API to work with multiple modules (e.g. stochastic depth network).
A module has several states:
- Initial state: Memory is not allocated yet, so the module is not ready for computation yet.
- Binded: Shapes for inputs, outputs, and parameters are all known, memory has been allocated,
and the module is ready for computation.
- Parameters are initialized: For modules with parameters, doing computation before
initializing the parameters might result in undefined outputs.
- Optimizer is installed: An optimizer can be installed to a module. After this, the parameters
of the module can be updated according to the optimizer after gradients are computed
(forward-backward).
In order for a module to interact with others, it must be able to report the
following information in its initial state (before binding):
- `data_names`: list of type string indicating the names of the required input data.
- `output_names`: list of type string indicating the names of the required outputs.
After binding, a module should be able to report the following richer information:
- state information
- `binded`: `bool`, indicates whether the memory buffers needed for computation
have been allocated.
- `for_training`: whether the module is bound for training.
- `params_initialized`: `bool`, indicates whether the parameters of this module
have been initialized.
- `optimizer_initialized`: `bool`, indicates whether an optimizer is defined
and initialized.
- `inputs_need_grad`: `bool`, indicates whether gradients with respect to the
input data are needed. Might be useful when implementing composition of modules.
- input/output information
- `data_shapes`: a list of `(name, shape)`. In theory, since the memory is allocated,
we could directly provide the data arrays. But in the case of data parallelism,
the data arrays might not be of the same shape as viewed from the external world.
- `label_shapes`: a list of `(name, shape)`. This might be `[]` if the module does
      not need labels (e.g. it does not contain a loss function at the top), or a module
is not bound for training.
- `output_shapes`: a list of `(name, shape)` for outputs of the module.
- parameters (for modules with parameters)
- `get_params()`: return a tuple `(arg_params, aux_params)`. Each of those
is a dictionary of name to ``NDArray`` mapping. Those `NDArray` always lives on
CPU. The actual parameters used for computing might live on other devices (GPUs),
this function will retrieve (a copy of) the latest parameters.
- ``set_params(arg_params, aux_params)``: assign parameters to the devices
doing the computation.
- ``init_params(...)``: a more flexible interface to assign or initialize the parameters.
- setup
- `bind()`: prepare environment for computation.
- `init_optimizer()`: install optimizer for parameter updating.
- `prepare()`: prepare the module based on the current data batch.
- computation
- `forward(data_batch)`: forward operation.
- `backward(out_grads=None)`: backward operation.
- `update()`: update parameters according to installed optimizer.
- `get_outputs()`: get outputs of the previous forward operation.
- `get_input_grads()`: get the gradients with respect to the inputs computed
in the previous backward operation.
    - `update_metric(metric, labels, pre_sliced=False)`: update performance metric
      for the results of the previous forward computation.
- other properties (mostly for backward compatibility)
- `symbol`: the underlying symbolic graph for this module (if any)
This property is not necessarily constant. For example, for `BucketingModule`,
this property is simply the *current* symbol being used. For other modules,
this value might not be well defined.
When those intermediate-level API are implemented properly, the following
high-level API will be automatically available for a module:
- `fit`: train the module parameters on a data set.
- `predict`: run prediction on a data set and collect outputs.
- `score`: run prediction on a data set and evaluate performance.
Examples
--------
>>> # An example of creating a mxnet module.
>>> import mxnet as mx
>>> data = mx.symbol.Variable('data')
>>> fc1 = mx.symbol.FullyConnected(data, name='fc1', num_hidden=128)
>>> act1 = mx.symbol.Activation(fc1, name='relu1', act_type="relu")
>>> fc2 = mx.symbol.FullyConnected(act1, name = 'fc2', num_hidden = 64)
>>> act2 = mx.symbol.Activation(fc2, name='relu2', act_type="relu")
>>> fc3 = mx.symbol.FullyConnected(act2, name='fc3', num_hidden=10)
>>> out = mx.symbol.SoftmaxOutput(fc3, name = 'softmax')
>>> mod = mx.mod.Module(out)
"""
def __init__(self, logger=logging):
self.logger = logger
self.binded = False
self.for_training = False
self.inputs_need_grad = False
self.params_initialized = False
self.optimizer_initialized = False
self._symbol = None
self._total_exec_bytes = 0
################################################################################
# High Level API
################################################################################
def forward_backward(self, data_batch):
"""A convenient function that calls both ``forward`` and ``backward``."""
self.forward(data_batch, is_train=True)
self.backward()
def score(self, eval_data, eval_metric, num_batch=None, batch_end_callback=None,
score_end_callback=None,
reset=True, epoch=0, sparse_row_id_fn=None):
"""Runs prediction on ``eval_data`` and evaluates the performance according to
the given ``eval_metric``.
        Check out the `Module Tutorial <http://mxnet.io/tutorials/basic/module.html>`_ to see
        an end-to-end use case.
Parameters
----------
eval_data : DataIter
Evaluation data to run prediction on.
eval_metric : EvalMetric or list of EvalMetrics
Evaluation metric to use.
num_batch : int
Number of batches to run. Defaults to ``None``, indicating run until the `DataIter`
finishes.
batch_end_callback : function
Could also be a list of functions.
reset : bool
Defaults to ``True``. Indicates whether we should reset `eval_data` before starting
evaluating.
epoch : int
Defaults to 0. For compatibility, this will be passed to callbacks (if any).
During training, this will correspond to the training epoch number.
sparse_row_id_fn : A callback function
The function takes `data_batch` as an input and returns a dict of
str -> NDArray. The resulting dict is used for pulling row_sparse
parameters from the kvstore, where the str key is the name of the param,
and the value is the row id of the param to pull.
Examples
--------
>>> # An example of using score for prediction.
>>> # Evaluate accuracy on val_dataiter
>>> metric = mx.metric.Accuracy()
>>> mod.score(val_dataiter, metric)
>>> mod.score(val_dataiter, ['mse', 'acc'])
"""
assert self.binded and self.params_initialized
if reset:
eval_data.reset()
if not isinstance(eval_metric, metric.EvalMetric):
eval_metric = metric.create(eval_metric)
eval_metric.reset()
actual_num_batch = 0
for nbatch, eval_batch in enumerate(eval_data):
if num_batch is not None and nbatch == num_batch:
break
self.prepare(eval_batch, sparse_row_id_fn=sparse_row_id_fn)
self.forward(eval_batch, is_train=False)
if isinstance(eval_batch, list):
self.update_metric(eval_metric, [eb.label for eb in eval_batch], pre_sliced=True)
else:
self.update_metric(eval_metric, eval_batch.label)
if batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=epoch,
nbatch=nbatch,
eval_metric=eval_metric,
locals=locals())
for callback in _as_list(batch_end_callback):
callback(batch_end_params)
actual_num_batch += 1
if score_end_callback:
params = BatchEndParam(epoch=epoch,
nbatch=actual_num_batch,
eval_metric=eval_metric,
locals=locals())
for callback in _as_list(score_end_callback):
callback(params)
return eval_metric.get_name_value()
def iter_predict(self, eval_data, num_batch=None, reset=True, sparse_row_id_fn=None):
"""Iterates over predictions.
Example Usage:
----------
>>> for pred, i_batch, batch in module.iter_predict(eval_data):
... # pred is a list of outputs from the module
        ...     # i_batch is an integer
... # batch is the data batch from the data iterator
Parameters
----------
eval_data : DataIter
Evaluation data to run prediction on.
num_batch : int
Default is ``None``, indicating running all the batches in the data iterator.
reset : bool
Default is ``True``, indicating whether we should reset the data iter before start
doing prediction.
sparse_row_id_fn : A callback function
The function takes `data_batch` as an input and returns a dict of
str -> NDArray. The resulting dict is used for pulling row_sparse
parameters from the kvstore, where the str key is the name of the param,
and the value is the row id of the param to pull.
"""
assert self.binded and self.params_initialized
if reset:
eval_data.reset()
for nbatch, eval_batch in enumerate(eval_data):
if num_batch is not None and nbatch == num_batch:
break
self.prepare(eval_batch, sparse_row_id_fn=sparse_row_id_fn)
self.forward(eval_batch, is_train=False)
pad = eval_batch.pad
outputs = [out[0:out.shape[0]-pad] for out in self.get_outputs()]
yield (outputs, nbatch, eval_batch)
def predict(self, eval_data, num_batch=None, merge_batches=True, reset=True,
always_output_list=False, sparse_row_id_fn=None):
"""Runs prediction and collects the outputs.
When `merge_batches` is ``True`` (by default), the return value will be a list
``[out1, out2, out3]``, where each element is formed by concatenating the outputs for
all the mini-batches. When `always_output_list` is ``False`` (as by default),
then in the case of a single output, `out1` is returned instead of ``[out1]``.
When `merge_batches` is ``False``, the return value will be a nested list like
``[[out1_batch1, out2_batch1], [out1_batch2], ...]``. This mode is useful because
in some cases (e.g. bucketing), the module does not necessarily produce the same
number of outputs.
The objects in the results have type `NDArray`. If you need to work with a numpy array,
just call ``.asnumpy()`` on each `NDArray`.
Parameters
----------
eval_data : DataIter
Evaluation data to run prediction on.
num_batch : int
Defaults to ``None``, indicates running all the batches in the data iterator.
merge_batches : bool
Defaults to ``True``, see above for return values.
reset : bool
Defaults to ``True``, indicates whether we should reset the data iter before
doing prediction.
always_output_list : bool
Defaults to ``False``, see above for return values.
sparse_row_id_fn : A callback function
The function takes `data_batch` as an input and returns a dict of
str -> NDArray. The resulting dict is used for pulling row_sparse
parameters from the kvstore, where the str key is the name of the param,
and the value is the row id of the param to pull.
Returns
-------
list of NDArray or list of list of NDArray
Prediction results.
Examples
--------
>>> # An example of using `predict` for prediction.
>>> # Predict on the first 10 batches of val_dataiter
>>> mod.predict(eval_data=val_dataiter, num_batch=10)
"""
assert self.binded and self.params_initialized
if reset:
eval_data.reset()
output_list = []
for nbatch, eval_batch in enumerate(eval_data):
if num_batch is not None and nbatch == num_batch:
break
self.prepare(eval_batch, sparse_row_id_fn=sparse_row_id_fn)
self.forward(eval_batch, is_train=False)
pad = eval_batch.pad
outputs = [out[0:out.shape[0]-pad].copy() for out in self.get_outputs()]
output_list.append(outputs)
if len(output_list) == 0:
return output_list
if merge_batches:
num_outputs = len(output_list[0])
for out in output_list:
assert len(out) == num_outputs, \
'Cannot merge batches, as num of outputs is not the same ' + \
'in mini-batches. Maybe bucketing is used?'
output_list2 = [ndarray.concatenate([out[i] for out in output_list])
for i in range(num_outputs)]
if num_outputs == 1 and not always_output_list:
return output_list2[0]
return output_list2
return output_list
def fit(self, train_data, eval_data=None, eval_metric='acc',
epoch_end_callback=None, batch_end_callback=None, kvstore='local',
optimizer='sgd', optimizer_params=(('learning_rate', 0.01),),
eval_end_callback=None,
eval_batch_end_callback=None, initializer=Uniform(0.01),
arg_params=None, aux_params=None, allow_missing=False,
force_rebind=False, force_init=False, begin_epoch=0, num_epoch=None,
validation_metric=None, monitor=None, sparse_row_id_fn=None):
"""Trains the module parameters.
        Check out the `Module Tutorial <http://mxnet.io/tutorials/basic/module.html>`_ to see
        an end-to-end use case.
Parameters
----------
train_data : DataIter
Train DataIter.
eval_data : DataIter
If not ``None``, will be used as validation set and the performance
after each epoch will be evaluated.
eval_metric : str or EvalMetric
            Defaults to 'acc' (accuracy). The performance measure displayed during training.
Other possible predefined metrics are:
'ce' (CrossEntropy), 'f1', 'mae', 'mse', 'rmse', 'top_k_accuracy'.
epoch_end_callback : function or list of functions
Each callback will be called with the current `epoch`, `symbol`, `arg_params`
and `aux_params`.
batch_end_callback : function or list of function
Each callback will be called with a `BatchEndParam`.
kvstore : str or KVStore
Defaults to 'local'.
optimizer : str or Optimizer
Defaults to 'sgd'.
optimizer_params : dict
Defaults to ``(('learning_rate', 0.01),)``. The parameters for
the optimizer constructor.
The default value is not a dict, just to avoid pylint warning on dangerous
default values.
eval_end_callback : function or list of function
These will be called at the end of each full evaluation, with the metrics over
the entire evaluation set.
eval_batch_end_callback : function or list of function
These will be called at the end of each mini-batch during evaluation.
initializer : Initializer
The initializer is called to initialize the module parameters when they are
not already initialized.
arg_params : dict
Defaults to ``None``, if not ``None``, should be existing parameters from a trained
model or loaded from a checkpoint (previously saved model). In this case,
the value here will be used to initialize the module parameters, unless they
are already initialized by the user via a call to `init_params` or `fit`.
`arg_params` has a higher priority than `initializer`.
aux_params : dict
Defaults to ``None``. Similar to `arg_params`, except for auxiliary states.
allow_missing : bool
Defaults to ``False``. Indicates whether to allow missing parameters when `arg_params`
and `aux_params` are not ``None``. If this is ``True``, then the missing parameters
will be initialized via the `initializer`.
force_rebind : bool
Defaults to ``False``. Whether to force rebinding the executors if already bound.
force_init : bool
Defaults to ``False``. Indicates whether to force initialization even if the
parameters are already initialized.
begin_epoch : int
Defaults to 0. Indicates the starting epoch. Usually, if resumed from a
checkpoint saved at a previous training phase at epoch N, then this value should be
N+1.
num_epoch : int
Number of epochs for training.
sparse_row_id_fn : A callback function
The function takes `data_batch` as an input and returns a dict of
str -> NDArray. The resulting dict is used for pulling row_sparse
parameters from the kvstore, where the str key is the name of the param,
and the value is the row id of the param to pull.
Examples
--------
>>> # An example of using fit for training.
>>> # Assume training dataIter and validation dataIter are ready
>>> # Assume loading a previously checkpointed model
>>> sym, arg_params, aux_params = mx.model.load_checkpoint(model_prefix, 3)
>>> mod.fit(train_data=train_dataiter, eval_data=val_dataiter, optimizer='sgd',
... optimizer_params={'learning_rate':0.01, 'momentum': 0.9},
... arg_params=arg_params, aux_params=aux_params,
... eval_metric='acc', num_epoch=10, begin_epoch=3)
"""
assert num_epoch is not None, 'please specify number of epochs'
self.bind(data_shapes=train_data.provide_data, label_shapes=train_data.provide_label,
for_training=True, force_rebind=force_rebind)
if monitor is not None:
self.install_monitor(monitor)
self.init_params(initializer=initializer, arg_params=arg_params, aux_params=aux_params,
allow_missing=allow_missing, force_init=force_init)
self.init_optimizer(kvstore=kvstore, optimizer=optimizer,
optimizer_params=optimizer_params)
if validation_metric is None:
validation_metric = eval_metric
if not isinstance(eval_metric, metric.EvalMetric):
eval_metric = metric.create(eval_metric)
################################################################################
# training loop
################################################################################
for epoch in range(begin_epoch, num_epoch):
tic = time.time()
eval_metric.reset()
nbatch = 0
data_iter = iter(train_data)
end_of_batch = False
next_data_batch = next(data_iter)
while not end_of_batch:
data_batch = next_data_batch
if monitor is not None:
monitor.tic()
self.forward_backward(data_batch)
self.update()
if isinstance(data_batch, list):
self.update_metric(eval_metric,
[db.label for db in data_batch],
pre_sliced=True)
else:
self.update_metric(eval_metric, data_batch.label)
try:
# pre fetch next batch
next_data_batch = next(data_iter)
self.prepare(next_data_batch, sparse_row_id_fn=sparse_row_id_fn)
except StopIteration:
end_of_batch = True
if monitor is not None:
monitor.toc_print()
if end_of_batch:
eval_name_vals = eval_metric.get_name_value()
if batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=epoch, nbatch=nbatch,
eval_metric=eval_metric,
locals=locals())
for callback in _as_list(batch_end_callback):
callback(batch_end_params)
nbatch += 1
# one epoch of training is finished
for name, val in eval_name_vals:
self.logger.info('Epoch[%d] Train-%s=%f', epoch, name, val)
toc = time.time()
self.logger.info('Epoch[%d] Time cost=%.3f', epoch, (toc-tic))
# sync aux params across devices
arg_params, aux_params = self.get_params()
self.set_params(arg_params, aux_params)
if epoch_end_callback is not None:
for callback in _as_list(epoch_end_callback):
callback(epoch, self.symbol, arg_params, aux_params)
#----------------------------------------
# evaluation on validation set
if eval_data:
res = self.score(eval_data, validation_metric,
score_end_callback=eval_end_callback,
batch_end_callback=eval_batch_end_callback, epoch=epoch)
#TODO: pull this into default
for name, val in res:
self.logger.info('Epoch[%d] Validation-%s=%f', epoch, name, val)
# end of 1 epoch, reset the data-iter for another epoch
train_data.reset()
################################################################################
# Symbol information
################################################################################
@property
def data_names(self):
"""A list of names for data required by this module."""
raise NotImplementedError()
@property
def output_names(self):
"""A list of names for the outputs of this module."""
raise NotImplementedError()
################################################################################
# Input/Output information
################################################################################
@property
def data_shapes(self):
"""A list of (name, shape) pairs specifying the data inputs to this module."""
raise NotImplementedError()
@property
def label_shapes(self):
"""A list of (name, shape) pairs specifying the label inputs to this module.
If this module does not accept labels -- either it is a module without loss
function, or it is not bound for training, then this should return an empty
list ``[]``.
"""
raise NotImplementedError()
@property
def output_shapes(self):
"""A list of (name, shape) pairs specifying the outputs of this module."""
raise NotImplementedError()
################################################################################
# Parameters of a module
################################################################################
def get_params(self):
"""Gets parameters, those are potentially copies of the the actual parameters used
to do computation on the device.
Returns
-------
``(arg_params, aux_params)``
A pair of dictionaries each mapping parameter names to NDArray values.
Examples
--------
>>> # An example of getting module parameters.
>>> print mod.get_params()
({'fc2_weight': <NDArray 64x128 @cpu(0)>, 'fc1_weight': <NDArray 128x100 @cpu(0)>,
'fc3_bias': <NDArray 10 @cpu(0)>, 'fc3_weight': <NDArray 10x64 @cpu(0)>,
'fc2_bias': <NDArray 64 @cpu(0)>, 'fc1_bias': <NDArray 128 @cpu(0)>}, {})
"""
raise NotImplementedError()
def init_params(self, initializer=Uniform(0.01), arg_params=None, aux_params=None,
allow_missing=False, force_init=False, allow_extra=False):
"""Initializes the parameters and auxiliary states.
Parameters
----------
initializer : Initializer
Called to initialize parameters if needed.
arg_params : dict
If not ``None``, should be a dictionary of existing `arg_params`. Initialization
will be copied from that.
aux_params : dict
If not ``None``, should be a dictionary of existing `aux_params`. Initialization
will be copied from that.
allow_missing : bool
If ``True``, params could contain missing values, and the initializer will be
called to fill those missing params.
force_init : bool
If ``True``, `force_init` will force re-initialize even if already initialized.
allow_extra : boolean, optional
Whether allow extra parameters that are not needed by symbol.
If this is True, no error will be thrown when arg_params or aux_params
contain extra parameters that is not needed by the executor.
Examples
--------
>>> # An example of initializing module parameters.
>>> mod.init_params()
"""
raise NotImplementedError()
def set_params(self, arg_params, aux_params, allow_missing=False, force_init=True,
allow_extra=False):
"""Assigns parameter and aux state values.
Parameters
----------
arg_params : dict
Dictionary of name to value (`NDArray`) mapping.
aux_params : dict
Dictionary of name to value (`NDArray`) mapping.
allow_missing : bool
If ``True``, params could contain missing values, and the initializer will be
called to fill those missing params.
force_init : bool
If ``True``, will force re-initialize even if already initialized.
allow_extra : boolean, optional
Whether allow extra parameters that are not needed by symbol.
If this is True, no error will be thrown when arg_params or aux_params
contain extra parameters that is not needed by the executor.
Examples
--------
>>> # An example of setting module parameters.
>>> sym, arg_params, aux_params = mx.model.load_checkpoint(model_prefix, n_epoch_load)
>>> mod.set_params(arg_params=arg_params, aux_params=aux_params)
"""
self.init_params(initializer=None, arg_params=arg_params, aux_params=aux_params,
allow_missing=allow_missing, force_init=force_init,
allow_extra=allow_extra)
def save_params(self, fname):
"""Saves model parameters to file.
Parameters
----------
fname : str
Path to output param file.
Examples
--------
>>> # An example of saving module parameters.
>>> mod.save_params('myfile')
"""
arg_params, aux_params = self.get_params()
save_dict = {('arg:%s' % k) : v.as_in_context(cpu()) for k, v in arg_params.items()}
save_dict.update({('aux:%s' % k) : v.as_in_context(cpu()) for k, v in aux_params.items()})
ndarray.save(fname, save_dict)
def load_params(self, fname):
"""Loads model parameters from file.
Parameters
----------
fname : str
Path to input param file.
Examples
--------
>>> # An example of loading module parameters.
>>> mod.load_params('myfile')
"""
save_dict = ndarray.load(fname)
arg_params = {}
aux_params = {}
for k, value in save_dict.items():
arg_type, name = k.split(':', 1)
if arg_type == 'arg':
arg_params[name] = value
elif arg_type == 'aux':
aux_params[name] = value
else:
raise ValueError("Invalid param file " + fname)
self.set_params(arg_params, aux_params)
def get_states(self, merge_multi_context=True):
"""Gets states from all devices
If `merge_multi_context` is ``True``, returns output of form ``[out1, out2]``.
Otherwise, it returns output of the form
``[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]``.
All output elements are `NDArray`.
Parameters
----------
merge_multi_context : bool
Defaults to ``True``. In the case when data-parallelism is used, the states
will be collected from multiple devices. A ``True`` value indicates that we
should merge the collected results so that they look like from a single
executor.
Returns
-------
A list of ``NDArray`` or a list of list of ``NDArray``.
"""
assert self.binded and self.params_initialized
assert not merge_multi_context
return []
def set_states(self, states=None, value=None):
"""Sets value for states. Only one of states & value can be specified.
Parameters
----------
states : list of list of NDArray
Source states arrays formatted like
``[[state1_dev1, state1_dev2], [state2_dev1, state2_dev2]]``.
value : number
A single scalar value for all state arrays.
"""
assert self.binded and self.params_initialized
assert not states and not value
def install_monitor(self, mon):
"""Installs monitor on all executors."""
raise NotImplementedError()
################################################################################
# Computations
################################################################################
# pylint: disable=unused-argument
def prepare(self, data_batch, sparse_row_id_fn=None):
'''Prepares the module for processing a data batch.
Usually involves switching bucket and reshaping.
For modules that contain `row_sparse` parameters in KVStore,
it prepares the `row_sparse` parameters based on the sparse_row_id_fn.
When KVStore is used to update parameters for multi-device or multi-machine training,
        a copy of the parameters is stored in KVStore. Note that for `row_sparse` parameters,
the `update()` updates the copy of parameters in KVStore, but doesn't broadcast
the updated parameters to all devices / machines. The `prepare` function is used to
broadcast `row_sparse` parameters with the next batch of data.
Parameters
----------
data_batch : DataBatch
The current batch of data for forward computation.
sparse_row_id_fn : A callback function
The function takes `data_batch` as an input and returns a dict of
str -> NDArray. The resulting dict is used for pulling row_sparse
parameters from the kvstore, where the str key is the name of the param,
and the value is the row id of the param to pull.
'''
if sparse_row_id_fn is not None:
warnings.warn(UserWarning("sparse_row_id_fn is not invoked for BaseModule."))
# pylint: enable=unused-argument
def forward(self, data_batch, is_train=None):
"""Forward computation. It supports data batches with different shapes, such as
different batch sizes or different image sizes.
If reshaping of data batch relates to modification of symbol or module, such as
changing image layout ordering or switching from training to predicting, module
rebinding is required.
Parameters
----------
data_batch : DataBatch
Could be anything with similar API implemented.
is_train : bool
Default is ``None``, which means `is_train` takes the value of ``self.for_training``.
Examples
--------
>>> import mxnet as mx
>>> from collections import namedtuple
>>> Batch = namedtuple('Batch', ['data'])
>>> data = mx.sym.Variable('data')
>>> out = data * 2
>>> mod = mx.mod.Module(symbol=out, label_names=None)
>>> mod.bind(data_shapes=[('data', (1, 10))])
>>> mod.init_params()
>>> data1 = [mx.nd.ones((1, 10))]
>>> mod.forward(Batch(data1))
>>> print mod.get_outputs()[0].asnumpy()
[[ 2. 2. 2. 2. 2. 2. 2. 2. 2. 2.]]
>>> # Forward with data batch of different shape
>>> data2 = [mx.nd.ones((3, 5))]
>>> mod.forward(Batch(data2))
>>> print mod.get_outputs()[0].asnumpy()
[[ 2. 2. 2. 2. 2.]
[ 2. 2. 2. 2. 2.]
[ 2. 2. 2. 2. 2.]]
"""
raise NotImplementedError()
def backward(self, out_grads=None):
"""Backward computation.
Parameters
----------
out_grads : NDArray or list of NDArray, optional
Gradient on the outputs to be propagated back.
This parameter is only needed when bind is called
on outputs that are not a loss function.
Examples
--------
>>> # An example of backward computation.
>>> mod.backward()
>>> print mod.get_input_grads()[0].asnumpy()
[[[ 1.10182791e-05 5.12257748e-06 4.01927764e-06 8.32566820e-06
-1.59775993e-06 7.24269375e-06 7.28067835e-06 -1.65902311e-05
5.46342608e-06 8.44196393e-07]
...]]
"""
raise NotImplementedError()
def get_outputs(self, merge_multi_context=True):
"""Gets outputs of the previous forward computation.
If `merge_multi_context` is ``True``, it is like ``[out1, out2]``. Otherwise,
        it returns output of the form ``[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]``.
All the output elements have type `NDArray`. When `merge_multi_context` is ``False``,
those `NDArray` instances might live on different devices.
Parameters
----------
merge_multi_context : bool
Defaults to ``True``. In the case when data-parallelism is used, the outputs
will be collected from multiple devices. A ``True`` value indicates that we
should merge the collected results so that they look like from a single
executor.
Returns
-------
list of `NDArray` or list of list of `NDArray`.
Output
Examples
--------
>>> # An example of getting forward output.
>>> print mod.get_outputs()[0].asnumpy()
[[ 0.09999977 0.10000153 0.10000716 0.10000195 0.09999853 0.09999743
0.10000272 0.10000113 0.09999088 0.09999888]]
"""
raise NotImplementedError()
def get_input_grads(self, merge_multi_context=True):
"""Gets the gradients to the inputs, computed in the previous backward computation.
If `merge_multi_context` is ``True``, it is like ``[grad1, grad2]``. Otherwise, it
is like ``[[grad1_dev1, grad1_dev2], [grad2_dev1, grad2_dev2]]``. All the output
elements have type `NDArray`. When `merge_multi_context` is ``False``, those `NDArray`
instances might live on different devices.
Parameters
----------
merge_multi_context : bool
Defaults to ``True``. In the case when data-parallelism is used, the gradients
will be collected from multiple devices. A ``True`` value indicates that we
should merge the collected results so that they look like from a single
executor.
Returns
-------
list of NDArray or list of list of NDArray
Input gradients.
Examples
--------
>>> # An example of getting input gradients.
>>> print mod.get_input_grads()[0].asnumpy()
[[[ 1.10182791e-05 5.12257748e-06 4.01927764e-06 8.32566820e-06
-1.59775993e-06 7.24269375e-06 7.28067835e-06 -1.65902311e-05
5.46342608e-06 8.44196393e-07]
...]]
"""
raise NotImplementedError()
def update(self):
"""Updates parameters according to the installed optimizer and the gradients computed
in the previous forward-backward batch.
When KVStore is used to update parameters for multi-device or multi-machine training,
        a copy of the parameters is stored in KVStore. Note that for `row_sparse` parameters,
this function does update the copy of parameters in KVStore, but doesn't broadcast the
updated parameters to all devices / machines. Please call `prepare` to broadcast
`row_sparse` parameters with the next batch of data.
Examples
--------
>>> # An example of updating module parameters.
>>> mod.init_optimizer(kvstore='local', optimizer='sgd',
... optimizer_params=(('learning_rate', 0.01), ))
>>> mod.backward()
>>> mod.update()
>>> print mod.get_params()[0]['fc3_weight'].asnumpy()
[[ 5.86930104e-03 5.28078526e-03 -8.88729654e-03 -1.08308345e-03
6.13054074e-03 4.27560415e-03 1.53817423e-03 4.62131854e-03
4.69872449e-03 -2.42400169e-03 9.94111411e-04 1.12386420e-03
...]]
"""
raise NotImplementedError()
def update_metric(self, eval_metric, labels, pre_sliced=False):
"""Evaluates and accumulates evaluation metric on outputs of the last forward
computation.
Parameters
----------
eval_metric : EvalMetric
Evaluation metric to use.
labels : list of NDArray if `pre_sliced` parameter is set to `False`,
list of lists of NDArray otherwise. Typically `data_batch.label`.
pre_sliced: bool
Whether the labels are already sliced per device (default: False).
Examples
--------
>>> # An example of updating evaluation metric.
>>> mod.forward(data_batch)
>>> mod.update_metric(metric, data_batch.label)
"""
raise NotImplementedError()
################################################################################
# module setup
################################################################################
def bind(self, data_shapes, label_shapes=None, for_training=True,
inputs_need_grad=False, force_rebind=False, shared_module=None,
grad_req='write'):
"""Binds the symbols to construct executors. This is necessary before one
can perform computation with the module.
Parameters
----------
data_shapes : list of (str, tuple) or DataDesc objects
Typically is ``data_iter.provide_data``. Can also be a list of
(data name, data shape).
label_shapes : list of (str, tuple) or DataDesc objects
Typically is ``data_iter.provide_label``. Can also be a list of
(label name, label shape).
for_training : bool
Default is ``True``. Whether the executors should be bind for training.
inputs_need_grad : bool
Default is ``False``. Whether the gradients to the input data need to be computed.
Typically this is not needed. But this might be needed when implementing composition
of modules.
force_rebind : bool
Default is ``False``. This function does nothing if the executors are already
bound. But with this ``True``, the executors will be forced to rebind.
shared_module : Module
Default is ``None``. This is used in bucketing. When not ``None``, the shared module
essentially corresponds to a different bucket -- a module with different symbol
but with the same sets of parameters (e.g. unrolled RNNs with different lengths).
grad_req : str, list of str, dict of str to str
Requirement for gradient accumulation. Can be 'write', 'add', or 'null'
(default to 'write').
Can be specified globally (str) or for each argument (list, dict).
Examples
--------
>>> # An example of binding symbols.
>>> mod.bind(data_shapes=[('data', (1, 10, 10))])
>>> # Assume train_iter is already created.
>>> mod.bind(data_shapes=train_iter.provide_data, label_shapes=train_iter.provide_label)
"""
raise NotImplementedError()
def init_optimizer(self, kvstore='local', optimizer='sgd',
optimizer_params=(('learning_rate', 0.01),), force_init=False):
"""Installs and initializes optimizers, as well as initialize kvstore for
distributed training
Parameters
----------
kvstore : str or KVStore
Defaults to `'local'`.
optimizer : str or Optimizer
Defaults to `'sgd'`.
optimizer_params : dict
Defaults to ``(('learning_rate', 0.01),)``. The default value is not a dictionary,
just to avoid pylint warning of dangerous default values.
force_init : bool
Defaults to ``False``, indicates whether to force re-initializing an optimizer
if it is already installed.
Examples
--------
>>> # An example of initializing optimizer.
>>> mod.init_optimizer(optimizer='sgd', optimizer_params=(('learning_rate', 0.005),))
"""
raise NotImplementedError()
################################################################################
# misc
################################################################################
@property
def symbol(self):
"""Gets the symbol associated with this module.
        Except for `Module`, for other types of modules (e.g. `BucketingModule`), this
        property might not be constant throughout its lifetime. Some modules might
        not even be associated with any symbols.
"""
return self._symbol
|
rahul003/mxnet
|
python/mxnet/module/base_module.py
|
Python
|
apache-2.0
| 47,156
|
# Copyright 2017, Fabien Boucher
# Copyright 2017, Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from pecan import abort
from pecan import expose
from repoxplorer import version
from repoxplorer.index.projects import Projects
rx_version = version.get_version()
class ProjectsController(object):
@expose('json')
def projects(self, pid=None):
projects_index = Projects()
if pid:
project = projects_index.get(pid)
if not project:
abort(404, detail="Project ID has not been found")
return {pid: projects_index.get(pid)}
else:
projects = projects_index.get_projects(
source=['name', 'description', 'logo', 'refs'])
_projects = OrderedDict(
sorted(list(projects.items()), key=lambda t: t[0]))
return {'projects': _projects,
'tags': projects_index.get_tags()}
@expose('json')
def repos(self, pid=None, tid=None):
projects_index = Projects()
if not pid and not tid:
abort(404,
detail="A tag ID or project ID must be passed as parameter")
if pid:
project = projects_index.get(pid)
else:
if tid in projects_index.get_tags():
refs = projects_index.get_references_from_tags(tid)
project = {'refs': refs}
else:
project = None
if not project:
abort(404,
detail='Project ID or Tag ID has not been found')
return project['refs']
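# Illustrative usage sketch (added, not part of the original controller): the two
# JSON endpoints above can be exercised with pecan's test client. The config path
# and query values below are placeholders, and the exact URL prefix depends on
# where this controller is mounted in the application's routing.
#
#   from pecan import load_app
#   from webtest import TestApp
#
#   app = TestApp(load_app('config.py'))                      # hypothetical config
#   everything = app.get('/projects').json                    # {'projects': ..., 'tags': ...}
#   one = app.get('/projects', params={'pid': 'some-project'}).json
#   refs = app.get('/repos', params={'tid': 'some-tag'}).json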
|
morucci/repoxplorer
|
repoxplorer/controllers/projects.py
|
Python
|
apache-2.0
| 2,146
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Load plugin assets from disk."""
import os.path
from tensorboard.compat import tf
_PLUGINS_DIR = "plugins"
def _IsDirectory(parent, item):
"""Helper that returns if parent/item is a directory."""
return tf.io.gfile.isdir(os.path.join(parent, item))
def PluginDirectory(logdir, plugin_name):
"""Returns the plugin directory for plugin_name."""
return os.path.join(logdir, _PLUGINS_DIR, plugin_name)
def ListPlugins(logdir):
"""List all the plugins that have registered assets in logdir.
If the plugins_dir does not exist, it returns an empty list. This maintains
compatibility with old directories that have no plugins written.
Args:
logdir: A directory that was created by a TensorFlow events writer.
Returns:
a list of plugin names, as strings
"""
plugins_dir = os.path.join(logdir, _PLUGINS_DIR)
try:
entries = tf.io.gfile.listdir(plugins_dir)
except tf.errors.NotFoundError:
return []
# Strip trailing slashes, which listdir() includes for some filesystems
# for subdirectories, after using them to bypass IsDirectory().
return [
x.rstrip("/")
for x in entries
if x.endswith("/") or _IsDirectory(plugins_dir, x)
]
def ListAssets(logdir, plugin_name):
"""List all the assets that are available for given plugin in a logdir.
Args:
logdir: A directory that was created by a TensorFlow summary.FileWriter.
plugin_name: A string name of a plugin to list assets for.
Returns:
A string list of available plugin assets. If the plugin subdirectory does
not exist (either because the logdir doesn't exist, or because the plugin
didn't register) an empty list is returned.
"""
plugin_dir = PluginDirectory(logdir, plugin_name)
try:
# Strip trailing slashes, which listdir() includes for some filesystems.
return [x.rstrip("/") for x in tf.io.gfile.listdir(plugin_dir)]
except tf.errors.NotFoundError:
return []
def RetrieveAsset(logdir, plugin_name, asset_name):
"""Retrieve a particular plugin asset from a logdir.
Args:
logdir: A directory that was created by a TensorFlow summary.FileWriter.
plugin_name: The plugin we want an asset from.
asset_name: The name of the requested asset.
Returns:
string contents of the plugin asset.
Raises:
KeyError: if the asset does not exist.
"""
asset_path = os.path.join(PluginDirectory(logdir, plugin_name), asset_name)
try:
with tf.io.gfile.GFile(asset_path, "r") as f:
return f.read()
except tf.errors.NotFoundError:
raise KeyError("Asset path %s not found" % asset_path)
except tf.errors.OpError as e:
raise KeyError(
"Couldn't read asset path: %s, OpError %s" % (asset_path, e)
)
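# Illustrative usage sketch (added, not part of the original module): the helpers
# above compose to enumerate plugins under a run directory and fetch their assets.
# The logdir value is a placeholder, not something this module defines.
#
#   logdir = "/tmp/my_run"                       # hypothetical event-log directory
#   for plugin in ListPlugins(logdir):
#       for asset in ListAssets(logdir, plugin):
#           contents = RetrieveAsset(logdir, plugin, asset)
#           print(plugin, asset, len(contents))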
|
tensorflow/tensorboard
|
tensorboard/backend/event_processing/plugin_asset_util.py
|
Python
|
apache-2.0
| 3,555
|
# Copyright 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test management of KeyboardInterrupt in stratisd.
"""
# isort: LOCAL
import stratis_cli
from .._misc import SimTestCase
class KeyboardInterruptTestCase(SimTestCase):
"""
Test behavior of stratis on KeyboardInterrupt.
"""
def test_catch_keyboard_exception(self):
"""
Verify that the KeyboardInterrupt is propagated by the run() method.
./bin/stratis contains a try block at the outermost level which
then catches the KeyboardInterrupt and exits with an error message.
The KeyboardInterrupt is most likely raised in the dbus-python
method which is actually communicating on the D-Bus, but it is
        fairly difficult to get at that method. Instead, settle for getting
at the calling method generated by dbus-python-client-gen.
"""
def raise_keyboard_interrupt(_):
"""
Just raise the interrupt.
"""
raise KeyboardInterrupt()
# pylint: disable=import-outside-toplevel
# isort: LOCAL
from stratis_cli._actions import _data
# pylint: disable=protected-access
stratis_cli._actions._data.Manager.Properties.Version.Get = (
raise_keyboard_interrupt
)
with self.assertRaises(KeyboardInterrupt):
stratis_cli.run()(["daemon", "version"])
|
stratis-storage/stratis-cli
|
tests/whitebox/monkey_patching/test_keyboard_interrupt.py
|
Python
|
apache-2.0
| 1,934
|
#!/usr/bin/python
##########################################################################
#
# MTraceCheck
# Copyright 2017 The Regents of the University of Michigan
# Doowon Lee and Valeria Bertacco
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##########################################################################
#
# This file should be called from codegen.py
#
####################################################################
# Data section
####################################################################
def generate_data_section(dataName, memLocs, strideType):
assert(memLocs <= 0x10000)
#dataArray = []
#for i in range(memLocs):
# data = [i & 0xFF, (i >> 8) & 0xFF, 0xFF, 0xFF]
# dataArray += data
## Data contents will be initialized in test manager, so just create a placeholder
if (strideType == 0):
dataArray = [0xFF for i in range(memLocs * 4 * 1)]
elif (strideType == 1):
dataArray = [0xFF for i in range(memLocs * 4 * 4)]
elif (strideType == 2):
dataArray = [0xFF for i in range(memLocs * 4 * 16)]
else:
assert(False)
dataFP = open(dataName, "w")
dataFP.write(bytearray(dataArray))
dataFP.close()
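# Worked example (added comment, not in the original): with memLocs == 4 the
# placeholder file is 4*4*1 = 16 bytes for strideType 0 (densely packed 4-byte
# words), 4*4*4 = 64 bytes for strideType 1 (one word every 16 bytes), and
# 4*4*16 = 256 bytes for strideType 2 (one word every 64 bytes). The test manager
# generated below later overwrites each word with the pattern 0xFFFF0000 | index
# using the matching pointer stride.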
####################################################################
# BSS section (section to be written by test threads)
####################################################################
def generate_bss_section(bssName, bssSize):
#bssArray = []
#for i in range(bssSize):
# bssArray += [0x00]
#bssFP = open(bssName, "w")
#bssFP.write(bytearray(bssArray))
#bssFP.close()
# Faster code
bssFP = open(bssName, "wb")
bssFP.seek(bssSize-1)
bssFP.write("\0")
bssFP.close()
####################################################################
# Test manager CPP file
####################################################################
def generate_test_manager(cppName, headerName, threadList, bssBase, bssSizePerThread, signatureSize, regBitWidth, numExecutions, strideType):
# See an example of cpp file at exp/160815_test_manager/test_manager.cpp
# (This example is possibly outdated)
wordTypeString = "uint%d_t" % regBitWidth
cppString = ""
cppString += "#include <stdio.h>\n"
cppString += "#include <stdlib.h>\n"
cppString += "#include <stdint.h>\n"
cppString += "#include <pthread.h>\n"
cppString += "#include <map>\n"
cppString += "#include <vector>\n"
cppString += "#include \"%s\"\n" % headerName
for thread in threadList:
cppString += "extern \"C\" void* thread%d_routine(void*);\n" % thread
cppString += "volatile int thread_spawn_lock = 0;\n"
cppString += "#ifdef EXEC_SYNC\n"
cppString += "volatile int thread_exec_barrier0 = 0;\n"
cppString += "volatile int thread_exec_barrier1 = 0;\n"
cppString += "volatile int thread_exec_barrier_ptr = 0;\n"
cppString += "#endif\n"
cppString += "int main()\n"
cppString += "{\n"
cppString += " int pthread_return;\n"
cppString += " int numThreads = %d;\n" % len(threadList)
cppString += " // Test BSS section initialization\n"
cppString += " %s *bss_address = (%s *) TEST_BSS_SECTION;\n" % (wordTypeString, wordTypeString)
cppString += " for (int i = 0; i < numThreads * TEST_BSS_SIZE_PER_THREAD; i += sizeof(%s)) {\n" % (wordTypeString)
cppString += " *(bss_address++) = 0;\n"
cppString += " }\n"
cppString += " // Test data section initialization\n"
cppString += " uint32_t *data_address= (uint32_t *) TEST_DATA_SECTION;\n"
cppString += " for (int i = 0; i < NUM_SHARED_DATA; i++) {\n"
cppString += " *data_address = (uint32_t) (0xFFFF0000 | i);\n"
if (strideType == 0):
cppString += " data_address++; // strideType = 0\n"
elif (strideType == 1):
cppString += " data_address+=4; // strideType = 1\n"
elif (strideType == 2):
cppString += " data_address+=16; // strideType = 2\n"
else:
assert(False)
cppString += " }\n"
cppString += " pthread_t* threads = (pthread_t *) malloc(sizeof(pthread_t) * numThreads);\n"
for threadIndex in range(len(threadList)):
cppString += " pthread_return = pthread_create(&threads[%d], NULL, thread%d_routine, NULL);\n" % (threadIndex, threadList[threadIndex])
cppString += " for (int t = 0; t < numThreads; t++)\n"
cppString += " pthread_return = pthread_join(threads[t], NULL);\n"
cppString += " std::map<std::vector<%s>, int> signatureMap;\n" % (wordTypeString)
cppString += " std::vector<%s> resultVector;\n" % (wordTypeString)
cppString += " %s *signature = (%s *) TEST_BSS_SECTION;\n" % (wordTypeString, wordTypeString)
cppString += " for (int i = 0; i < EXECUTION_COUNT; i++) {\n"
cppString += " resultVector.clear();\n"
#cppString += "#ifndef NO_PRINT\n"
cppString += "#if 0\n"
cppString += " printf(\"%8d:\", i);\n"
cppString += "#endif\n"
cppString += " for (int t = 0; t < numThreads; t++) {\n"
cppString += " for (int w = 0; w < SIGNATURE_SIZE_IN_WORD; w++) {\n"
# NOTE: SIGNATURE WORD REORDERING
#cppString += " for (int w = SIGNATURE_SIZE_IN_WORD - 1; w >= 0; w--) {\n"
#cppString += " for (int t = 0; t < numThreads; t++) {\n"
cppString += " %s address = (%s) signature + t * TEST_BSS_SIZE_PER_THREAD + w * sizeof(%s);\n" % (wordTypeString, wordTypeString, wordTypeString)
cppString += " %s result = (%s)*(%s*)address;\n" % (wordTypeString, wordTypeString, wordTypeString)
cppString += " resultVector.push_back(result);\n"
#cppString += "#ifndef NO_PRINT\n"
cppString += "#if 0\n"
cppString += " printf(\" 0x%%0%dlx\", result);\n" % (regBitWidth / 8 * 2)
#cppString += " printf(\" 0x%%lx 0x%%0%dlx\", address, result);\n" % signatureSize
cppString += "#endif\n"
cppString += " }\n"
cppString += " }\n"
cppString += " if (signatureMap.find(resultVector) == signatureMap.end())\n"
cppString += " signatureMap[resultVector] = 1;\n"
cppString += " else\n"
cppString += " signatureMap[resultVector]++;\n"
#cppString += "#ifndef NO_PRINT\n"
cppString += "#if 0\n"
cppString += " printf(\"\\n\");\n"
cppString += "#endif\n"
cppString += " signature += SIGNATURE_SIZE_IN_WORD;\n"
cppString += " }\n"
cppString += "#ifndef NO_PRINT\n"
cppString += " for (std::map<std::vector<%s>, int>::iterator it = signatureMap.begin(); it != signatureMap.end(); it++) {\n" % (wordTypeString)
cppString += " for (int i = 0; i < (it->first).size(); i++)\n"
cppString += " printf(\" 0x%%0%dlx\", (it->first)[i]);\n" % (regBitWidth / 8 * 2)
cppString += " printf(\": %d\\n\", it->second);\n"
cppString += " }\n"
cppString += "#endif\n"
cppString += " printf(\"Number of unique results %lu out of %d\\n\", signatureMap.size(), EXECUTION_COUNT);\n"
cppString += " fflush(stdout);\n"
cppString += " return 0;\n"
cppString += "}\n"
cppFP = open(cppName, "w")
cppFP.write(cppString)
cppFP.close()
def manager_common(headerName, dataName, dataBase, memLocs, bssName, bssBase, bssSizePerThread, cppName, threadList, signatureSize, regBitWidth, numExecutions, platform, strideType, verbosity):
if (platform == "linuxpthread"):
# Data section and BSS section
generate_data_section(dataName, memLocs, strideType)
if (verbosity > 0):
print("Data binary file %s generated (base 0x%X, size %d)" % (dataName, dataBase, memLocs * 4))
bssSize = bssSizePerThread * len(threadList)
generate_bss_section(bssName, bssSize)
if (verbosity > 0):
print("BSS binary file %s generated (base 0x%X, size %d)" % (bssName, bssBase, bssSize))
generate_test_manager(cppName, headerName, threadList, bssBase, bssSizePerThread, signatureSize, regBitWidth, numExecutions, strideType)
if (verbosity > 0):
print("Test manager %s generated" % (cppName))
####################################################################
# Compute signature size (maximum signature size across all threads)
####################################################################
def compute_max_signature_size(intermediate, regBitWidth):
maxSignatureFlushCount = 0
perthreadSignatureSizes = dict()
for thread in intermediate:
pathCount = 0
signatureFlushCount = 0
for intermediateCode in intermediate[thread]:
if (intermediateCode["type"] == "profile"):
# reg, targets
if ((pathCount * len(intermediateCode["targets"])) > ((1 << regBitWidth) - 1)):
pathCount = 0
signatureFlushCount += 1
if (pathCount == 0):
pathCount = len(intermediateCode["targets"])
else:
pathCount = pathCount * len(intermediateCode["targets"])
perthreadSignatureSizes[thread] = (signatureFlushCount + 1) * regBitWidth / 8
if (signatureFlushCount > maxSignatureFlushCount):
maxSignatureFlushCount = signatureFlushCount
# Number of bytes for each signature
temp = (maxSignatureFlushCount + 1) * regBitWidth / 8
# Log2 ceiling function
power2Boundary = 1
while (power2Boundary < temp):
power2Boundary <<= 1
return [max(power2Boundary, regBitWidth / 8), perthreadSignatureSizes]
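# Worked example (added comment, not in the original): with regBitWidth == 32 and a
# worst-case thread whose signature register overflows twice
# (maxSignatureFlushCount == 2), temp = (2 + 1) * 32 / 8 = 12 bytes; the log2
# ceiling loop rounds that up to power2Boundary = 16, so the function returns
# [16, perthreadSignatureSizes] because 16 > regBitWidth / 8 == 4.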
|
leedoowon/MTraceCheck
|
src_main/codegen_common.py
|
Python
|
apache-2.0
| 10,228
|
from django.core import urlresolvers
from django.utils.translation import ugettext_lazy as _
from django.template.defaultfilters import timesince, floatformat
from horizon import tables
from horizon.templatetags.sizeformat import mbformat
class CSVSummary(tables.LinkAction):
name = "csv_summary"
verbose_name = _("Download CSV Summary")
classes = ("btn-download",)
iconfont = "iconfont icon-folderadd media-object"
card = "card card-blue"
def get_link_url(self, usage=None):
return self.table.kwargs['usage'].csv_link()
class BaseUsageTable(tables.DataTable):
vcpus = tables.Column('vcpus', verbose_name=_("VCPUs"))
disk = tables.Column('local_gb', verbose_name=_("Disk"))
memory = tables.Column('memory_mb',
verbose_name=_("RAM"),
filters=(mbformat,),
attrs={"data-type": "size"})
hours = tables.Column('vcpu_hours', verbose_name=_("VCPU Hours"),
filters=(lambda v: floatformat(v, 2),))
class GlobalUsageTable(BaseUsageTable):
tenant = tables.Column('tenant_name', verbose_name=_("Project Name"))
disk_hours = tables.Column('disk_gb_hours',
verbose_name=_("Disk GB Hours"),
filters=(lambda v: floatformat(v, 2),))
def get_object_id(self, datum):
return datum.tenant_id
class Meta:
name = "global_usage"
verbose_name = _("Usage Summary")
columns = ("tenant", "vcpus", "disk", "memory",
"hours", "disk_hours")
table_actions = (CSVSummary,)
multi_select = False
def get_instance_link(datum):
view = "horizon:project:instances:detail"
if datum.get('instance_id', False):
return urlresolvers.reverse(view, args=(datum.get('instance_id'),))
else:
return None
class TenantUsageTable(BaseUsageTable):
instance = tables.Column('name',
verbose_name=_("Instance Name"),
link=get_instance_link)
uptime = tables.Column('uptime_at',
verbose_name=_("Uptime"),
filters=(timesince,))
def get_object_id(self, datum):
return datum.get('instance_id', id(datum))
class Meta:
name = "tenant_usage"
verbose_name = _("Usage Summary")
columns = ("instance", "vcpus", "disk", "memory", "uptime")
table_actions = ()
multi_select = False
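# Illustrative usage sketch (added, not part of the original module): inside a
# horizon view these tables are instantiated with the current request and a list
# of usage records; `usage_rows` below is a placeholder name.
#
#   table = GlobalUsageTable(request, data=usage_rows)
#   html = table.render()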
|
MKTCloud/MKTCloud
|
openstack_dashboard/usage/tables.py
|
Python
|
apache-2.0
| 2,531
|
# Copyright 2016: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import subprocess
import jsonschema
import mock
from rally import consts
from rally.plugins.common.hook import sys_call
from rally.task import hook
from tests.unit import fakes
from tests.unit import test
class SysCallHookTestCase(test.TestCase):
def test_validate(self):
hook.Hook.validate(
{
"name": "sys_call",
"description": "list folder",
"args": "ls",
"trigger": {
"name": "event",
"args": {
"unit": "iteration",
"at": [10]
}
}
}
)
def test_validate_error(self):
conf = {
"name": "sys_call",
"description": "list folder",
"args": {
"cmd": 50,
},
"trigger": {
"name": "event",
"args": {
"unit": "iteration",
"at": [10]
}
}
}
self.assertRaises(
jsonschema.ValidationError, hook.Hook.validate, conf)
@mock.patch("rally.common.utils.Timer", side_effect=fakes.FakeTimer)
@mock.patch("subprocess.Popen")
def test_run(self, mock_popen, mock_timer):
popen_instance = mock_popen.return_value
popen_instance.returncode = 0
task = mock.MagicMock()
sys_call_hook = sys_call.SysCallHook(task, "/bin/bash -c 'ls'",
{"iteration": 1}, "dummy_action")
sys_call_hook.run_sync()
sys_call_hook.validate_result_schema()
self.assertEqual(
{
"hook": "sys_call",
"description": "dummy_action",
"triggered_by": {"iteration": 1},
"started_at": fakes.FakeTimer().timestamp(),
"finished_at": fakes.FakeTimer().finish_timestamp(),
"status": consts.HookStatus.SUCCESS,
}, sys_call_hook.result())
mock_popen.assert_called_once_with(
["/bin/bash", "-c", "ls"],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
@mock.patch("rally.common.utils.Timer", side_effect=fakes.FakeTimer)
@mock.patch("subprocess.Popen")
def test_run_error(self, mock_popen, mock_timer):
popen_instance = mock_popen.return_value
popen_instance.returncode = 1
popen_instance.stdout.read.return_value = b"No such file or directory"
task = mock.MagicMock()
sys_call_hook = sys_call.SysCallHook(task, "/bin/bash -c 'ls'",
{"iteration": 1}, "dummy_action")
sys_call_hook.run_sync()
sys_call_hook.validate_result_schema()
self.assertEqual(
{
"hook": "sys_call",
"description": "dummy_action",
"triggered_by": {"iteration": 1},
"started_at": fakes.FakeTimer().timestamp(),
"finished_at": fakes.FakeTimer().finish_timestamp(),
"status": consts.HookStatus.FAILED,
"error": [
"n/a",
"Subprocess returned 1",
"No such file or directory",
]
}, sys_call_hook.result())
mock_popen.assert_called_once_with(
["/bin/bash", "-c", "ls"],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
|
vganapath/rally
|
tests/unit/plugins/common/hook/test_sys_call.py
|
Python
|
apache-2.0
| 4,165
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
from pants.contrib.python.checks.tasks.checkstyle.plugin_subsystem_base import PluginSubsystemBase
class IndentationSubsystem(PluginSubsystemBase):
options_scope = 'pycheck-indentation'
def get_plugin_type(self):
from pants.contrib.python.checks.tasks.checkstyle.indentation import Indentation
return Indentation
|
foursquare/pants
|
contrib/python/src/python/pants/contrib/python/checks/tasks/checkstyle/indentation_subsystem.py
|
Python
|
apache-2.0
| 559
|
##
# Copyright (c) 2006-2013 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from caldavclientlibrary.protocol.http.util import parsequoted
from caldavclientlibrary.protocol.http.util import parsetoken
from caldavclientlibrary.protocol.http.util import parseStatusLine
import unittest
class TestParseQuoted(unittest.TestCase):
def testParseQuotedOK(self):
data = {
"\"\"" : ("", ""),
"\"quoted\"" : ("quoted", ""),
"\"quoted words\"" : ("quoted words", ""),
"\"quoting a \\\"word\\\"\"" : ("quoting a \"word\"", ""),
"\"\" after" : ("", "after"),
"\"quoted\" after" : ("quoted", "after"),
"\"quoted words\" after" : ("quoted words", "after"),
"\"quoting a \\\"word\\\"\" after" : ("quoting a \"word\"", "after"),
"\"quoting a \\\"word\\\" after\" after": ("quoting a \"word\" after", "after"),
"\"quoted\"after" : ("quoted", "after"),
"\"" : ("", ""),
"\"unterminated" : ("unterminated", ""),
"\"unterminated words" : ("unterminated words", ""),
"\"unterminated a \\\"word\\\"" : ("unterminated a \"word\"", ""),
}
for input, result in data.iteritems():
self.assertEqual(parsequoted(input), result)
def testParseQuotedBAD(self):
data = (
"",
"unquoted",
"unquoted \"quoted\"",
)
for input in data:
self.assertRaises(AssertionError, parsequoted, input)
class TestParseToken(unittest.TestCase):
def testParseTokenOK(self):
data = {
"" : ("", ""),
"unquoted" : ("unquoted", ""),
"unquoted words" : ("unquoted", "words"),
"unquoted words" : ("unquoted", "words"),
"unquoting a \"word\"" : ("unquoting", "a \"word\""),
"unquoted\twords" : ("unquoted", "words"),
"unquoting\ta \"word\"" : ("unquoting", "a \"word\""),
"unquoted: words" : ("unquoted", "words"),
"unquoting: a \"word\"" : ("unquoting", "a \"word\""),
"\"\"" : ("", ""),
"\"quoted\"" : ("quoted", ""),
"\"quoted words\"" : ("quoted words", ""),
"\"quoting a \\\"word\\\"\"" : ("quoting a \"word\"", ""),
"\"\" after" : ("", "after"),
"\"quoted\" after" : ("quoted", "after"),
"\"quoted words\" after" : ("quoted words", "after"),
"\"quoting a \\\"word\\\"\" after" : ("quoting a \"word\"", "after"),
"\"quoting a \\\"word\\\" after\" after": ("quoting a \"word\" after", "after"),
"\"quoted\"after" : ("quoted", "after"),
"\"" : ("", ""),
"\"unterminated" : ("unterminated", ""),
"\"unterminated words" : ("unterminated words", ""),
"\"unterminated a \\\"word\\\"" : ("unterminated a \"word\"", ""),
}
for input, result in data.iteritems():
self.assertEqual(parsetoken(input, " \t:"), result)
class TestParseStatusLine(unittest.TestCase):
def testParseTokenOK(self):
self.assertEqual(parseStatusLine("HTTP/1.1 200 OK"), 200)
def testParseTokenBadStatus(self):
self.assertEqual(parseStatusLine("HTTP/1.2 2001 OK"), 0)
def testParseTokenBadVersion(self):
self.assertEqual(parseStatusLine("HTTP/1.2 200 OK"), 0)
def testParseTokenBadNumber(self):
self.assertEqual(parseStatusLine("HTTP/1.1 OK"), 0)
def testParseTokenBad(self):
self.assertEqual(parseStatusLine("HTTP/1.1"), 0)
|
skarra/CalDAVClientLibrary
|
caldavclientlibrary/protocol/http/tests/test_util.py
|
Python
|
apache-2.0
| 4,886
|
from transitfeed import TYPE_ERROR, TYPE_WARNING, TYPE_NOTICE
from oba_rvtd_monitor.feedvalidator import LimitPerTypeProblemAccumulator
class MonitoringProblemAccumulator(LimitPerTypeProblemAccumulator):
pass
|
trilliumtransit/oba_rvtd_monitor
|
oba_rvtd_monitor/problems.py
|
Python
|
apache-2.0
| 216
|
"""
This component provides basic support for Amcrest IP cameras.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/camera.amcrest/
"""
import logging
from homeassistant.components.amcrest import (
DATA_AMCREST, STREAM_SOURCE_LIST, TIMEOUT)
from homeassistant.components.camera import Camera
from homeassistant.components.ffmpeg import DATA_FFMPEG
from homeassistant.const import CONF_NAME
from homeassistant.helpers.aiohttp_client import (
async_get_clientsession, async_aiohttp_proxy_web,
async_aiohttp_proxy_stream)
DEPENDENCIES = ['amcrest', 'ffmpeg']
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(hass, config, async_add_entities,
discovery_info=None):
"""Set up an Amcrest IP Camera."""
if discovery_info is None:
return
device_name = discovery_info[CONF_NAME]
amcrest = hass.data[DATA_AMCREST][device_name]
async_add_entities([AmcrestCam(hass, amcrest)], True)
return True
class AmcrestCam(Camera):
"""An implementation of an Amcrest IP camera."""
def __init__(self, hass, amcrest):
"""Initialize an Amcrest camera."""
super(AmcrestCam, self).__init__()
self._name = amcrest.name
self._camera = amcrest.device
self._base_url = self._camera.get_base_url()
self._ffmpeg = hass.data[DATA_FFMPEG]
self._ffmpeg_arguments = amcrest.ffmpeg_arguments
self._stream_source = amcrest.stream_source
self._resolution = amcrest.resolution
self._token = self._auth = amcrest.authentication
def camera_image(self):
"""Return a still image response from the camera."""
# Send the request to snap a picture and return raw jpg data
response = self._camera.snapshot(channel=self._resolution)
return response.data
async def handle_async_mjpeg_stream(self, request):
"""Return an MJPEG stream."""
# The snapshot implementation is handled by the parent class
if self._stream_source == STREAM_SOURCE_LIST['snapshot']:
return await super().handle_async_mjpeg_stream(request)
if self._stream_source == STREAM_SOURCE_LIST['mjpeg']:
# stream an MJPEG image stream directly from the camera
websession = async_get_clientsession(self.hass)
streaming_url = self._camera.mjpeg_url(typeno=self._resolution)
stream_coro = websession.get(
streaming_url, auth=self._token, timeout=TIMEOUT)
return await async_aiohttp_proxy_web(
self.hass, request, stream_coro)
# streaming via ffmpeg
from haffmpeg import CameraMjpeg
streaming_url = self._camera.rtsp_url(typeno=self._resolution)
stream = CameraMjpeg(self._ffmpeg.binary, loop=self.hass.loop)
await stream.open_camera(
streaming_url, extra_cmd=self._ffmpeg_arguments)
try:
return await async_aiohttp_proxy_stream(
self.hass, request, stream,
self._ffmpeg.ffmpeg_stream_content_type)
finally:
await stream.close()
@property
def name(self):
"""Return the name of this camera."""
return self._name
|
PetePriority/home-assistant
|
homeassistant/components/amcrest/camera.py
|
Python
|
apache-2.0
| 3,307
|
#
# Copyright 2016-2019 Crown Copyright
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from gafferpy import gaffer as g
from gafferpy import gaffer_connector
def run(host, verbose=False):
return run_with_connector(create_connector(host, verbose))
def run_with_connector(gc):
print()
print('Running operations')
print('--------------------------')
print()
get_schema(gc)
get_filter_functions(gc)
get_class_filter_functions(gc)
get_element_generators(gc)
get_object_generators(gc)
get_operations(gc)
get_serialised_fields(gc)
get_store_traits(gc)
is_operation_supported(gc)
add_elements(gc)
get_elements(gc)
get_adj_seeds(gc)
get_all_elements(gc)
get_walks(gc)
generate_elements(gc)
generate_domain_objs(gc)
generate_domain_objects_chain(gc)
get_element_group_counts(gc)
get_sub_graph(gc)
export_to_gaffer_result_cache(gc)
get_job_details(gc)
get_all_job_details(gc)
add_named_operation(gc)
get_all_named_operations(gc)
named_operation(gc)
delete_named_operation(gc)
add_named_view_summarise(gc)
add_named_view_date_range(gc)
get_all_named_views(gc)
named_view_summarise(gc)
named_view_date_range(gc)
named_views(gc)
delete_named_views(gc)
sort_elements(gc)
max_element(gc)
min_element(gc)
to_vertices_to_entity_seeds(gc)
complex_op_chain(gc)
op_chain_in_json(gc)
def create_connector(host, verbose=False):
return gaffer_connector.GafferConnector(host, verbose)
def get_schema(gc):
# Get Schema
result = gc.execute_get(
g.GetSchema()
)
print('Schema:')
print(result)
print()
def get_filter_functions(gc):
# Get filter functions
result = gc.execute_get(
g.GetFilterFunctions()
)
print('Filter Functions:')
print(result)
print()
def get_class_filter_functions(gc):
# Get class filter functions
class_name = 'uk.gov.gchq.koryphe.impl.predicate.IsMoreThan'
result = gc.execute_get(
g.GetClassFilterFunctions(class_name=class_name)
)
print('Class Filter Functions (IsMoreThan):')
print(result)
print()
def get_element_generators(gc):
# Get Element generators
result = gc.execute_get(
g.GetElementGenerators()
)
print('Element generators:')
print(result)
print()
def get_object_generators(gc):
# Get Object generators
result = gc.execute_get(
g.GetObjectGenerators()
)
print('Object generators:')
print(result)
print()
def get_operations(gc):
# Get operations
result = gc.execute_get(
g.GetOperations()
)
print('Operations:')
print(result)
print()
def get_serialised_fields(gc):
# Get serialised fields
class_name = 'uk.gov.gchq.koryphe.impl.predicate.IsMoreThan'
result = gc.execute_get(
g.GetSerialisedFields(class_name=class_name)
)
print('Serialised Fields (IsMoreThan):')
print(result)
print()
def get_store_traits(gc):
# Get Store Traits
result = gc.execute_get(
g.GetStoreTraits()
)
print('Store Traits:')
print(result)
print()
def is_operation_supported(gc):
# Is operation supported
operation = 'uk.gov.gchq.gaffer.operation.impl.add.AddElements'
result = gc.is_operation_supported(
g.IsOperationSupported(operation=operation)
)
print(
'\nOperation supported ("uk.gov.gchq.gaffer.operation.impl.add.AddElements"):')
print(result)
print()
def add_elements(gc):
# Add Elements
gc.execute_operation(
g.AddElements(
input=[
g.Entity(
group='JunctionUse',
vertex='M1:1',
properties={
'countByVehicleType': g.freq_map({
'BUS': 10,
'CAR': 50
}),
'endDate': g.date(1034319600000),
'count': g.long(60),
'startDate': g.date(1034316000000)
}
),
g.Edge(
group='RoadHasJunction',
source='M1',
destination='M1:1',
directed=True,
properties={}
)
]
)
)
print('Elements have been added')
print()
def get_elements(gc):
# Get Elements
input = gc.execute_operation(
g.GetElements(
input=[
g.EntitySeed('M5:10'),
# Edge input can be provided as follows
g.EdgeSeed('M5:10', 'M5:11', g.DirectedType.EITHER),
g.EdgeSeed('M5:10', 'M5:11', g.DirectedType.DIRECTED),
# Or you can use True or False for the direction
g.EdgeSeed('M5:10', 'M5:11', True)
],
view=g.View(
edges=[
g.ElementDefinition(
group='RoadUse',
group_by=[],
transient_properties=[
g.Property('description', 'java.lang.String')
],
pre_aggregation_filter_functions=[
g.PredicateContext(
selection=['count'],
predicate=g.IsMoreThan(
value=g.long(1)
)
)
],
transform_functions=[
g.FunctionContext(
selection=['SOURCE', 'DESTINATION', 'count'],
function=g.Function(
class_name='uk.gov.gchq.gaffer.traffic.transform.DescriptionTransform'
),
projection=['description']
)
]
)
]
),
directed_type=g.DirectedType.EITHER
)
)
print('Related input')
print(input)
print()
def get_adj_seeds(gc):
# Adjacent Elements - chain 2 adjacent entities together
adj_seeds = gc.execute_operations(
[
g.GetAdjacentIds(
input=[
g.EntitySeed(
vertex='M5'
)
],
view=g.View(
edges=[
g.ElementDefinition(
'RoadHasJunction',
group_by=[]
)
]
),
include_incoming_out_going=g.InOutType.OUT
),
g.GetAdjacentIds(
view=g.View(
edges=[
g.ElementDefinition(
'RoadUse',
group_by=[]
)
]
),
include_incoming_out_going=g.InOutType.OUT
)
]
)
print('Adjacent entities - 2 hop')
print(adj_seeds)
print()
def get_all_elements(gc):
# Get all input, but limit the total results to 3
all_elements = gc.execute_operations(
operations=[
g.GetAllElements(),
g.Limit(result_limit=3)
]
)
print('All input (Limited to first 3)')
print(all_elements)
print()
def get_walks(gc):
# Get walks from M32 traversing down RoadHasJunction then JunctionLocatedAt
walks = gc.execute_operation(
g.GetWalks(
input=[
g.EntitySeed('M32'),
],
operations=[
g.GetElements(
view=g.View(
edges=[
g.ElementDefinition(
group='RoadHasJunction'
)
]
)
),
g.GetElements(
view=g.View(
edges=[
g.ElementDefinition(
group='JunctionLocatedAt'
)
]
)
)
]
)
)
print(
'Walks from M32 traversing down RoadHasJunction then JunctionLocatedAt')
print(walks)
print()
def generate_elements(gc):
# Generate Elements
input = gc.execute_operation(
g.GenerateElements(
element_generator=g.ElementGenerator(
class_name='uk.gov.gchq.gaffer.traffic.generator.RoadTrafficStringElementGenerator'
),
input=[
'"South West","E06000054","Wiltshire","6016","389200","179080","M4","LA Boundary","381800","180030","17","391646","179560","TM","E","2000","2000-05-03 00:00:00","7","0","9","2243","15","426","127","21","20","37","106","56","367","3060"'
]
)
)
print('Generated input from provided domain input')
print(input)
print()
def generate_domain_objs(gc):
# Generate Domain Objects - single provided element
input = gc.execute_operation(
g.GenerateObjects(
element_generator=g.ElementGenerator(
class_name='uk.gov.gchq.gaffer.rest.example.ExampleDomainObjectGenerator'
),
input=[
g.Entity('entity', '1'),
g.Edge('edge', '1', '2', True)
]
)
)
print('Generated input from provided input')
print(input)
print()
def generate_domain_objects_chain(gc):
# Generate Domain Objects - chain of get input then generate input
input = gc.execute_operations(
[
g.GetElements(
input=[g.EntitySeed(vertex='M5')],
seed_matching_type=g.SeedMatchingType.RELATED,
view=g.View(
edges=[
g.ElementDefinition(
group='RoadHasJunction',
group_by=[]
)
]
)
),
g.GenerateObjects(
element_generator=g.ElementGenerator(
class_name='uk.gov.gchq.gaffer.rest.example.ExampleDomainObjectGenerator'
)
)
]
)
print('Generated input from get input by seed')
print(input)
print()
def get_element_group_counts(gc):
# Get Elements
group_counts = gc.execute_operations([
g.GetElements(
input=[g.EntitySeed('M5')]
),
g.CountGroups(limit=1000)
])
print('Groups counts (limited to 1000 input)')
print(group_counts)
print()
def get_sub_graph(gc):
# Export and Get to/from an in memory set
entity_seeds = gc.execute_operations(
[
g.GetAdjacentIds(
input=[g.EntitySeed('South West')],
include_incoming_out_going=g.InOutType.OUT
),
g.ExportToSet(),
g.GetAdjacentIds(include_incoming_out_going=g.InOutType.OUT),
g.ExportToSet(),
g.DiscardOutput(),
g.GetSetExport()
]
)
print('Export and Get to/from an in memory set')
print(entity_seeds)
print()
def export_to_gaffer_result_cache(gc):
# Export to Gaffer Result Cache and Get from Gaffer Result Cache
job_details = gc.execute_operations(
[
g.GetAdjacentIds(
input=[g.EntitySeed('South West')],
include_incoming_out_going=g.InOutType.OUT
),
g.ExportToGafferResultCache(),
g.DiscardOutput(),
g.GetJobDetails()
]
)
print('Export to Gaffer Result Cache. Job Details:')
print(job_details)
print()
job_id = job_details['jobId']
entity_seeds = gc.execute_operation(
g.GetGafferResultCacheExport(job_id=job_id),
)
print('Get Gaffer Result Cache Export.')
print(entity_seeds)
print()
def get_job_details(gc):
# Get all job details
job_details_initial = gc.execute_operations(
[
g.GetAdjacentIds(
input=[g.EntitySeed('1')],
),
g.ExportToGafferResultCache(),
g.DiscardOutput(),
g.GetJobDetails()
]
)
job_id = job_details_initial['jobId']
job_details = gc.execute_operation(
g.GetJobDetails(job_id=job_id),
)
print('Get job details')
print(job_details)
print()
def get_all_job_details(gc):
# Get all job details
all_job_details = gc.execute_operation(
g.GetAllJobDetails(),
)
print('Get all job details (just prints the first 3 results)')
print(all_job_details[:3])
print()
def delete_named_operation(gc):
gc.execute_operation(
g.DeleteNamedOperation('2-hop-with-limit')
)
print('Deleted named operation: 2-hop-with-limit')
print()
def add_named_operation(gc):
gc.execute_operation(
g.AddNamedOperation(
operation_chain={
"operations": [{
"class": "uk.gov.gchq.gaffer.operation.impl.get.GetAdjacentIds",
"includeIncomingOutGoing": "OUTGOING"
}, {
"class": "uk.gov.gchq.gaffer.operation.impl.get.GetAdjacentIds",
"includeIncomingOutGoing": "OUTGOING"
}, {
"class": "uk.gov.gchq.gaffer.operation.impl.Limit",
"resultLimit": "${param1}"
}]
},
operation_name='2-hop-with-limit',
description='2 hop query with limit',
overwrite_flag=True,
read_access_roles=["read-user"],
write_access_roles=["write-user"],
parameters=[
g.NamedOperationParameter(
name="param1",
description="Limit param",
default_value=1,
value_class="java.lang.Long",
required=False
)
]
)
)
print('Added named operation: 2-hop-with-limit')
print()
def get_all_named_operations(gc):
namedOperations = gc.execute_operation(
g.GetAllNamedOperations()
)
print('Named operations')
print(namedOperations)
print()
def named_operation(gc):
result = gc.execute_operation(
g.NamedOperation(
operation_name='2-hop-with-limit',
parameters={
'param1': 2
},
input=[
g.EntitySeed('M5')
]
)
)
print('Execute named operation')
print(result)
print()
def delete_named_views(gc):
gc.execute_operation(
g.DeleteNamedView(name='summarise')
)
print('Deleted named view: summarise')
gc.execute_operation(
g.DeleteNamedView(name='dateRange')
)
print('Deleted named view: dateRange')
print()
def add_named_view_summarise(gc):
gc.execute_operation(
g.AddNamedView(
view=g.View(
global_elements=[
g.GlobalElementDefinition(group_by=[])
]
),
name='summarise',
description='Summarises all results (overrides the groupBy to an empty array).',
overwrite_flag=True
)
)
print('Added named view: summarise')
print()
def add_named_view_date_range(gc):
gc.execute_operation(
g.AddNamedView(
view=g.View(
global_elements=g.GlobalElementDefinition(
pre_aggregation_filter_functions=[
g.PredicateContext(
selection=['startDate'],
predicate=g.InDateRange(
start='${start}',
end='${end}'
)
)
]
)
),
name='dateRange',
description='Filters results to a provided date range.',
overwrite_flag=True,
parameters=[
g.NamedViewParameter(
name="start",
description="A date string for the start of date range.",
value_class="java.lang.String",
required=False
),
g.NamedViewParameter(
name="end",
description="A date string for the end of the date range.",
value_class="java.lang.String",
required=False
)
]
)
)
print('Added named view: dateRange')
print()
def get_all_named_views(gc):
namedViews = gc.execute_operation(
g.GetAllNamedViews()
)
print('Named views')
print(namedViews)
print()
def named_view_summarise(gc):
result = gc.execute_operation(
g.GetElements(
input=[
g.EntitySeed(
vertex='M32:1'
)
],
view=g.NamedView(
name="summarise"
)
)
)
print('Execute get elements with summarised named view')
print(result)
print()
def named_view_date_range(gc):
result = gc.execute_operation(
g.GetElements(
input=[
g.EntitySeed(
vertex='M32:1'
)
],
view=g.NamedView(
name="dateRange",
parameters={
'start': '2005/05/03 06:00',
'end': '2005/05/03 09:00'
}
)
)
)
print('Execute get elements with date range named view')
print(result)
print()
def named_views(gc):
result = gc.execute_operation(
g.GetElements(
input=[
g.EntitySeed(
vertex='M32:1'
)
],
view=[
g.NamedView(
name="summarise"
),
g.NamedView(
name="dateRange",
parameters={
'start': '2005/05/03 06:00',
'end': '2005/05/03 09:00'
}
)
]
)
)
print('Execute get elements with summarised and date range named views')
print(result)
print()
def sort_elements(gc):
# Get sorted Elements
input = gc.execute_operations([
g.GetAllElements(
view=g.View(
edges=[
g.ElementDefinition(
group='RoadUse',
group_by=[]
)
]
)
),
g.Sort(
comparators=[
g.ElementPropertyComparator(
groups=['RoadUse'],
property='count'
)
],
result_limit=5
)
])
print('Sorted input')
print(input)
print()
def max_element(gc):
# Get sorted Elements
input = gc.execute_operations([
g.GetAllElements(
view=g.View(
edges=[
g.ElementDefinition(
group='RoadUse',
group_by=[]
)
]
)
),
g.Max(
comparators=[
g.ElementPropertyComparator(
groups=['RoadUse'],
property='count'
)
]
)
])
print('Max element')
print(input)
print()
def min_element(gc):
# Get sorted Elements
input = gc.execute_operations([
g.GetAllElements(
view=g.View(
edges=[
g.ElementDefinition(
group='RoadUse',
group_by=[]
)
]
)
),
g.Min(
comparators=[
g.ElementPropertyComparator(
groups=['RoadUse'],
property='count'
)
]
)
])
print('Min element')
print(input)
print()
def to_vertices_to_entity_seeds(gc):
# Get sorted Elements
input = gc.execute_operations([
g.GetElements(
input=[
g.EntitySeed(
vertex='South West'
)
],
view=g.View(
edges=[
g.ElementDefinition(
'RegionContainsLocation',
group_by=[]
)
]
),
include_incoming_out_going=g.InOutType.OUT
),
g.ToVertices(
edge_vertices=g.EdgeVertices.DESTINATION,
use_matched_vertex=g.UseMatchedVertex.OPPOSITE
),
g.ToEntitySeeds(),
g.GetElements(
view=g.View(
edges=[
g.ElementDefinition(
'LocationContainsRoad',
group_by=[]
)
]
),
include_incoming_out_going=g.InOutType.OUT
),
g.Limit(5)
])
print('ToVertices then ToEntitySeeds')
print(input)
print()
def complex_op_chain(gc):
# All road junctions in the South West that were heavily used by buses in year 2000.
junctions = gc.execute_operations(
operations=[
g.GetAdjacentIds(
input=[g.EntitySeed(vertex='South West')],
view=g.View(
edges=[
g.ElementDefinition(
group='RegionContainsLocation',
group_by=[]
)
]
)
),
g.GetAdjacentIds(
view=g.View(
edges=[
g.ElementDefinition(
group='LocationContainsRoad',
group_by=[]
)
]
)
),
g.ToSet(),
g.GetAdjacentIds(
view=g.View(
edges=[
g.ElementDefinition(
group='RoadHasJunction',
group_by=[]
)
]
)
),
g.GetElements(
view=g.View(
entities=[
g.ElementDefinition(
group='JunctionUse',
group_by=[],
transient_properties=[
g.Property('busCount', 'java.lang.Long')
],
pre_aggregation_filter_functions=[
g.PredicateContext(
selection=['startDate'],
predicate=g.InDateRange(
start='2000/01/01',
end='2001/01/01'
)
)
],
post_aggregation_filter_functions=[
g.PredicateContext(
selection=['countByVehicleType'],
predicate=g.PredicateMap(
predicate=g.IsMoreThan(
value={'java.lang.Long': 1000},
or_equal_to=False
),
key='BUS'
)
)
],
transform_functions=[
g.FunctionContext(
selection=['countByVehicleType'],
function=g.FreqMapExtractor(key='BUS'),
projection=['busCount']
)
]
)
]
),
include_incoming_out_going=g.InOutType.OUT
),
g.ToCsv(
element_generator=g.CsvGenerator(
fields={
'VERTEX': 'Junction',
'busCount': 'Bus Count'
},
quoted=False
),
include_header=True
)
]
)
print(
'All road junctions in the South West that were heavily used by buses in year 2000.')
print(junctions)
print()
def op_chain_in_json(gc):
# Operation chain defined in json
result = gc.execute_operation_chain(
{
"class": "uk.gov.gchq.gaffer.operation.OperationChain",
"operations": [{
"class": "uk.gov.gchq.gaffer.operation.impl.get.GetAllElements",
}, {
"class": "uk.gov.gchq.gaffer.operation.impl.CountGroups"
}]
}
)
print('Operation chain defined in json')
print(result)
print()
if __name__ == "__main__":
run('http://localhost:8080/rest/latest', False)
|
gchq/gaffer-tools
|
python-shell/src/example.py
|
Python
|
apache-2.0
| 26,676
|
#
# Copyright 2015 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
#!/usr/bin/env python
usage = '''
Write buildbot spec to outfile based on the bot name:
$ python buildbot_spec.py outfile Test-Ubuntu-GCC-GCE-CPU-AVX2-x86-Debug
Or run self-tests:
$ python buildbot_spec.py test
'''
import inspect
import json
import os
import sys
import builder_name_schema
import dm_flags
import nanobench_flags
CONFIG_COVERAGE = 'Coverage'
CONFIG_DEBUG = 'Debug'
CONFIG_RELEASE = 'Release'
def lineno():
caller = inspect.stack()[1] # Up one level to our caller.
return inspect.getframeinfo(caller[0]).lineno
# Since we don't actually start coverage until we're in the self-test,
# some function def lines aren't reported as covered. Add them to this
# list so that we can ignore them.
cov_skip = []
cov_start = lineno()+1 # We care about coverage starting just past this def.
def gyp_defines(builder_dict):
gyp_defs = {}
# skia_arch_type.
if builder_dict['role'] == builder_name_schema.BUILDER_ROLE_BUILD:
arch = builder_dict['target_arch']
elif builder_dict['role'] == builder_name_schema.BUILDER_ROLE_HOUSEKEEPER:
arch = None
else:
arch = builder_dict['arch']
arch_types = {
'x86': 'x86',
'x86_64': 'x86_64',
'Arm7': 'arm',
'Arm64': 'arm64',
'Mips': 'mips32',
'Mips64': 'mips64',
'MipsDSP2': 'mips32',
}
if arch in arch_types:
gyp_defs['skia_arch_type'] = arch_types[arch]
# housekeeper: build shared lib.
if builder_dict['role'] == builder_name_schema.BUILDER_ROLE_HOUSEKEEPER:
gyp_defs['skia_shared_lib'] = '1'
# skia_gpu.
if builder_dict.get('cpu_or_gpu') == 'CPU':
gyp_defs['skia_gpu'] = '0'
# skia_warnings_as_errors.
werr = False
if builder_dict['role'] == builder_name_schema.BUILDER_ROLE_BUILD:
if 'Win' in builder_dict.get('os', ''):
if not ('GDI' in builder_dict.get('extra_config', '') or
'Exceptions' in builder_dict.get('extra_config', '')):
werr = True
elif ('Mac' in builder_dict.get('os', '') and
'Android' in builder_dict.get('extra_config', '')):
werr = False
else:
werr = True
gyp_defs['skia_warnings_as_errors'] = str(int(werr)) # True/False -> '1'/'0'
# Win debugger.
if 'Win' in builder_dict.get('os', ''):
gyp_defs['skia_win_debuggers_path'] = 'c:/DbgHelp'
# Qt SDK (Win).
if 'Win' in builder_dict.get('os', ''):
if builder_dict.get('os') == 'Win8':
gyp_defs['qt_sdk'] = 'C:/Qt/Qt5.1.0/5.1.0/msvc2012_64/'
else:
gyp_defs['qt_sdk'] = 'C:/Qt/4.8.5/'
# ANGLE.
if builder_dict.get('extra_config') == 'ANGLE':
gyp_defs['skia_angle'] = '1'
if builder_dict.get('os', '') in ('Ubuntu', 'Linux'):
gyp_defs['use_x11'] = '1'
gyp_defs['chromeos'] = '0'
# GDI.
if builder_dict.get('extra_config') == 'GDI':
gyp_defs['skia_gdi'] = '1'
# Build with Exceptions on Windows.
if ('Win' in builder_dict.get('os', '') and
builder_dict.get('extra_config') == 'Exceptions'):
gyp_defs['skia_win_exceptions'] = '1'
# iOS.
if (builder_dict.get('os') == 'iOS' or
builder_dict.get('extra_config') == 'iOS'):
gyp_defs['skia_os'] = 'ios'
# Shared library build.
if builder_dict.get('extra_config') == 'Shared':
gyp_defs['skia_shared_lib'] = '1'
# Build fastest Skia possible.
if builder_dict.get('extra_config') == 'Fast':
gyp_defs['skia_fast'] = '1'
# PDF viewer in GM.
if (builder_dict.get('os') == 'Mac10.8' and
builder_dict.get('arch') == 'x86_64' and
builder_dict.get('configuration') == 'Release'):
gyp_defs['skia_run_pdfviewer_in_gm'] = '1'
# Clang.
if builder_dict.get('compiler') == 'Clang':
gyp_defs['skia_clang_build'] = '1'
# Valgrind.
if 'Valgrind' in builder_dict.get('extra_config', ''):
gyp_defs['skia_release_optimization_level'] = '1'
# Link-time code generation just wastes time on compile-only bots.
if (builder_dict.get('role') == builder_name_schema.BUILDER_ROLE_BUILD and
builder_dict.get('compiler') == 'MSVC'):
gyp_defs['skia_win_ltcg'] = '0'
# Mesa.
if (builder_dict.get('extra_config') == 'Mesa' or
builder_dict.get('cpu_or_gpu_value') == 'Mesa'):
gyp_defs['skia_mesa'] = '1'
# VisualBench
if builder_dict.get('extra_config') == 'VisualBench':
gyp_defs['skia_use_sdl'] = '1'
# skia_use_android_framework_defines.
if builder_dict.get('extra_config') == 'Android_FrameworkDefs':
gyp_defs['skia_use_android_framework_defines'] = '1'
# Skia dump stats for perf tests and gpu
if (builder_dict.get('cpu_or_gpu') == 'GPU' and
builder_dict.get('role') == 'Perf'):
gyp_defs['skia_dump_stats'] = '1'
# CommandBuffer.
if builder_dict.get('extra_config') == 'CommandBuffer':
gyp_defs['skia_command_buffer'] = '1'
# Vulkan.
if builder_dict.get('extra_config') == 'Vulkan':
gyp_defs['skia_vulkan'] = '1'
return gyp_defs
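# Illustrative trace (added comment, not in the original): for a hypothetical
# builder dict such as
#   {'role': builder_name_schema.BUILDER_ROLE_BUILD, 'os': 'Ubuntu',
#    'compiler': 'GCC', 'target_arch': 'x86_64', 'configuration': 'Release',
#    'extra_config': 'ANGLE'}
# the function above yields skia_arch_type='x86_64', skia_warnings_as_errors='1'
# (a non-Windows, non-Mac/Android build bot), skia_angle='1', use_x11='1' and
# chromeos='0'.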
cov_skip.extend([lineno(), lineno() + 1])
def get_extra_env_vars(builder_dict):
env = {}
if builder_dict.get('configuration') == CONFIG_COVERAGE:
# We have to use Clang 3.6 because earlier versions do not support the
# compile flags we use and 3.7 and 3.8 hit asserts during compilation.
env['CC'] = '/usr/bin/clang-3.6'
env['CXX'] = '/usr/bin/clang++-3.6'
elif builder_dict.get('compiler') == 'Clang':
env['CC'] = '/usr/bin/clang'
env['CXX'] = '/usr/bin/clang++'
# SKNX_NO_SIMD, SK_USE_DISCARDABLE_SCALEDIMAGECACHE, etc.
extra_config = builder_dict.get('extra_config', '')
if extra_config.startswith('SK') and extra_config.isupper():
env['CPPFLAGS'] = '-D' + extra_config
return env
cov_skip.extend([lineno(), lineno() + 1])
def build_targets_from_builder_dict(builder_dict, do_test_steps, do_perf_steps):
"""Return a list of targets to build, depending on the builder type."""
if builder_dict['role'] in ('Test', 'Perf') and builder_dict['os'] == 'iOS':
return ['iOSShell']
if builder_dict.get('extra_config') == 'Appurify':
return ['VisualBenchTest_APK']
t = []
if do_test_steps:
t.append('dm')
if do_perf_steps and builder_dict.get('extra_config') == 'VisualBench':
t.append('visualbench')
elif do_perf_steps:
t.append('nanobench')
if t:
return t
else:
return ['most']
cov_skip.extend([lineno(), lineno() + 1])
def device_cfg(builder_dict):
# Android.
if 'Android' in builder_dict.get('extra_config', ''):
if 'NoNeon' in builder_dict['extra_config']:
return 'arm_v7'
return {
'Arm64': 'arm64',
'x86': 'x86',
'x86_64': 'x86_64',
'Mips': 'mips',
'Mips64': 'mips64',
'MipsDSP2': 'mips_dsp2',
}.get(builder_dict['target_arch'], 'arm_v7_neon')
elif builder_dict.get('os') == 'Android':
return {
'AndroidOne': 'arm_v7_neon',
'GalaxyS3': 'arm_v7_neon',
'GalaxyS4': 'arm_v7_neon',
'NVIDIA_Shield': 'arm64',
'Nexus10': 'arm_v7_neon',
'Nexus5': 'arm_v7_neon',
'Nexus6': 'arm_v7_neon',
'Nexus7': 'arm_v7_neon',
'Nexus7v2': 'arm_v7_neon',
'Nexus9': 'arm64',
'NexusPlayer': 'x86',
}[builder_dict['model']]
# ChromeOS.
if 'CrOS' in builder_dict.get('extra_config', ''):
if 'Link' in builder_dict['extra_config']:
return 'link'
if 'Daisy' in builder_dict['extra_config']:
return 'daisy'
elif builder_dict.get('os') == 'ChromeOS':
return {
'Link': 'link',
'Daisy': 'daisy',
}[builder_dict['model']]
# iOS.
if 'iOS' in builder_dict.get('os', ''):
return {
'iPad4': 'iPad4,1',
}[builder_dict['model']]
return None
cov_skip.extend([lineno(), lineno() + 1])
def product_board(builder_dict):
if 'Android' in builder_dict.get('os', ''):
return {
'AndroidOne': None, # TODO(borenet,kjlubick)
'GalaxyS3': 'smdk4x12',
'GalaxyS4': None, # TODO(borenet,kjlubick)
'NVIDIA_Shield': None, # TODO(borenet,kjlubick)
'Nexus10': 'manta',
'Nexus5': 'hammerhead',
'Nexus6': 'shamu',
'Nexus7': 'grouper',
'Nexus7v2': 'flo',
'Nexus9': 'flounder',
'NexusPlayer': 'fugu',
}[builder_dict['model']]
return None
cov_skip.extend([lineno(), lineno() + 1])
def get_builder_spec(builder_name):
builder_dict = builder_name_schema.DictForBuilderName(builder_name)
env = get_extra_env_vars(builder_dict)
gyp_defs = gyp_defines(builder_dict)
gyp_defs_list = ['%s=%s' % (k, v) for k, v in gyp_defs.iteritems()]
gyp_defs_list.sort()
env['GYP_DEFINES'] = ' '.join(gyp_defs_list)
rv = {
'builder_cfg': builder_dict,
'dm_flags': dm_flags.get_args(builder_name),
'env': env,
'nanobench_flags': nanobench_flags.get_args(builder_name),
}
device = device_cfg(builder_dict)
if device:
rv['device_cfg'] = device
board = product_board(builder_dict)
if board:
rv['product.board'] = board
role = builder_dict['role']
if role == builder_name_schema.BUILDER_ROLE_HOUSEKEEPER:
configuration = CONFIG_RELEASE
else:
configuration = builder_dict.get(
'configuration', CONFIG_DEBUG)
arch = (builder_dict.get('arch') or builder_dict.get('target_arch'))
if ('Win' in builder_dict.get('os', '') and arch == 'x86_64'):
configuration += '_x64'
rv['configuration'] = configuration
if configuration == CONFIG_COVERAGE:
rv['do_compile_steps'] = False
rv['do_test_steps'] = role == builder_name_schema.BUILDER_ROLE_TEST
rv['do_perf_steps'] = (role == builder_name_schema.BUILDER_ROLE_PERF or
(role == builder_name_schema.BUILDER_ROLE_TEST and
configuration == CONFIG_DEBUG))
if rv['do_test_steps'] and 'Valgrind' in builder_name:
rv['do_perf_steps'] = True
if 'GalaxyS4' in builder_name:
rv['do_perf_steps'] = False
rv['build_targets'] = build_targets_from_builder_dict(
builder_dict, rv['do_test_steps'], rv['do_perf_steps'])
# Do we upload perf results?
upload_perf_results = False
if role == builder_name_schema.BUILDER_ROLE_PERF:
upload_perf_results = True
rv['upload_perf_results'] = upload_perf_results
# Do we upload correctness results?
skip_upload_bots = [
'ASAN',
'Coverage',
'MSAN',
'TSAN',
'UBSAN',
'Valgrind',
]
upload_dm_results = True
for s in skip_upload_bots:
if s in builder_name:
upload_dm_results = False
break
rv['upload_dm_results'] = upload_dm_results
return rv
cov_end = lineno() # Don't care about code coverage past here.
def self_test():
import coverage # This way the bots don't need coverage.py to be installed.
args = {}
cases = [
'Build-Mac10.8-Clang-Arm7-Debug-Android',
'Build-Win-MSVC-x86-Debug',
'Build-Win-MSVC-x86-Debug-GDI',
'Build-Win-MSVC-x86-Debug-Exceptions',
'Build-Ubuntu-GCC-Arm7-Debug-Android_FrameworkDefs',
'Build-Ubuntu-GCC-Arm7-Debug-Android_NoNeon',
'Build-Ubuntu-GCC-Arm7-Debug-CrOS_Daisy',
'Build-Ubuntu-GCC-x86_64-Debug-CrOS_Link',
'Build-Ubuntu-GCC-x86_64-Release-Mesa',
'Build-Ubuntu-GCC-x86_64-Release-ANGLE',
'Housekeeper-PerCommit',
'Perf-Win8-MSVC-ShuttleB-GPU-HD4600-x86_64-Release-Trybot',
'Perf-Ubuntu-GCC-ShuttleA-GPU-GTX660-x86_64-Release-VisualBench',
'Test-Android-GCC-GalaxyS4-GPU-SGX544-Arm7-Debug',
'Perf-Android-GCC-Nexus5-GPU-Adreno330-Arm7-Release-Appurify',
'Test-Android-GCC-Nexus6-GPU-Adreno420-Arm7-Debug',
'Test-ChromeOS-GCC-Link-CPU-AVX-x86_64-Debug',
'Test-iOS-Clang-iPad4-GPU-SGX554-Arm7-Debug',
'Test-Mac-Clang-MacMini6.2-GPU-HD4000-x86_64-Debug-CommandBuffer',
'Test-Mac10.8-Clang-MacMini4.1-GPU-GeForce320M-x86_64-Release',
'Test-Ubuntu-Clang-GCE-CPU-AVX2-x86_64-Coverage',
('Test-Ubuntu-GCC-GCE-CPU-AVX2-x86_64-Release-'
'SK_USE_DISCARDABLE_SCALEDIMAGECACHE'),
'Test-Ubuntu-GCC-GCE-CPU-AVX2-x86_64-Release-SKNX_NO_SIMD',
'Test-Ubuntu-GCC-GCE-CPU-AVX2-x86_64-Release-Fast',
'Test-Ubuntu-GCC-GCE-CPU-AVX2-x86_64-Release-Shared',
'Test-Ubuntu-GCC-ShuttleA-GPU-GTX550Ti-x86_64-Release-Valgrind',
'Test-Win10-MSVC-ShuttleA-GPU-GTX660-x86_64-Debug-Vulkan',
'Test-Win8-MSVC-ShuttleB-GPU-HD4600-x86-Release-ANGLE',
'Test-Win8-MSVC-ShuttleA-CPU-AVX-x86_64-Debug',
]
cov = coverage.coverage()
cov.start()
for case in cases:
args[case] = get_builder_spec(case)
cov.stop()
this_file = os.path.basename(__file__)
_, _, not_run, _ = cov.analysis(this_file)
filtered = [line for line in not_run if
line > cov_start and line < cov_end and line not in cov_skip]
if filtered:
print 'Lines not covered by test cases: ', filtered
sys.exit(1)
golden = this_file.replace('.py', '.json')
with open(os.path.join(os.path.dirname(__file__), golden), 'w') as f:
json.dump(args, f, indent=2, sort_keys=True)
if __name__ == '__main__':
if len(sys.argv) == 2 and sys.argv[1] == 'test':
self_test()
sys.exit(0)
if len(sys.argv) != 3:
print usage
sys.exit(1)
with open(sys.argv[1], 'w') as out:
json.dump(get_builder_spec(sys.argv[2]), out)
|
qrealka/skia-hc
|
tools/buildbot_spec.py
|
Python
|
apache-2.0
| 13,404
|
import base64
import errno
import random
import ssl
import threading
import time
import copy
import websocket
import xmltodict
import config
import debug
from utils import exitutils
import hubs.isy.isycodes as isycodes
import logsupport
from controlevents import CEvent, PostEvent, ConsoleEvent, PostIfInterested
from hubs.isy.isycodes import EVENT_CTRL, formatwsitem
from logsupport import ConsoleWarning, ConsoleError, ConsoleDetail, ConsoleDetailHigh
from utils.threadmanager import ThreadStartException
from utils.utilfuncs import safeprint
class ISYEMInternalError(Exception):
pass
def BaseAddr(addr):
return None if addr is None else ' '.join(addr.split(' ')[0:-1])
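# Hedged usage sketch (the address below is illustrative, not from this module): BaseAddr
# drops the trailing space-separated token of an address, e.g.
#   BaseAddr('1A 2B 3C 1') -> '1A 2B 3C'
#   BaseAddr(None)         -> None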
class ISYEventMonitor(object):
def __init__(self, thisISY):
		self.connectionmode = 'try994'  # 'try994': try connecting with the ISYSUB subprotocol; 'trypolisy': try without it; '994worked'/'polisyworked': that convention succeeded
self.isy = thisISY
self.hubname = thisISY.name
self.QHnum = 1
self.a = base64.b64encode((self.isy.user + ':' + self.isy.password).encode('utf-8'))
self.watchstarttime = time.time()
self.watchlist = []
self.streamid = "unset"
self.seq = 0
self.lastheartbeat = 0
self.hbcount = 0
self.AlertNodes = {}
self.delayedstart = 0
self.longdown = 0
self.WS = None
self.THstate = 'init'
self.querycnt = 0
self.queryqueued = {}
self.LastMsgErr = ('***', -99)
self.isy.Busy = 0
self.lasterror = 'Init'
debug.debugPrint('DaemonCtl', "Queue Handler ", self.QHnum, " started: ", self.watchstarttime)
self.reportablecodes = ["DON", "DFON", "DOF", "DFOF", "ST", "CLISP", "CLISPH", "CLISPC", "CLIFS",
"CLIMD", "CLIHUM", "CLIHCS", "BRT", "DIM"] # "RR", "OL",
def EndWSServer(self):
self.lasterror = "DirectCommError"
self.WS.close()
def RealQuery(self, enode, seq, ndnm):
logsupport.Logs.Log("Queued query attempt (" + str(seq) + ") for: " + ndnm)
time.sleep(105 + random.randint(0, 30)) # allow any in progress query at ISY a chance to clear
if enode not in self.isy.ErrNodes:
logsupport.Logs.Log("Node error cleared without need of query (" + str(seq) + ") for: " + ndnm)
return
logsupport.Logs.Log(self.hubname + ": Attempt query (" + str(seq) + ") for errored node: " + ndnm,
severity=ConsoleWarning)
r = self.isy.try_ISY_comm('query/' + enode, timeout=60, closeonfail=False)
if r == '':
logsupport.Logs.Log(self.hubname + ": Query (" + str(seq) + ") attempt failed for node: " + ndnm,
severity=ConsoleWarning)
else:
time.sleep(2)
logsupport.Logs.Log(self.hubname + ": Query (" + str(seq) + ") attempt succeeded for node: " + ndnm)
if enode in self.isy.ErrNodes: del self.isy.ErrNodes[enode]
if enode in self.queryqueued: del self.queryqueued[enode]
def DoNodeQuery(self, enode, ndnm):
if enode not in self.queryqueued:
self.querycnt += 1
self.queryqueued[enode] = self.querycnt
t = threading.Thread(name='Query-' + str(self.querycnt) + '-' + enode, target=self.RealQuery, daemon=True,
args=(enode, self.querycnt, ndnm))
t.start()
else:
logsupport.Logs.Log(
self.hubname + ": Query " + str(self.queryqueued[enode]) + " already queued for node: " + ndnm)
def FakeNodeChange(self):
# noinspection PyArgumentList
PostEvent(ConsoleEvent(CEvent.HubNodeChange, hub=self.isy.name, node=None, value=-1))
def reinit(self):
self.watchstarttime = time.time()
self.watchlist = []
self.seq = 0
self.hbcount = 0
self.QHnum += 1
def PostStartQHThread(self):
if self.isy.version == -1:
# test mode
return
hungcount = 40
while self.THstate == 'restarting':
logsupport.Logs.Log(self.hubname + " Waiting thread start")
time.sleep(2)
hungcount -= 1
if hungcount < 0: raise ThreadStartException
while self.THstate == 'delaying':
time.sleep(1)
hungcount = 60
while self.THstate == 'starting':
logsupport.Logs.Log(self.hubname + ": Waiting initial status dump")
time.sleep(2)
hungcount -= 1
if hungcount < 0: raise ThreadStartException
if self.THstate == 'running':
self.isy._HubOnline = True
self.isy.Vars.CheckValsUpToDate(reload=True)
logsupport.Logs.Log(self.hubname + ": Initial status streamed ", self.seq, " items and vars updated")
elif self.THstate == 'failed':
logsupport.Logs.Log(self.hubname + " Failed Thread Restart", severity=ConsoleWarning)
else:
logsupport.Logs.Log(self.hubname + " Unknown ISY QH Thread state")
def PreRestartQHThread(self):
self.isy._HubOnline = False
self.THstate = 'restarting'
try:
if self.lasterror == 'ISYSocketTimeOut':
logsupport.Logs.Log(self.hubname + '(TimeoutError) Wait for likely router reboot or down',
severity=ConsoleWarning, tb=False)
self.delayedstart = 150
self.reinit()
return
if self.lasterror == 'ISYWSTimeOut':
logsupport.Logs.Log(self.hubname + ' WS restart after surprise close - short delay (15)',
severity=ConsoleWarning)
self.delayedstart = 15
elif self.lasterror == 'ISYNetDown':
# likely home network down so wait a bit
logsupport.Logs.Log(self.hubname + ' WS restart for NETUNREACH - delay likely router reboot or down',
severity=ConsoleWarning)
self.delayedstart = 121
elif self.lasterror == 'ISYClose':
logsupport.Logs.Log(self.hubname + ' Recovering closed WS stream')
self.delayedstart = 2
elif self.lasterror == 'DirectCommError':
				logsupport.Logs.Log(self.hubname + ' WS restart because of direct communication failure')
self.delayedstart = 90 # probably ISY doing query
elif self.lasterror == 'ISYNoRoute':
logsupport.Logs.Log("{}: Hub probably down (semi) permanently ({})".self.name, self.longdown)
self.delayedstart = 3600 + self.longdown * 1800 # spread checks way out
self.isy._HubOnline = False
self.longdown += 1
else:
logsupport.Logs.Log(self.hubname + ' Unexpected error on WS stream: ', self.lasterror,
severity=ConsoleError, tb=False)
self.delayedstart = 90
except Exception as e:
logsupport.Logs.Log(self.hubname + ' PreRestartQH internal error ', e)
self.reinit()
def QHandler(self):
def on_error(qws, error):
self.isy.HBWS.Entry(repr(error))
self.lasterror = "ISYUnknown"
reconsev = ConsoleWarning if config.sysStore.ErrLogReconnects else logsupport.ConsoleInfo
if isinstance(error, websocket.WebSocketConnectionClosedException):
logsupport.Logs.Log(self.hubname + " WS connection closed - attempt to recontact ISY",
severity=reconsev)
self.lasterror = 'ISYClose'
elif isinstance(error, websocket.WebSocketTimeoutException):
logsupport.Logs.Log(self.hubname + " WS connection timed out", severity=ConsoleWarning)
self.lasterror = 'ISYWSTimeOut'
elif isinstance(error, TimeoutError):
logsupport.Logs.Log(self.hubname + " WS socket timed out", severity=ConsoleWarning)
self.lasterror = 'ISYSocketTimeOut'
elif isinstance(error, AttributeError):
logsupport.Logs.Log(self.hubname + " WS library bug", severity=ConsoleWarning)
self.lasterror = 'ISYClose'
elif isinstance(error, OSError):
if error.errno == errno.ENETUNREACH:
logsupport.Logs.Log(self.hubname + " WS network down", severity=ConsoleWarning)
self.lasterror = 'ISYNetDown'
else:
logsupport.Logs.Log(self.hubname + ' WS OS error', repr(error), severity=ConsoleError, tb=False)
self.lasterror = 'ISYNoRoute' # probably semi permanent failure
else:
if self.connectionmode == 'try994':
logsupport.Logs.Log("{}: Connection failed using 994 convention".format(self.hubname))
self.connectionmode = 'trypolisy'
elif self.connectionmode == 'trypolisy':
logsupport.Logs.Log("{}: Connection failed using Polisy convention".format(self.hubname))
self.connectionmode = 'try994'
else:
logsupport.Logs.Log(self.hubname + " Error in WS stream " + str(self.QHnum) + ': ' + repr(error),
severity=ConsoleError,
tb=True)
logsupport.Logs.Log(repr(websocket.WebSocketConnectionClosedException))
self.THstate = 'failed'
debug.debugPrint('DaemonCtl', "Websocket stream error", self.QHnum, repr(error))
qws.close()
# noinspection PyUnusedLocal
def on_close(qws, code, reason):
self.isy.HBWS.Entry("Close")
reconsev = ConsoleWarning if config.sysStore.ErrLogReconnects else logsupport.ConsoleInfo
logsupport.Logs.Log("{} WS stream {} closed: {}:{}".format(self.hubname, self.QHnum, code, reason),
severity=reconsev, hb=True)
debug.debugPrint('DaemonCtl', "ISY Websocket stream closed", str(code), str(reason))
def on_open(qws):
self.isy.HBWS.Entry("Open")
self.THstate = 'starting'
if self.connectionmode == 'try994':
self.connectionmode = '994worked'
logsupport.Logs.Log('{} connection worked using 994 convention'.format(self.isy.name))
elif self.connectionmode == 'trypolisy':
self.connectionmode = 'polisyworked'
logsupport.Logs.Log('{} connection worked using Polisy convention'.format(self.isy.name))
mess = '994' if self.connectionmode == '994worked' else 'Polisy' if self.connectionmode == 'polisyworked' else self.connectionmode
logsupport.Logs.Log("{}: WS stream {} opened ({})".format(self.hubname, self.QHnum, mess))
debug.debugPrint('DaemonCtl', "Websocket stream opened: ", self.QHnum, self.streamid)
self.WS = qws
# noinspection PyUnusedLocal,PyUnboundLocalVariable
def on_message(qws, message):
loopstart = time.time()
self.isy.HBWS.Entry('Message: {}'.format(repr(message)))
# print('Message: {}'.format(message))
try:
m = 'parse error'
m = xmltodict.parse(message)
msav = copy.deepcopy(m)
if debug.dbgStore.GetVal('ISYDump'):
debug.ISYDump("isystream.dmp", message, pretty=False)
# print(m)
if 'SubscriptionResponse' in m:
sr = m['SubscriptionResponse']
if self.streamid != sr['SID']:
self.streamid = sr['SID']
logsupport.Logs.Log("{}: Stream id: {}".format(self.hubname, self.streamid))
elif 'Event' in m:
E = m['Event']
esid = E.pop('@sid', 'No sid')
if self.streamid != esid:
logsupport.Logs.Log(
self.hubname + " Unexpected event stream change: " + self.streamid + "/" + str(esid),
severity=ConsoleError, tb=False)
exitutils.FatalError("WS Stream ID Changed")
eseq = int(E.pop('@seqnum', -99))
if self.seq != eseq:
logsupport.Logs.Log(
self.hubname + " Event mismatch - Expected: " + str(self.seq) + " Got: " + str(eseq),
severity=ConsoleWarning)
raise ISYEMInternalError
else:
self.seq += 1
ecode = E.pop('control', 'Missing control')
if ecode in EVENT_CTRL:
prcode = EVENT_CTRL[ecode]
else:
prcode = "**" + ecode + "**"
eaction = E.pop('action', 'No action')
enode = E.pop('node', 'No node')
eInfo = E.pop('eventInfo', 'No EventInfo')
if isinstance(eaction, dict):
debug.debugPrint('DaemonStream', "V5 stream - pull up action value: ", eaction)
eaction = eaction["#text"] # the new xmltodict will return as data['action']['#text']
if enode in self.isy.NodesByAddr: # get the node to set if any
N = self.isy.NodesByAddr[enode]
else:
N = None
if ecode == 'ST': # update cached state first before posting alerts or race
if isinstance(N, isycodes.ThermType):
N.cur = isycodes.NormalizeState(eaction)
elif N is not None:
oldstate = N.devState
N.devState = isycodes.NormalizeState(eaction)
logsupport.Logs.Log('ISYchg', 'ISY Node: ', N.name, ' state change from: ', oldstate,
' to: ', N.devState, severity=ConsoleDetailHigh)
if (oldstate == N.devState) and self.THstate == 'running':
logsupport.Logs.Log(self.hubname +
" State report with no change: " + N.name + ' state: ' + str(
oldstate))
else:
logsupport.Logs.Log(self.hubname +
" Status change for " + N.name + '(' + str(enode) + ') to ' + str(
N.devState), severity=ConsoleDetailHigh)
# status changed to post to any alerts that want it
# since alerts can only react to the state of a node we check only on an ST message
# screens on the other hand may need to know about other actions (thermostat e.g.)
# so they get checked below under reportablecodes
# if I check alerts there I get extra invocations for the DON and DOF e.g. which while not
						# harmful are anomalous
if enode in self.AlertNodes:
# alert node changed
debug.debugPrint('DaemonCtl', 'ISY reports change(alert):',
self.isy.NodesByAddr[enode].name)
for a in self.AlertNodes[enode]:
if self.THstate != 'running':
# this is a restart or initial dump so indicate upwards to avoid misleading log entry
if a.state == 'Armed':
a.state = 'Init'
logsupport.Logs.Log(self.hubname + " Node alert fired: " + str(a),
severity=ConsoleDetail)
# noinspection PyArgumentList
PostEvent(ConsoleEvent(CEvent.ISYAlert, hub=self.isy.name, node=enode,
value=isycodes.NormalizeState(eaction), alert=a))
elif ecode == 'CLIHCS' and isinstance(N, isycodes.ThermType):
N.statecode = isycodes.NormalizeState(eaction)
elif ecode == 'CLIFS' and isinstance(N, isycodes.ThermType):
N.fancode = isycodes.NormalizeState(eaction)
elif ecode == 'CLIMD' and isinstance(N, isycodes.ThermType):
N.modecode = isycodes.NormalizeState(eaction)
elif ecode == 'CLIHUM' and isinstance(N, isycodes.ThermType):
N.hum = isycodes.NormalizeState(eaction)
elif ecode == 'CLISPH' and isinstance(N, isycodes.ThermType):
N.setlow = isycodes.NormalizeState(eaction)
elif ecode == 'CLISPC' and isinstance(N, isycodes.ThermType):
N.sethigh = isycodes.NormalizeState(eaction)
if ecode in self.reportablecodes:
# Node change report
debug.debugPrint('DaemonStream', time.time() - config.sysStore.ConsoleStartTime,
"Status update in stream: ",
eseq, ":",
prcode, " : ", enode, " : ", eInfo, " : ", eaction)
# logsupport.Logs.Log('reportable event '+str(ecode)+' for '+str(enode)+' action '+str(eaction))
PostIfInterested(self.isy, enode, isycodes.NormalizeState(eaction))
elif (prcode == 'Trigger') and (eaction == '6'):
vinfo = eInfo['var']
vartype = int(vinfo['@type'])
varid = int(vinfo['@id'])
varval = int(vinfo['val'])
debug.debugPrint('DaemonCtl', 'Var change: ', self.isy.Vars.GetNameFromAttr((vartype, varid)),
' set to ', varval)
debug.debugPrint('DaemonCtl', 'Var change:', ('Unkn', 'Integer', 'State')[vartype],
' variable ', varid,
' set to ', varval)
try:
self.isy.Vars.SetValByAttr((vartype, varid), varval, modifier=True)
except KeyError:
logsupport.Logs.Log(
"Unknown variable from " + self.hubname + " - probably added since startup",
severity=ConsoleWarning)
elif prcode == 'Heartbeat':
if self.hbcount > 0:
# wait 2 heartbeats
self.THstate = 'running'
self.lastheartbeat = time.time()
self.hbcount += 1
elif prcode == 'Billing':
self.THstate = 'running'
else:
pass # handle any other?
efmtact = E.pop('fmtAct', 'v4stream')
efmtnm = E.pop('fmtName', 'noName')
if E:
lev = ConsoleDetailHigh if str(
							enode) in self.isy.V3Nodes else ConsoleWarning  # suppress to detail if it is a V3 node
logsupport.Logs.Log(
self.hubname + " Extra info in event: " + str(ecode) + '/' + str(prcode) + '/' + str(
eaction) + '/' + str(enode) + '/' + str(eInfo) + ' ' + str(E), severity=lev)
debug.debugPrint('DaemonStream', time.time() - config.sysStore.ConsoleStartTime,
formatwsitem(esid, eseq, ecode, eaction, enode, eInfo, E, self.isy))
try:
isynd = self.isy.NodesByAddr[enode].name
except (KeyError, AttributeError):
isynd = enode
if ecode == '_5':
now = time.time()
if str(eaction) == '1':
# logsupport.Logs.Log(self.hubname, ' went busy')
self.isy.Busy = now
elif str(eaction) == '0':
if self.isy.Busy != 0:
# logsupport.Logs.Log(self.hubname, " cleared busy")
if now - self.isy.Busy > 10:
logsupport.Logs.Log(
"{}: busy for {:.4f} seconds".format(self.hubname, now - self.isy.Busy))
self.isy.Busy = 0
else:
logsupport.Logs.Log(self.hubname, " reported stand-alone not busy")
else:
logsupport.Logs.Log(self.hubname, " reported System Status: ", str(eaction))
if ecode == "ST" or (ecode == "_3" and eaction == "CE"):
if self.LastMsgErr[0] != '***' and (
BaseAddr(self.LastMsgErr[0]) == BaseAddr(enode)):
# ERR msg followed by clearing - ISY weirdness?
logsupport.Logs.Log(
"{} reported and immediately cleared error for node: {} ({}) (seq:{}/{})".format(
self.hubname,
isynd, BaseAddr(self.LastMsgErr[0]), self.LastMsgErr[1], eseq),
severity=ConsoleWarning, hb=True)
self.LastMsgErr = ('***', -99)
elif enode in self.isy.ErrNodes:
logsupport.Logs.Log("{} cleared comm error for node: {}".format(self.hubname, isynd))
if enode in self.isy.ErrNodes:
# logsupport.Logs.Log("Query thread still running")
del self.isy.ErrNodes[enode]
if self.LastMsgErr != ('***', -99):
# previous message was ERR and wasn't immediately cleared
try:
isyerrnd = self.isy.NodesByAddr[self.LastMsgErr[0]].name
except (KeyError, AttributeError):
isyerrnd = self.LastMsgErr[0]
logsupport.Logs.Log(
"{} WS stream shows comm error for node: {}(Seq:{})".format(self.hubname, isyerrnd,
self.LastMsgErr[1]),
severity=ConsoleWarning, hb=True)
if self.LastMsgErr[0] not in self.isy.ErrNodes:
self.isy.ErrNodes[self.LastMsgErr[0]] = eseq
self.DoNodeQuery(self.LastMsgErr[0], isyerrnd)
self.LastMsgErr = ('***', -99)
if ecode == "ERR":
if str(eaction) == "0":
pass
# logsupport.Logs.Log("ERR(0) seen: {}".format(repr(m)))
else:
# Note the error and wait one message to see if it immediately clears
self.LastMsgErr = (enode, eseq)
logsupport.Logs.Log("ERR(1) seen: {}".format(repr(xmltodict.parse(message))),
severity=ConsoleWarning)
if ecode == "_3" and eaction == "NE":
self.LastMsgErr = (enode, eseq)
logsupport.Logs.Log(
"{} WS stream reported NE error code on WS stream for node{}(Seq:{})".format(self.hubname,
isynd, eseq),
hb=True)
else:
logsupport.Logs.Log(self.hubname + " Strange item in event stream: " + str(m),
severity=ConsoleWarning)
safeprint(message)
except Exception as E:
logsupport.Logs.Log(self.hubname + " Exception in QH on message: ", repr(msav), ' Excp: ', repr(E),
severity=ConsoleWarning)
loopend = time.time()
self.isy.HBWS.Entry('Processing time: {} Done: {}'.format(loopend - loopstart, repr(
message)))
time.sleep(.001) # force thread to give up processor to allow response to time events
if self.isy.version == -1:
self.isy._HubOnline = True
time.sleep(7)
with open('/home/pi/Console/isystream.dmp', 'r') as f:
mes = f.readline() # absorb first
# safeprint("Message1: {}".format(mes))
while True:
mes = f.readline().rstrip('\n')
if mes == '':
# safeprint('Done')
break
# safeprint("Message: {}".format(mes))
on_message(None, mes)
time.sleep(.4)
while True:
time.sleep(500)
return
self.THstate = 'delaying'
logsupport.Logs.Log("{}: WS stream thread {} setup".format(self.hubname, self.QHnum), severity=ConsoleDetail)
if self.delayedstart != 0:
logsupport.Logs.Log(self.hubname + " Delaying Hub restart for probable network reset: ",
str(self.delayedstart), ' seconds')
time.sleep(self.delayedstart)
# websocket.enableTrace(True)
websocket.setdefaulttimeout(30)
if self.isy.addr.startswith('http://'):
wsurl = 'ws://' + self.isy.addr[7:] + '/rest/subscribe'
elif self.isy.addr.startswith('https://'):
wsurl = 'wss://' + self.isy.addr[8:] + '/rest/subscribe'
else:
wsurl = 'ws://' + self.isy.addr + '/rest/subscribe'
import logging
WStrace = open('/home/pi/WStrace', 'w')
print('Open {}'.format(wsurl), file=WStrace)
websocket.enableTrace(True, handler=logging.StreamHandler(stream=WStrace))
while True:
try:
# noinspection PyArgumentList
if self.connectionmode in ('trypolisy', 'polisyworked'):
ws = websocket.WebSocketApp(wsurl, on_message=on_message,
on_error=on_error,
on_close=on_close, on_open=on_open,
header={'Authorization': 'Basic ' + self.a.decode('ascii')})
else:
ws = websocket.WebSocketApp(wsurl, on_message=on_message,
on_error=on_error,
subprotocols=['ISYSUB'],
on_close=on_close, on_open=on_open,
header={'Authorization': 'Basic ' + self.a.decode('ascii')})
break
except AttributeError as e:
logsupport.Logs.Log(self.hubname + " Problem starting WS handler - retrying: ", repr(e))
self.lastheartbeat = time.time()
ws.run_forever(ping_timeout=999, sslopt={"cert_reqs": ssl.CERT_NONE})
self.THstate = 'failed'
self.isy._HubOnline = False
sev = ConsoleWarning if config.sysStore.ErrLogReconnects else logsupport.ConsoleInfo
logsupport.Logs.Log(self.hubname + " QH Thread " + str(self.QHnum) + " exiting", severity=sev,
tb=False)
|
kevinkahn/softconsole
|
hubs/isy/isyeventmonitor.py
|
Python
|
apache-2.0
| 21,759
|
from .array_ import array # noqa: F401
from .base import ( # noqa: F401
ExtensionArray,
ExtensionOpsMixin,
ExtensionScalarOpsMixin,
)
from .categorical import Categorical # noqa: F401
from .datetimes import DatetimeArray # noqa: F401
from .integer import IntegerArray, integer_array # noqa: F401
from .interval import IntervalArray # noqa: F401
from .numpy_ import PandasArray, PandasDtype # noqa: F401
from .period import PeriodArray, period_array # noqa: F401
from .sparse import SparseArray # noqa: F401
from .timedeltas import TimedeltaArray # noqa: F401
|
kushalbhola/MyStuff
|
Practice/PythonApplication/env/Lib/site-packages/pandas/core/arrays/__init__.py
|
Python
|
apache-2.0
| 581
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pipeline utility functions."""
import json
import os
import sys
from absl import logging
import tensorflow.compat.v1 as tf
import tf_slim
from poem.core import common
from poem.core import keypoint_utils
from poem.core import tfe_input_layer
def read_batch_from_dataset_tables(input_table_patterns,
batch_sizes,
num_instances_per_record,
shuffle,
num_epochs,
keypoint_names_3d=None,
keypoint_names_2d=None,
min_keypoint_score_2d=-1.0,
shuffle_buffer_size=4096,
num_shards=1,
shard_index=None,
common_module=common,
dataset_class=tf.data.TFRecordDataset,
input_example_parser_creator=None,
seed=None):
"""Reads data from dataset table.
  IMPORTANT: We assume that 2D keypoints from the input have been normalized by
  image size. This function reads image sizes from the input and denormalizes
  the 2D keypoints with them. No normalization is expected and no
  denormalization is performed for 3D keypoints.
Output tensors may include:
keypoints: A tensor for standardized 2D keypoints. Shape = [batch_size,
num_instances_per_record, num_keypoints_2d, 2].
keypoint_scores: A tensor for 2D keypoint scores. Shape = [batch_size,
num_instances_per_record, num_keypoints_2d].
keypoints_3d: A tensor for standardized 3D keypoints. Shape = [batch_size,
num_instances_per_record, num_keypoints_3d, 3].
Args:
input_table_patterns: A list of strings for the paths or pattern to input
tables.
batch_sizes: A list of integers for the batch sizes to read from each table.
num_instances_per_record: An integer for the number of instances per
tf.Example record.
shuffle: A boolean for whether to shuffle batch.
num_epochs: An integer for the number of epochs to read. Use None to read
indefinitely, in which case remainder batch will be dropped.
keypoint_names_3d: A list of strings for 3D keypoint names to read
      (coordinates). Use None to skip reading 3D keypoints.
keypoint_names_2d: A list of strings for 2D keypoint names to read
(coordinates and scores). Use None to skip reading 2D keypoints.
    min_keypoint_score_2d: A float for the minimum score to consider a 2D
      keypoint as valid; lower-scoring keypoints are masked out. Use a negative
      value to keep all keypoints.
shuffle_buffer_size: An integer for the buffer size used for shuffling. A
large buffer size benefits shuffling quality.
    num_shards: An integer for the number of shards to divide the dataset. This
      is useful for distributed training. See `tf.data.Dataset.shard` for
      details.
    shard_index: An integer for the shard index to use. This is useful for
      distributed training and should usually be set to the id of a
synchronized worker. See `tf.data.Dataset.shard` for details. Note this
must be specified if `num_shards` is greater than 1.
common_module: A Python module that defines common constants.
dataset_class: A dataset class to use. Must match input table type.
input_example_parser_creator: A function handle for creating parser
function. If None, uses the default parser creator.
seed: An integer for random seed.
Returns:
    outputs: A dictionary of output tensors.
"""
parser_kwargs = {
'num_objects': num_instances_per_record,
}
if keypoint_names_3d:
parser_kwargs.update({
'keypoint_names_3d': keypoint_names_3d,
'include_keypoint_scores_3d': False,
})
if keypoint_names_2d:
parser_kwargs.update({
'keypoint_names_2d': keypoint_names_2d,
'include_keypoint_scores_2d': True,
})
if input_example_parser_creator is None:
input_example_parser_creator = tfe_input_layer.create_tfe_parser
parser_fn = input_example_parser_creator(
common_module=common_module, **parser_kwargs)
  # TODO(lzyuan): consider refactoring read_batch_from_batches into another file.
outputs = tfe_input_layer.read_batch_from_tables(
input_table_patterns,
batch_sizes=batch_sizes,
drop_remainder=num_epochs is None,
num_epochs=num_epochs,
num_shards=num_shards,
shard_index=shard_index,
shuffle=shuffle,
shuffle_buffer_size=shuffle_buffer_size,
dataset_class=dataset_class,
parser_fn=parser_fn,
seed=seed)
outputs = tf.data.make_one_shot_iterator(outputs).get_next()
if keypoint_names_2d:
    # Since we assume 2D keypoints from the input have been normalized by image
    # size, we need to denormalize them to restore the correct aspect ratio.
keypoints_2d = keypoint_utils.denormalize_points_by_image_size(
outputs[common_module.KEY_KEYPOINTS_2D],
image_sizes=outputs[common_module.KEY_IMAGE_SIZES])
keypoint_scores_2d = outputs[common_module.KEY_KEYPOINT_SCORES_2D]
if min_keypoint_score_2d < 0.0:
keypoint_masks_2d = tf.ones_like(keypoint_scores_2d, dtype=tf.float32)
else:
keypoint_masks_2d = tf.cast(
tf.math.greater_equal(keypoint_scores_2d, min_keypoint_score_2d),
dtype=tf.float32)
outputs.update({
common_module.KEY_KEYPOINTS_2D: keypoints_2d,
common_module.KEY_KEYPOINT_MASKS_2D: keypoint_masks_2d
})
return outputs
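# Hedged usage sketch for read_batch_from_dataset_tables (illustrative only: the table
# pattern, batch size and keypoint names below are made-up assumptions, not values from
# this module):
#
#   inputs = read_batch_from_dataset_tables(
#       ['/path/to/train-*.tfrecord'], batch_sizes=[256], num_instances_per_record=2,
#       shuffle=True, num_epochs=None, keypoint_names_2d=['NOSE_TIP', 'LEFT_SHOULDER'],
#       min_keypoint_score_2d=0.5)
#   keypoints_2d = inputs[common.KEY_KEYPOINTS_2D]
#   keypoint_masks_2d = inputs[common.KEY_KEYPOINT_MASKS_2D]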
def get_learning_rate(schedule_type,
init_learning_rate,
global_step=None,
**kwargs):
"""Creates learning rate with schedules.
Currently supported schedules include:
'EXP_DECAY'
Args:
schedule_type: A string for the type of learning rate schedule to choose.
init_learning_rate: A float or tensor for the learning rate.
global_step: A tensor for the global step. If None, uses default value.
**kwargs: A dictionary of assorted arguments used by learning rate
schedulers, keyed in the format of '${schedule_type}_${arg}'.
Returns:
learning_rate: A learning rate tensor.
Raises:
ValueError: If the schedule type is not supported.
"""
if schedule_type == 'EXP_DECAY':
if global_step is None:
global_step = tf.train.get_or_create_global_step()
learning_rate = tf.train.exponential_decay(
init_learning_rate,
global_step=global_step,
decay_steps=kwargs.get('EXP_DECAY_decay_steps'),
decay_rate=kwargs.get('EXP_DECAY_decay_rate'),
staircase=kwargs.get('EXP_DECAY_staircase', False))
else:
    raise ValueError('Unsupported schedule type: `%s`.' % str(schedule_type))
return learning_rate
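# Hedged usage sketch: schedule kwargs are keyed as '${schedule_type}_${arg}', so an
# exponential-decay schedule might be configured as below (the numeric values are
# illustrative assumptions):
#
#   learning_rate = get_learning_rate(
#       'EXP_DECAY', 0.02, EXP_DECAY_decay_steps=100000, EXP_DECAY_decay_rate=0.96,
#       EXP_DECAY_staircase=True)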
def get_optimizer(optimizer_type, learning_rate, **kwargs):
"""Creates optimizer with learning rate.
Currently supported optimizers include:
'ADAGRAD'
Args:
optimizer_type: A string for the type of optimizer to choose.
learning_rate: A float or tensor for the learning rate.
**kwargs: A dictionary of assorted arguments used by optimizers, keyed in
the format of '${optimizer_type}_${arg}'.
Returns:
optimizer: An optimizer class object.
Raises:
ValueError: If the optimizer type is not supported.
"""
if optimizer_type == 'ADAGRAD':
optimizer = tf.train.AdagradOptimizer(
learning_rate,
initial_accumulator_value=kwargs.get(
'ADAGRAD_initial_accumulator_value', 0.1))
elif optimizer_type == 'ADAM':
optimizer = tf.train.AdamOptimizer(
learning_rate,
beta1=kwargs.get('ADAM_beta1', 0.9),
beta2=kwargs.get('ADAM_beta2', 0.999),
epsilon=kwargs.get('ADAM_epsilon', 1e-8))
elif optimizer_type == 'RMSPROP':
optimizer = tf.train.RMSPropOptimizer(
learning_rate=learning_rate,
decay=kwargs.get('RMSPROP_decay', 0.9),
momentum=kwargs.get('RMSPROP_momentum', 0.9),
epsilon=kwargs.get('RMSPROP_epsilon', 1e-10))
else:
raise ValueError('Unsupported optimizer type: `%s`.' % str(optimizer_type))
return optimizer
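# Hedged usage sketch: optimizer kwargs follow the same '${optimizer_type}_${arg}' keying
# (the values are illustrative assumptions):
#
#   optimizer = get_optimizer('ADAM', learning_rate, ADAM_beta1=0.9, ADAM_beta2=0.999)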
def add_moving_average(decay):
"""Sets up exponential moving averages for training.
Args:
decay: A float as the moving average decay factor.
Returns:
train_op: An update training op object.
"""
variables_to_average = tf.trainable_variables()
variable_averages = tf.train.ExponentialMovingAverage(
decay, num_updates=tf.train.get_or_create_global_step())
train_op = variable_averages.apply(variables_to_average)
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, train_op)
return train_op
def get_moving_average_variables_to_restore(global_step=None):
"""Gets variables to restore.
Args:
    global_step: A tensor for the global step to include. If None, the global
      step variable is not restored, which is intended for exporting an
      inference graph. For evaluation, a global step must be specified.
Returns:
variables_to_restore: A dictionary of variables to restore.
"""
variable_averages = tf.train.ExponentialMovingAverage(0.0, global_step)
variables_to_restore = variable_averages.variables_to_restore()
if global_step is not None:
variables_to_restore[global_step.op.name] = global_step
return variables_to_restore
def get_init_fn(train_dir=None,
model_checkpoint=None,
exclude_list=None,
include_list=None,
reset_global_step_if_necessary=True,
ignore_missing_vars=True):
"""Gets model initializer function.
The initialization logic is as follows:
1. If a checkpoint is found in `train_dir`, initialize from it.
2. Otherwise, if `model_checkpoint` is valid, initialize from it, and reset
global step if necessary.
3. Otherwise, do not initialize from any checkpoint.
Args:
train_dir: A string as the path to an existing training directory to resume.
Use None to skip.
model_checkpoint: A string as the path to an existing model checkpoint to
initialize from. Use None to skip.
exclude_list: A list of strings for the names of variables not to load.
include_list: A list of strings for the names of variables to load. Use
None to load all variables.
reset_global_step_if_necessary: A boolean for whether to reset global step.
Only used in the case of initializing from an existing checkpoint
`model_checkpoint` rather than resuming training from `train_dir`.
ignore_missing_vars: A boolean for whether to ignore missing variables. If
False, errors will be raised if there is a missing variable.
Returns:
An model initializer function if an existing checkpoint is found. None
otherwise.
"""
# Make sure the exclude list is a list.
if not exclude_list:
exclude_list = []
if train_dir:
train_checkpoint = tf.train.latest_checkpoint(train_dir)
if train_checkpoint:
model_checkpoint = train_checkpoint
logging.info('Resume latest training checkpoint in: %s.', train_dir)
elif model_checkpoint:
logging.info('Use initial checkpoint: %s.', model_checkpoint)
if reset_global_step_if_necessary:
exclude_list.append('global_step')
logging.info('Reset global step.')
elif model_checkpoint:
logging.info('Use initial checkpoint: %s.', model_checkpoint)
if reset_global_step_if_necessary:
exclude_list.append('global_step')
logging.info('Reset global step.')
if not model_checkpoint:
logging.info('Do not initialize from a checkpoint.')
return None
variables_to_restore = tf_slim.get_variables_to_restore(
include=include_list, exclude=exclude_list)
return tf_slim.assign_from_checkpoint_fn(
model_checkpoint,
variables_to_restore,
ignore_missing_vars=ignore_missing_vars)
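# Hedged usage sketch (the paths and the excluded variable name are illustrative
# assumptions): resume from `train_dir` if it holds a checkpoint, otherwise warm-start
# from `model_checkpoint` with the global step reset:
#
#   init_fn = get_init_fn(train_dir='/tmp/train_logs',
#                         model_checkpoint='/tmp/pretrained/model.ckpt',
#                         exclude_list=['embedder/logits'])
#   if init_fn is not None:
#     init_fn(session)  # `session` is an assumed tf.Session created by the caller.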
def add_summary(scalars_to_summarize=None,
histograms_to_summarize=None,
images_to_summarize=None):
"""Adds summaries to the default summary collection.
Args:
scalars_to_summarize: A dictionary of (name, scalar tensor) tuples to
summarize.
histograms_to_summarize: A dictionary of (name, histogram tensor) tuples to
summarize.
images_to_summarize: A dictionary of (name, image tensor) tuples to
summarize.
"""
if scalars_to_summarize:
for key, value in scalars_to_summarize.items():
tf.summary.scalar(key, value)
if histograms_to_summarize:
for key, value in histograms_to_summarize.items():
tf.summary.histogram(key, value)
if images_to_summarize:
for key, value in images_to_summarize.items():
tf.summary.image(key, value)
def profile(graph=None, variables=None):
"""Profiles model sizes and computation.
Args:
graph: A Tensorflow Graph to profile. If None, use the default graph.
variables: A list of model variables to profile. If None, use the default
model variable list.
"""
if graph is None:
graph = tf.get_default_graph()
tf_slim.model_analyzer.analyze_ops(graph, print_info=True)
if variables is None:
variables = tf.model_variables()
tf_slim.model_analyzer.analyze_vars(variables, print_info=True)
def create_dir_and_save_flags(flags_module, log_dir, json_filename):
"""Creates log directory and saves flags to a JSON file.
Args:
flags_module: An absl.flags module.
log_dir: A string for log directory.
json_filename: A string for output JSON file name.
"""
# Create log directory if necessary.
if not tf.io.gfile.exists(log_dir):
tf.io.gfile.makedirs(log_dir)
# Save all key flags.
key_flag_dict = {
flag.name: flag.value
for flag in flags_module.FLAGS.get_key_flags_for_module(sys.argv[0])
}
json_path = os.path.join(log_dir, json_filename)
with tf.io.gfile.GFile(json_path, 'w') as f:
json.dump(key_flag_dict, f, indent=2, sort_keys=True)
def get_embedding_keys(distance_type,
replace_samples_with_means=False,
common_module=common):
"""Gets model embedding output keys based on distance type.
Args:
distance_type: An enum string for distance type.
replace_samples_with_means: A boolean for whether to replace embedding
sample keys with embedding mean keys.
common_module: A Python module that defines common flags and constants.
Returns:
A list for enum strings for model embedding output keys.
"""
if distance_type == common_module.DISTANCE_TYPE_CENTER:
return [common_module.KEY_EMBEDDING_MEANS]
if distance_type == common_module.DISTANCE_TYPE_SAMPLE:
return [common_module.KEY_EMBEDDING_SAMPLES]
# distance_type == common_module.DISTANCE_TYPE_CENTER_AND_SAMPLE.
return [
common_module.KEY_EMBEDDING_MEANS, common_module.KEY_EMBEDDING_STDDEVS,
(common_module.KEY_EMBEDDING_MEANS
if replace_samples_with_means else common_module.KEY_EMBEDDING_SAMPLES)
]
def stack_embeddings(model_outputs, embedding_keys, common_module=common):
"""Selects and stacks embeddings by key.
Args:
model_outputs: A dictionary for model output tensors.
embedding_keys: A list for enum strings for tensor keys to select.
common_module: A Python module that defines common flags and constants.
Returns:
A tensor for stacked embeddings. Shape = [..., num_embeddings_per_instance,
embedding_dim].
"""
embeddings_to_stack = []
for key in embedding_keys:
if key in [
common_module.KEY_EMBEDDING_MEANS, common_module.KEY_EMBEDDING_STDDEVS
]:
embeddings_to_stack.append(tf.expand_dims(model_outputs[key], axis=-2))
elif key == common_module.KEY_EMBEDDING_SAMPLES:
embeddings_to_stack.append(model_outputs[key])
else:
raise ValueError('Unsupported embedding key: `%s`.' % str(key))
return tf.concat(embeddings_to_stack, axis=-2)
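# Hedged usage sketch: for the center-and-sample distance type this selects means,
# stddevs and samples, then stacks them along the second-to-last axis (`model_outputs`
# is assumed to be the model's output dictionary, produced elsewhere):
#
#   keys = get_embedding_keys(common.DISTANCE_TYPE_CENTER_AND_SAMPLE)
#   embeddings = stack_embeddings(model_outputs, keys)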
def get_sigmoid_parameters(name,
raw_a_initial_value=0.0,
b_initial_value=0.0,
a_range=(None, None),
b_range=(None, None),
reuse=tf.AUTO_REUSE):
"""Gets sigmoid parameter variables.
Args:
name: A string for the variable scope name.
raw_a_initial_value: A float for initial value of the raw `a` parameter.
b_initial_value: A float for initial value of the `b` parameter.
a_range: A tuple of (min, max) range of `a` parameter. Uses None or
non-positive value to indicate unspecified boundaries.
b_range: A tuple of (min, max) range of `b` parameter. Uses None to indicate
unspecified boundaries. Does NOT use non-positive value to indicate
unspecified boundaries.
reuse: Type of variable reuse.
Returns:
raw_a: A variable for `raw_a` parameter.
a: A tensor for `a` parameter.
b: A tensor for `b` parameter.
Raises:
ValueError: If `a_range` or `b_range` is invalid.
"""
def maybe_clamp(x, x_range, ignored_if_non_positive):
"""Clamps `x` to `x_range`."""
x_min, x_max = x_range
if x_min is not None and x_max is not None and x_min > x_max:
raise ValueError('Invalid range: %s.' % str(x_range))
if (x_min is not None) and (not ignored_if_non_positive or x_min > 0.0):
x = tf.math.maximum(x_min, x)
if (x_max is not None) and (not ignored_if_non_positive or x_max > 0.0):
x = tf.math.minimum(x_max, x)
return x
with tf.variable_scope(name, reuse=reuse):
# TODO(liuti): Currently the variable for `raw_a` is named `a` in
# checkpoints for historic reasons. Consolidate the naming.
raw_a = tf.get_variable(
'a',
shape=[],
dtype=tf.float32,
initializer=tf.initializers.constant(raw_a_initial_value))
a = tf.nn.elu(raw_a) + 1.0
a = maybe_clamp(a, a_range, ignored_if_non_positive=True)
b = tf.get_variable(
'b',
shape=[],
dtype=tf.float32,
initializer=tf.initializers.constant(b_initial_value))
b = maybe_clamp(b, b_range, ignored_if_non_positive=False)
return raw_a, a, b
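# Hedged usage sketch (the scope name and ranges are illustrative assumptions): `a` is
# derived from the raw variable as elu(raw_a) + 1 and clamped to the positive bounds of
# `a_range`, while `b` is clamped to `b_range` directly:
#
#   raw_a, a, b = get_sigmoid_parameters(
#       'sigmoid_params', raw_a_initial_value=0.0, b_initial_value=0.0,
#       a_range=(None, 5.0), b_range=(-2.0, 2.0))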
|
google-research/google-research
|
poem/core/pipeline_utils.py
|
Python
|
apache-2.0
| 18,823
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
from helpers import unittest
import luigi.notifications
from luigi.scheduler import DISABLED, DONE, FAILED, CentralPlannerScheduler
luigi.notifications.DEBUG = True
WORKER = 'myworker'
class CentralPlannerTest(unittest.TestCase):
def setUp(self):
super(CentralPlannerTest, self).setUp()
conf = self.get_scheduler_config()
self.sch = CentralPlannerScheduler(**conf)
self.time = time.time
def get_scheduler_config(self):
return {
'retry_delay': 100,
'remove_delay': 1000,
'worker_disconnect_delay': 10,
'disable_persist': 10,
'disable_window': 10,
'disable_failures': 3,
}
def tearDown(self):
super(CentralPlannerTest, self).tearDown()
if time.time != self.time:
time.time = self.time
def setTime(self, t):
time.time = lambda: t
def test_dep(self):
self.sch.add_task(WORKER, 'B', deps=('A',))
self.sch.add_task(WORKER, 'A')
self.assertEqual(self.sch.get_work(WORKER)['task_id'], 'A')
self.sch.add_task(WORKER, 'A', status=DONE)
self.assertEqual(self.sch.get_work(WORKER)['task_id'], 'B')
self.sch.add_task(WORKER, 'B', status=DONE)
self.assertEqual(self.sch.get_work(WORKER)['task_id'], None)
def test_failed_dep(self):
self.sch.add_task(WORKER, 'B', deps=('A',))
self.sch.add_task(WORKER, 'A')
self.assertEqual(self.sch.get_work(WORKER)['task_id'], 'A')
self.sch.add_task(WORKER, 'A', status=FAILED)
self.assertEqual(self.sch.get_work(WORKER)['task_id'], None) # can still wait and retry: TODO: do we want this?
self.sch.add_task(WORKER, 'A', DONE)
self.assertEqual(self.sch.get_work(WORKER)['task_id'], 'B')
self.sch.add_task(WORKER, 'B', DONE)
self.assertEqual(self.sch.get_work(WORKER)['task_id'], None)
def test_broken_dep(self):
self.sch.add_task(WORKER, 'B', deps=('A',))
self.sch.add_task(WORKER, 'A', runnable=False)
self.assertEqual(self.sch.get_work(WORKER)['task_id'], None) # can still wait and retry: TODO: do we want this?
self.sch.add_task(WORKER, 'A', DONE)
self.assertEqual(self.sch.get_work(WORKER)['task_id'], 'B')
self.sch.add_task(WORKER, 'B', DONE)
self.assertEqual(self.sch.get_work(WORKER)['task_id'], None)
def test_two_workers(self):
# Worker X wants to build A -> B
# Worker Y wants to build A -> C
self.sch.add_task(worker='X', task_id='A')
self.sch.add_task(worker='Y', task_id='A')
self.sch.add_task(task_id='B', deps=('A',), worker='X')
self.sch.add_task(task_id='C', deps=('A',), worker='Y')
self.assertEqual(self.sch.get_work(worker='X')['task_id'], 'A')
self.assertEqual(self.sch.get_work(worker='Y')['task_id'], None) # Worker Y is pending on A to be done
self.sch.add_task(worker='X', task_id='A', status=DONE)
self.assertEqual(self.sch.get_work(worker='Y')['task_id'], 'C')
self.assertEqual(self.sch.get_work(worker='X')['task_id'], 'B')
def test_retry(self):
# Try to build A but fails, will retry after 100s
self.setTime(0)
self.sch.add_task(WORKER, 'A')
self.assertEqual(self.sch.get_work(WORKER)['task_id'], 'A')
self.sch.add_task(WORKER, 'A', FAILED)
for t in range(100):
self.setTime(t)
self.assertEqual(self.sch.get_work(WORKER)['task_id'], None)
self.sch.ping(WORKER)
if t % 10 == 0:
self.sch.prune()
self.setTime(101)
self.sch.prune()
self.assertEqual(self.sch.get_work(WORKER)['task_id'], 'A')
def test_disconnect_running(self):
# X and Y wants to run A.
# X starts but does not report back. Y does.
# After some timeout, Y will build it instead
self.setTime(0)
self.sch.add_task(task_id='A', worker='X')
self.sch.add_task(task_id='A', worker='Y')
self.assertEqual(self.sch.get_work(worker='X')['task_id'], 'A')
for t in range(200):
self.setTime(t)
self.sch.ping(worker='Y')
if t % 10 == 0:
self.sch.prune()
self.assertEqual(self.sch.get_work(worker='Y')['task_id'], 'A')
def test_remove_dep(self):
# X schedules A -> B, A is broken
# Y schedules C -> B: this should remove A as a dep of B
self.sch.add_task(task_id='A', worker='X', runnable=False)
self.sch.add_task(task_id='B', deps=('A',), worker='X')
# X can't build anything
self.assertEqual(self.sch.get_work(worker='X')['task_id'], None)
        self.sch.add_task(task_id='B', deps=('C',), worker='Y')  # should replace A with C as B's dependency
self.sch.add_task(task_id='C', worker='Y', status=DONE)
self.assertEqual(self.sch.get_work(worker='Y')['task_id'], 'B')
def test_timeout(self):
# A bug that was earlier present when restarting the same flow
self.setTime(0)
self.sch.add_task(task_id='A', worker='X')
self.assertEqual(self.sch.get_work(worker='X')['task_id'], 'A')
self.setTime(10000)
self.sch.add_task(task_id='A', worker='Y') # Will timeout X but not schedule A for removal
for i in range(2000):
self.setTime(10000 + i)
self.sch.ping(worker='Y')
self.sch.add_task(task_id='A', status=DONE, worker='Y') # This used to raise an exception since A was removed
def test_disallowed_state_changes(self):
# Test that we can not schedule an already running task
t = 'A'
self.sch.add_task(task_id=t, worker='X')
self.assertEqual(self.sch.get_work(worker='X')['task_id'], t)
self.sch.add_task(task_id=t, worker='Y')
self.assertEqual(self.sch.get_work(worker='Y')['task_id'], None)
def test_two_worker_info(self):
# Make sure the scheduler returns info that some other worker is running task A
self.sch.add_task(worker='X', task_id='A')
self.sch.add_task(worker='Y', task_id='A')
self.assertEqual(self.sch.get_work(worker='X')['task_id'], 'A')
r = self.sch.get_work(worker='Y')
self.assertEqual(r['task_id'], None) # Worker Y is pending on A to be done
s = r['running_tasks'][0]
self.assertEqual(s['task_id'], 'A')
self.assertEqual(s['worker'], 'X')
def test_assistant_get_work(self):
self.sch.add_task(worker='X', task_id='A')
self.sch.add_worker('Y', [])
self.assertEqual(self.sch.get_work('Y', assistant=True)['task_id'], 'A')
# check that the scheduler recognizes tasks as running
running_tasks = self.sch.task_list('RUNNING', '')
self.assertEqual(len(running_tasks), 1)
self.assertEqual(list(running_tasks.keys()), ['A'])
self.assertEqual(running_tasks['A']['worker_running'], 'Y')
def test_assistant_get_work_external_task(self):
self.sch.add_task('X', task_id='A', runnable=False)
self.assertTrue(self.sch.get_work('Y', assistant=True)['task_id'] is None)
def test_task_fails_when_assistant_dies(self):
self.setTime(0)
self.sch.add_task(worker='X', task_id='A')
self.sch.add_worker('Y', [])
self.assertEqual(self.sch.get_work('Y', assistant=True)['task_id'], 'A')
self.assertEqual(list(self.sch.task_list('RUNNING', '').keys()), ['A'])
# Y dies for 50 seconds, X stays alive
self.setTime(50)
self.sch.ping('X')
self.assertEqual(list(self.sch.task_list('FAILED', '').keys()), ['A'])
def test_prune_with_live_assistant(self):
self.setTime(0)
self.sch.add_task(worker='X', task_id='A')
self.sch.get_work('Y', assistant=True)
self.sch.add_task(worker='Y', task_id='A', status=DONE, assistant=True)
# worker X stops communicating, A should be marked for removal
self.setTime(600)
self.sch.ping('Y')
self.sch.prune()
# A will now be pruned
self.setTime(2000)
self.sch.prune()
self.assertFalse(list(self.sch.task_list('', '')))
def test_prune_done_tasks(self, expected=None):
self.setTime(0)
self.sch.add_task(WORKER, task_id='A', status=DONE)
self.sch.add_task(WORKER, task_id='B', deps=['A'], status=DONE)
self.sch.add_task(WORKER, task_id='C', deps=['B'])
self.setTime(600)
self.sch.ping('ASSISTANT')
self.sch.prune()
self.setTime(2000)
self.sch.ping('ASSISTANT')
self.sch.prune()
self.assertEqual(set(expected or ()), set(self.sch.task_list('', '').keys()))
def test_keep_tasks_for_assistant(self):
self.sch.get_work('ASSISTANT', assistant=True) # tell the scheduler this is an assistant
self.test_prune_done_tasks(['B', 'C'])
def test_keep_scheduler_disabled_tasks_for_assistant(self):
self.sch.get_work('ASSISTANT', assistant=True) # tell the scheduler this is an assistant
# create a scheduler disabled task and a worker disabled task
for i in range(10):
self.sch.add_task(WORKER, 'D', status=FAILED)
self.sch.add_task(WORKER, 'E', status=DISABLED)
# scheduler prunes the worker disabled task
self.assertEqual(set(['D', 'E']), set(self.sch.task_list(DISABLED, '')))
self.test_prune_done_tasks(['B', 'C', 'D'])
def test_keep_failed_tasks_for_assistant(self):
self.sch.get_work('ASSISTANT', assistant=True) # tell the scheduler this is an assistant
self.sch.add_task(WORKER, 'D', status=FAILED, deps='A')
self.test_prune_done_tasks(['A', 'B', 'C', 'D'])
def test_scheduler_resources_none_allow_one(self):
self.sch.add_task(worker='X', task_id='A', resources={'R1': 1})
self.assertEqual(self.sch.get_work(worker='X')['task_id'], 'A')
def test_scheduler_resources_none_disallow_two(self):
self.sch.add_task(worker='X', task_id='A', resources={'R1': 2})
self.assertFalse(self.sch.get_work(worker='X')['task_id'], 'A')
def test_scheduler_with_insufficient_resources(self):
self.sch.add_task(worker='X', task_id='A', resources={'R1': 3})
self.sch.update_resources(R1=2)
self.assertFalse(self.sch.get_work(worker='X')['task_id'])
def test_scheduler_with_sufficient_resources(self):
self.sch.add_task(worker='X', task_id='A', resources={'R1': 3})
self.sch.update_resources(R1=3)
self.assertEqual(self.sch.get_work(worker='X')['task_id'], 'A')
def test_scheduler_with_resources_used(self):
self.sch.add_task(worker='X', task_id='A', resources={'R1': 1})
self.assertEqual(self.sch.get_work(worker='X')['task_id'], 'A')
self.sch.add_task(worker='Y', task_id='B', resources={'R1': 1})
self.sch.update_resources(R1=1)
self.assertFalse(self.sch.get_work(worker='Y')['task_id'])
def test_scheduler_overprovisioned_on_other_resource(self):
self.sch.add_task(worker='X', task_id='A', resources={'R1': 2})
self.sch.update_resources(R1=2)
self.assertEqual(self.sch.get_work(worker='X')['task_id'], 'A')
self.sch.add_task(worker='Y', task_id='B', resources={'R2': 2})
self.sch.update_resources(R1=1, R2=2)
self.assertEqual(self.sch.get_work(worker='Y')['task_id'], 'B')
def test_scheduler_with_priority_and_competing_resources(self):
self.sch.add_task(worker='X', task_id='A')
self.assertEqual(self.sch.get_work(worker='X')['task_id'], 'A')
self.sch.add_task(worker='X', task_id='B', resources={'R': 1}, priority=10)
self.sch.add_task(worker='Y', task_id='C', resources={'R': 1}, priority=1)
self.sch.update_resources(R=1)
self.assertFalse(self.sch.get_work(worker='Y')['task_id'])
self.sch.add_task(worker='Y', task_id='D', priority=0)
self.assertEqual(self.sch.get_work(worker='Y')['task_id'], 'D')
def test_do_not_lock_resources_when_not_ready(self):
""" Test to make sure that resources won't go unused waiting on workers """
self.sch.add_task(worker='X', task_id='A', priority=10)
self.sch.add_task(worker='X', task_id='B', resources={'R': 1}, priority=5)
self.sch.add_task(worker='Y', task_id='C', resources={'R': 1}, priority=1)
self.sch.update_resources(R=1)
self.sch.add_worker('X', [('workers', 1)])
self.assertEqual('C', self.sch.get_work(worker='Y')['task_id'])
def test_lock_resources_when_one_of_multiple_workers_is_ready(self):
self.sch.add_task(worker='X', task_id='A', priority=10)
self.sch.add_task(worker='X', task_id='B', resources={'R': 1}, priority=5)
self.sch.add_task(worker='Y', task_id='C', resources={'R': 1}, priority=1)
self.sch.update_resources(R=1)
self.sch.add_worker('X', [('workers', 2)])
self.sch.add_worker('Y', [])
self.assertFalse(self.sch.get_work('Y')['task_id'])
def test_do_not_lock_resources_while_running_higher_priority(self):
""" Test to make sure that resources won't go unused waiting on workers """
self.sch.add_task(worker='X', task_id='A', priority=10)
self.sch.add_task(worker='X', task_id='B', resources={'R': 1}, priority=5)
self.sch.add_task(worker='Y', task_id='C', resources={'R': 1}, priority=1)
self.sch.update_resources(R=1)
self.sch.add_worker('X', [('workers', 1)])
self.assertEqual('A', self.sch.get_work('X')['task_id'])
self.assertEqual('C', self.sch.get_work('Y')['task_id'])
def test_lock_resources_while_running_lower_priority(self):
""" Make sure resources will be made available while working on lower priority tasks """
self.sch.add_task(worker='X', task_id='A', priority=4)
self.assertEqual('A', self.sch.get_work('X')['task_id'])
self.sch.add_task(worker='X', task_id='B', resources={'R': 1}, priority=5)
self.sch.add_task(worker='Y', task_id='C', resources={'R': 1}, priority=1)
self.sch.update_resources(R=1)
self.sch.add_worker('X', [('workers', 1)])
self.assertFalse(self.sch.get_work('Y')['task_id'])
def test_lock_resources_for_second_worker(self):
self.sch.add_task(worker='X', task_id='A', resources={'R': 1})
self.sch.add_task(worker='X', task_id='B', resources={'R': 1})
self.sch.add_task(worker='Y', task_id='C', resources={'R': 1}, priority=10)
self.sch.add_worker('X', {'workers': 2})
self.sch.add_worker('Y', {'workers': 1})
self.sch.update_resources(R=2)
self.assertEqual('A', self.sch.get_work('X')['task_id'])
self.assertFalse(self.sch.get_work('X')['task_id'])
def test_can_work_on_lower_priority_while_waiting_for_resources(self):
self.sch.add_task(worker='X', task_id='A', resources={'R': 1}, priority=0)
self.assertEqual('A', self.sch.get_work('X')['task_id'])
self.sch.add_task(worker='Y', task_id='B', resources={'R': 1}, priority=10)
self.sch.add_task(worker='Y', task_id='C', priority=0)
self.sch.update_resources(R=1)
self.assertEqual('C', self.sch.get_work('Y')['task_id'])
def test_priority_update_with_pruning(self):
self.setTime(0)
self.sch.add_task(task_id='A', worker='X')
self.setTime(50) # after worker disconnects
self.sch.prune()
self.sch.add_task(task_id='B', deps=['A'], worker='X')
self.setTime(2000) # after remove for task A
self.sch.prune()
# Here task A that B depends on is missing
self.sch.add_task(WORKER, task_id='C', deps=['B'], priority=100)
self.sch.add_task(WORKER, task_id='B', deps=['A'])
self.sch.add_task(WORKER, task_id='A')
self.sch.add_task(WORKER, task_id='D', priority=10)
self.check_task_order('ABCD')
def test_update_resources(self):
self.sch.add_task(WORKER, task_id='A', deps=['B'])
self.sch.add_task(WORKER, task_id='B', resources={'r': 2})
self.sch.update_resources(r=1)
# B requires too many resources, we can't schedule
self.check_task_order([])
self.sch.add_task(WORKER, task_id='B', resources={'r': 1})
# now we have enough resources
self.check_task_order(['B', 'A'])
    def test_handle_multiple_resources(self):
self.sch.add_task(WORKER, task_id='A', resources={'r1': 1, 'r2': 1})
self.sch.add_task(WORKER, task_id='B', resources={'r1': 1, 'r2': 1})
self.sch.add_task(WORKER, task_id='C', resources={'r1': 1})
self.sch.update_resources(r1=2, r2=1)
self.assertEqual('A', self.sch.get_work(WORKER)['task_id'])
self.check_task_order('C')
def test_single_resource_lock(self):
self.sch.add_task('X', task_id='A', resources={'r': 1})
self.assertEqual('A', self.sch.get_work('X')['task_id'])
self.sch.add_task(WORKER, task_id='B', resources={'r': 2}, priority=10)
self.sch.add_task(WORKER, task_id='C', resources={'r': 1})
self.sch.update_resources(r=2)
# Should wait for 2 units of r to be available for B before scheduling C
self.check_task_order([])
def test_no_lock_if_too_many_resources_required(self):
self.sch.add_task(WORKER, task_id='A', resources={'r': 2}, priority=10)
self.sch.add_task(WORKER, task_id='B', resources={'r': 1})
self.sch.update_resources(r=1)
self.check_task_order('B')
def test_multiple_resources_lock(self):
self.sch.add_task('X', task_id='A', resources={'r1': 1, 'r2': 1}, priority=10)
self.sch.add_task(WORKER, task_id='B', resources={'r2': 1})
self.sch.add_task(WORKER, task_id='C', resources={'r1': 1})
self.sch.update_resources(r1=1, r2=1)
# should preserve both resources for worker 'X'
self.check_task_order([])
def test_multiple_resources_no_lock(self):
self.sch.add_task(WORKER, task_id='A', resources={'r1': 1}, priority=10)
self.sch.add_task(WORKER, task_id='B', resources={'r1': 1, 'r2': 1}, priority=10)
self.sch.add_task(WORKER, task_id='C', resources={'r2': 1})
self.sch.update_resources(r1=1, r2=2)
self.assertEqual('A', self.sch.get_work(WORKER)['task_id'])
# C doesn't block B, so it can go first
self.check_task_order('C')
def check_task_order(self, order):
for expected_id in order:
self.assertEqual(self.sch.get_work(WORKER)['task_id'], expected_id)
self.sch.add_task(WORKER, expected_id, status=DONE)
self.assertEqual(self.sch.get_work(WORKER)['task_id'], None)
def test_priorities(self):
self.sch.add_task(WORKER, 'A', priority=10)
self.sch.add_task(WORKER, 'B', priority=5)
self.sch.add_task(WORKER, 'C', priority=15)
self.sch.add_task(WORKER, 'D', priority=9)
self.check_task_order(['C', 'A', 'D', 'B'])
def test_priorities_default_and_negative(self):
self.sch.add_task(WORKER, 'A', priority=10)
self.sch.add_task(WORKER, 'B')
self.sch.add_task(WORKER, 'C', priority=15)
self.sch.add_task(WORKER, 'D', priority=-20)
self.sch.add_task(WORKER, 'E', priority=1)
self.check_task_order(['C', 'A', 'E', 'B', 'D'])
def test_priorities_and_dependencies(self):
self.sch.add_task(WORKER, 'A', deps=['Z'], priority=10)
self.sch.add_task(WORKER, 'B', priority=5)
self.sch.add_task(WORKER, 'C', deps=['Z'], priority=3)
self.sch.add_task(WORKER, 'D', priority=2)
self.sch.add_task(WORKER, 'Z', priority=1)
self.check_task_order(['Z', 'A', 'B', 'C', 'D'])
def test_priority_update_dependency_after_scheduling(self):
self.sch.add_task(WORKER, 'A', priority=1)
self.sch.add_task(WORKER, 'B', priority=5, deps=['A'])
self.sch.add_task(WORKER, 'C', priority=10, deps=['B'])
self.sch.add_task(WORKER, 'D', priority=6)
self.check_task_order(['A', 'B', 'C', 'D'])
def test_disable(self):
self.sch.add_task(WORKER, 'A')
self.sch.add_task(WORKER, 'A', status=FAILED)
self.sch.add_task(WORKER, 'A', status=FAILED)
self.sch.add_task(WORKER, 'A', status=FAILED)
# should be disabled at this point
self.assertEqual(len(self.sch.task_list('DISABLED', '')), 1)
self.assertEqual(len(self.sch.task_list('FAILED', '')), 0)
self.sch.add_task(WORKER, 'A')
self.assertEqual(self.sch.get_work(WORKER)['task_id'], None)
def test_disable_and_reenable(self):
self.sch.add_task(WORKER, 'A')
self.sch.add_task(WORKER, 'A', status=FAILED)
self.sch.add_task(WORKER, 'A', status=FAILED)
self.sch.add_task(WORKER, 'A', status=FAILED)
# should be disabled at this point
self.assertEqual(len(self.sch.task_list('DISABLED', '')), 1)
self.assertEqual(len(self.sch.task_list('FAILED', '')), 0)
self.sch.re_enable_task('A')
# should be enabled at this point
self.assertEqual(len(self.sch.task_list('DISABLED', '')), 0)
self.assertEqual(len(self.sch.task_list('FAILED', '')), 1)
self.sch.add_task(WORKER, 'A')
self.assertEqual(self.sch.get_work(WORKER)['task_id'], 'A')
def test_disable_and_reenable_and_disable_again(self):
self.sch.add_task(WORKER, 'A')
self.sch.add_task(WORKER, 'A', status=FAILED)
self.sch.add_task(WORKER, 'A', status=FAILED)
self.sch.add_task(WORKER, 'A', status=FAILED)
# should be disabled at this point
self.assertEqual(len(self.sch.task_list('DISABLED', '')), 1)
self.assertEqual(len(self.sch.task_list('FAILED', '')), 0)
self.sch.re_enable_task('A')
# should be enabled at this point
self.assertEqual(len(self.sch.task_list('DISABLED', '')), 0)
self.assertEqual(len(self.sch.task_list('FAILED', '')), 1)
self.sch.add_task(WORKER, 'A')
self.assertEqual(self.sch.get_work(WORKER)['task_id'], 'A')
self.sch.add_task(WORKER, 'A', status=FAILED)
# should be still enabled
self.assertEqual(len(self.sch.task_list('DISABLED', '')), 0)
self.assertEqual(len(self.sch.task_list('FAILED', '')), 1)
self.sch.add_task(WORKER, 'A')
self.assertEqual(self.sch.get_work(WORKER)['task_id'], 'A')
self.sch.add_task(WORKER, 'A', status=FAILED)
self.sch.add_task(WORKER, 'A', status=FAILED)
# should be disabled now
self.assertEqual(len(self.sch.task_list('DISABLED', '')), 1)
self.assertEqual(len(self.sch.task_list('FAILED', '')), 0)
self.sch.add_task(WORKER, 'A')
self.assertEqual(self.sch.get_work(WORKER)['task_id'], None)
def test_disable_and_done(self):
self.sch.add_task(WORKER, 'A')
self.sch.add_task(WORKER, 'A', status=FAILED)
self.sch.add_task(WORKER, 'A', status=FAILED)
self.sch.add_task(WORKER, 'A', status=FAILED)
# should be disabled at this point
self.assertEqual(len(self.sch.task_list('DISABLED', '')), 1)
self.assertEqual(len(self.sch.task_list('FAILED', '')), 0)
self.sch.add_task(WORKER, 'A', status=DONE)
# should be enabled at this point
self.assertEqual(len(self.sch.task_list('DISABLED', '')), 0)
self.assertEqual(len(self.sch.task_list('DONE', '')), 1)
self.sch.add_task(WORKER, 'A')
self.assertEqual(self.sch.get_work(WORKER)['task_id'], 'A')
def test_disable_by_worker(self):
self.sch.add_task(WORKER, 'A', status=DISABLED)
self.assertEqual(len(self.sch.task_list('DISABLED', '')), 1)
self.sch.add_task(WORKER, 'A')
# should be enabled at this point
self.assertEqual(len(self.sch.task_list('DISABLED', '')), 0)
self.sch.add_task(WORKER, 'A')
self.assertEqual(self.sch.get_work(WORKER)['task_id'], 'A')
def test_task_list_beyond_limit(self):
sch = CentralPlannerScheduler(max_shown_tasks=3)
for c in 'ABCD':
sch.add_task(WORKER, c)
self.assertEqual(set('ABCD'), set(sch.task_list('PENDING', '', False).keys()))
self.assertEqual({'num_tasks': 4}, sch.task_list('PENDING', ''))
def test_task_list_within_limit(self):
sch = CentralPlannerScheduler(max_shown_tasks=4)
for c in 'ABCD':
sch.add_task(WORKER, c)
self.assertEqual(set('ABCD'), set(sch.task_list('PENDING', '').keys()))
def test_task_lists_some_beyond_limit(self):
sch = CentralPlannerScheduler(max_shown_tasks=3)
for c in 'ABCD':
sch.add_task(WORKER, c, 'DONE')
for c in 'EFG':
sch.add_task(WORKER, c)
self.assertEqual(set('EFG'), set(sch.task_list('PENDING', '').keys()))
self.assertEqual({'num_tasks': 4}, sch.task_list('DONE', ''))
def test_priority_update_dependency_chain(self):
self.sch.add_task(WORKER, 'A', priority=10, deps=['B'])
self.sch.add_task(WORKER, 'B', priority=5, deps=['C'])
self.sch.add_task(WORKER, 'C', priority=1)
self.sch.add_task(WORKER, 'D', priority=6)
self.check_task_order(['C', 'B', 'A', 'D'])
def test_priority_no_decrease_with_multiple_updates(self):
self.sch.add_task(WORKER, 'A', priority=1)
self.sch.add_task(WORKER, 'B', priority=10, deps=['A'])
self.sch.add_task(WORKER, 'C', priority=5, deps=['A'])
self.sch.add_task(WORKER, 'D', priority=6)
self.check_task_order(['A', 'B', 'D', 'C'])
def test_unique_tasks(self):
self.sch.add_task(WORKER, 'A')
self.sch.add_task(WORKER, 'B')
self.sch.add_task(WORKER, 'C')
self.sch.add_task(WORKER + "_2", 'B')
response = self.sch.get_work(WORKER)
self.assertEqual(3, response['n_pending_tasks'])
self.assertEqual(2, response['n_unique_pending'])
def test_pending_downstream_disable(self):
self.sch.add_task(WORKER, 'A', status=DISABLED)
self.sch.add_task(WORKER, 'B', deps=('A',))
self.sch.add_task(WORKER, 'C', deps=('B',))
response = self.sch.get_work(WORKER)
self.assertTrue(response['task_id'] is None)
self.assertEqual(0, response['n_pending_tasks'])
self.assertEqual(0, response['n_unique_pending'])
def test_pending_downstream_failure(self):
self.sch.add_task(WORKER, 'A', status=FAILED)
self.sch.add_task(WORKER, 'B', deps=('A',))
self.sch.add_task(WORKER, 'C', deps=('B',))
response = self.sch.get_work(WORKER)
self.assertTrue(response['task_id'] is None)
self.assertEqual(2, response['n_pending_tasks'])
self.assertEqual(2, response['n_unique_pending'])
def test_prefer_more_dependents(self):
self.sch.add_task(WORKER, 'A')
self.sch.add_task(WORKER, 'B')
self.sch.add_task(WORKER, 'C', deps=['B'])
self.sch.add_task(WORKER, 'D', deps=['B'])
self.sch.add_task(WORKER, 'E', deps=['A'])
self.check_task_order('BACDE')
def test_prefer_readier_dependents(self):
self.sch.add_task(WORKER, 'A')
self.sch.add_task(WORKER, 'B')
self.sch.add_task(WORKER, 'C')
self.sch.add_task(WORKER, 'D')
self.sch.add_task(WORKER, 'F', deps=['A', 'B', 'C'])
self.sch.add_task(WORKER, 'G', deps=['A', 'B', 'C'])
self.sch.add_task(WORKER, 'E', deps=['D'])
self.check_task_order('DABCFGE')
def test_ignore_done_dependents(self):
self.sch.add_task(WORKER, 'A')
self.sch.add_task(WORKER, 'B')
self.sch.add_task(WORKER, 'C')
self.sch.add_task(WORKER, 'D', priority=1)
self.sch.add_task(WORKER, 'E', deps=['C', 'D'])
self.sch.add_task(WORKER, 'F', deps=['A', 'B'])
self.check_task_order('DCABEF')
def test_task_list_no_deps(self):
self.sch.add_task(WORKER, 'B', deps=('A',))
self.sch.add_task(WORKER, 'A')
task_list = self.sch.task_list('PENDING', '')
self.assertFalse('deps' in task_list['A'])
if __name__ == '__main__':
unittest.main()
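# Illustrative sketch (not part of the original suite; assumes the same CentralPlannerScheduler
# instance and WORKER constant used by the tests above): get_work() hands out the
# highest-priority runnable task first, which is the behaviour test_priorities exercises.
def _example_priority_run(sch):
    sch.add_task(WORKER, task_id='low', priority=1)
    sch.add_task(WORKER, task_id='high', priority=10)
    assert sch.get_work(WORKER)['task_id'] == 'high'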
|
hellais/luigi
|
test/central_planner_test.py
|
Python
|
apache-2.0
| 28,970
|
# Copyright 2017 Bracket Computing, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# https://github.com/brkt/brkt-cli/blob/master/LICENSE
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and
# limitations under the License.
import unittest
import datetime
import iso8601
from brkt_cli import CLIConfig
from brkt_cli import version
class TestVersionCheck(unittest.TestCase):
def test_is_version_supported(self):
supported = [
'0.9.8', '0.9.9', '0.9.9.1', '0.9.10', '0.9.11', '0.9.12'
]
self.assertFalse(
version._is_version_supported('0.9.7', supported)
)
self.assertTrue(
version._is_version_supported('0.9.8', supported)
)
self.assertTrue(
version._is_version_supported('0.9.12', supported)
)
self.assertTrue(
version._is_version_supported('0.9.13pre1', supported)
)
self.assertTrue(
version._is_version_supported('0.9.13', supported)
)
def test_is_later_version_available(self):
supported = [
'0.9.8', '0.9.9', '0.9.9.1', '0.9.10', '0.9.11', '0.9.12'
]
self.assertTrue(
version._is_later_version_available('0.9.11', supported)
)
self.assertFalse(
version._is_later_version_available('0.9.12', supported)
)
self.assertFalse(
version._is_later_version_available('0.9.13pre1', supported)
)
def test_version_check_time(self):
cfg = CLIConfig()
# Not set.
self.assertIsNone(version.get_last_version_check_time(cfg))
# Set.
before = datetime.datetime.now(tz=iso8601.UTC)
version.set_last_version_check_time(cfg)
t = version.get_last_version_check_time(cfg)
self.assertIsNotNone(t)
after = datetime.datetime.now(tz=iso8601.UTC)
self.assertTrue(before <= t <= after)
def test_is_version_check_needed(self):
cfg = CLIConfig()
# Not set.
self.assertTrue(version.is_version_check_needed(cfg))
# Set to 25 hours ago.
now = datetime.datetime.now(tz=iso8601.UTC)
dt = now - datetime.timedelta(hours=25)
version.set_last_version_check_time(cfg, dt=dt)
self.assertTrue(version.is_version_check_needed(cfg))
# Set to 23 hours ago.
dt = now - datetime.timedelta(hours=23)
version.set_last_version_check_time(cfg, dt=dt)
self.assertFalse(version.is_version_check_needed(cfg))
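# Standalone sketch (an assumption for illustration, not brkt_cli's implementation) of the
# 24-hour staleness rule that test_is_version_check_needed exercises: a check is needed when
# no previous check is recorded or the last one is older than 24 hours.
def _needs_version_check(last_check, now, max_age=datetime.timedelta(hours=24)):
    return last_check is None or (now - last_check) > max_age

_now = datetime.datetime.now(tz=iso8601.UTC)
assert _needs_version_check(None, _now)
assert _needs_version_check(_now - datetime.timedelta(hours=25), _now)
assert not _needs_version_check(_now - datetime.timedelta(hours=23), _now)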
|
brkt/brkt-cli
|
brkt_cli/test_version.py
|
Python
|
apache-2.0
| 2,893
|
"""Single and multi-threaded executors."""
import datetime
import functools
import logging
import math
import os
import threading
from abc import ABCMeta, abstractmethod
from threading import Lock
from typing import (
Dict,
Iterable,
List,
MutableSequence,
Optional,
Set,
Tuple,
Union,
cast,
)
import psutil
from schema_salad.exceptions import ValidationException
from schema_salad.sourceline import SourceLine
from .command_line_tool import CallbackJob, ExpressionJob
from .context import RuntimeContext, getdefault
from .errors import WorkflowException
from .job import JobBase
from .loghandler import _logger
from .mutation import MutationManager
from .process import Process, cleanIntermediate, relocateOutputs
from .provenance_profile import ProvenanceProfile
from .task_queue import TaskQueue
from .update import ORIGINAL_CWLVERSION
from .utils import CWLObjectType, JobsType
from .workflow import Workflow
from .workflow_job import WorkflowJob, WorkflowJobStep
TMPDIR_LOCK = Lock()
class JobExecutor(metaclass=ABCMeta):
"""Abstract base job executor."""
def __init__(self) -> None:
"""Initialize."""
self.final_output = [] # type: MutableSequence[Optional[CWLObjectType]]
self.final_status = [] # type: List[str]
self.output_dirs = set() # type: Set[str]
def __call__(
self,
process: Process,
job_order_object: CWLObjectType,
runtime_context: RuntimeContext,
logger: logging.Logger = _logger,
) -> Tuple[Optional[CWLObjectType], str]:
return self.execute(process, job_order_object, runtime_context, logger)
def output_callback(
self, out: Optional[CWLObjectType], process_status: str
) -> None:
"""Collect the final status and outputs."""
self.final_status.append(process_status)
self.final_output.append(out)
@abstractmethod
def run_jobs(
self,
process: Process,
job_order_object: CWLObjectType,
logger: logging.Logger,
runtime_context: RuntimeContext,
) -> None:
"""Execute the jobs for the given Process."""
def execute(
self,
process: Process,
job_order_object: CWLObjectType,
runtime_context: RuntimeContext,
logger: logging.Logger = _logger,
) -> Tuple[Union[Optional[CWLObjectType]], str]:
"""Execute the process."""
if not runtime_context.basedir:
raise WorkflowException("Must provide 'basedir' in runtimeContext")
def check_for_abstract_op(tool: CWLObjectType) -> None:
if tool["class"] == "Operation":
raise SourceLine(
tool, "class", WorkflowException, runtime_context.debug
).makeError("Workflow has unrunnable abstract Operation")
process.visit(check_for_abstract_op)
finaloutdir = None # Type: Optional[str]
original_outdir = runtime_context.outdir
if isinstance(original_outdir, str):
finaloutdir = os.path.abspath(original_outdir)
runtime_context = runtime_context.copy()
outdir = runtime_context.create_outdir()
self.output_dirs.add(outdir)
runtime_context.outdir = outdir
runtime_context.mutation_manager = MutationManager()
runtime_context.toplevel = True
runtime_context.workflow_eval_lock = threading.Condition(threading.RLock())
job_reqs = None # type: Optional[List[CWLObjectType]]
if "https://w3id.org/cwl/cwl#requirements" in job_order_object:
if process.metadata.get(ORIGINAL_CWLVERSION) == "v1.0":
raise WorkflowException(
"`cwl:requirements` in the input object is not part of CWL "
"v1.0. You can adjust to use `cwltool:overrides` instead; or you "
"can set the cwlVersion to v1.1"
)
job_reqs = cast(
List[CWLObjectType],
job_order_object["https://w3id.org/cwl/cwl#requirements"],
)
elif (
"cwl:defaults" in process.metadata
and "https://w3id.org/cwl/cwl#requirements"
in cast(CWLObjectType, process.metadata["cwl:defaults"])
):
if process.metadata.get(ORIGINAL_CWLVERSION) == "v1.0":
raise WorkflowException(
"`cwl:requirements` in the input object is not part of CWL "
"v1.0. You can adjust to use `cwltool:overrides` instead; or you "
"can set the cwlVersion to v1.1"
)
job_reqs = cast(
Optional[List[CWLObjectType]],
cast(CWLObjectType, process.metadata["cwl:defaults"])[
"https://w3id.org/cwl/cwl#requirements"
],
)
if job_reqs is not None:
for req in job_reqs:
process.requirements.append(req)
self.run_jobs(process, job_order_object, logger, runtime_context)
if (
self.final_output
and self.final_output[0] is not None
and finaloutdir is not None
):
self.final_output[0] = relocateOutputs(
self.final_output[0],
finaloutdir,
self.output_dirs,
runtime_context.move_outputs,
runtime_context.make_fs_access(""),
getdefault(runtime_context.compute_checksum, True),
path_mapper=runtime_context.path_mapper,
)
if runtime_context.rm_tmpdir:
if not runtime_context.cachedir:
output_dirs = self.output_dirs # type: Iterable[str]
else:
output_dirs = filter(
lambda x: not x.startswith(runtime_context.cachedir), # type: ignore
self.output_dirs,
)
cleanIntermediate(output_dirs)
if self.final_output and self.final_status:
if (
runtime_context.research_obj is not None
and isinstance(
process, (JobBase, Process, WorkflowJobStep, WorkflowJob)
)
and process.parent_wf
):
process_run_id = None # type: Optional[str]
name = "primary"
process.parent_wf.generate_output_prov(
self.final_output[0], process_run_id, name
)
process.parent_wf.document.wasEndedBy(
process.parent_wf.workflow_run_uri,
None,
process.parent_wf.engine_uuid,
datetime.datetime.now(),
)
process.parent_wf.finalize_prov_profile(name=None)
return (self.final_output[0], self.final_status[0])
return (None, "permanentFail")
class SingleJobExecutor(JobExecutor):
"""Default single-threaded CWL reference executor."""
def run_jobs(
self,
process: Process,
job_order_object: CWLObjectType,
logger: logging.Logger,
runtime_context: RuntimeContext,
) -> None:
process_run_id = None # type: Optional[str]
# define provenance profile for single commandline tool
if (
not isinstance(process, Workflow)
and runtime_context.research_obj is not None
):
process.provenance_object = ProvenanceProfile(
runtime_context.research_obj,
full_name=runtime_context.cwl_full_name,
host_provenance=False,
user_provenance=False,
orcid=runtime_context.orcid,
# single tool execution, so RO UUID = wf UUID = tool UUID
run_uuid=runtime_context.research_obj.ro_uuid,
fsaccess=runtime_context.make_fs_access(""),
)
process.parent_wf = process.provenance_object
jobiter = process.job(job_order_object, self.output_callback, runtime_context)
try:
for job in jobiter:
if job is not None:
if runtime_context.builder is not None and hasattr(job, "builder"):
job.builder = runtime_context.builder # type: ignore
if job.outdir is not None:
self.output_dirs.add(job.outdir)
if runtime_context.research_obj is not None:
if not isinstance(process, Workflow):
prov_obj = process.provenance_object
else:
prov_obj = job.prov_obj
if prov_obj:
runtime_context.prov_obj = prov_obj
prov_obj.fsaccess = runtime_context.make_fs_access("")
prov_obj.evaluate(
process,
job,
job_order_object,
runtime_context.research_obj,
)
process_run_id = prov_obj.record_process_start(process, job)
runtime_context = runtime_context.copy()
runtime_context.process_run_id = process_run_id
job.run(runtime_context)
else:
logger.error("Workflow cannot make any more progress.")
break
except (
ValidationException,
WorkflowException,
): # pylint: disable=try-except-raise
raise
except Exception as err:
logger.exception("Got workflow error")
raise WorkflowException(str(err)) from err
class MultithreadedJobExecutor(JobExecutor):
"""
Experimental multi-threaded CWL executor.
Does simple resource accounting, will not start a job unless it
has cores / ram available, but does not make any attempt to
optimize usage.
"""
def __init__(self) -> None:
"""Initialize."""
super().__init__()
self.exceptions = [] # type: List[WorkflowException]
self.pending_jobs = [] # type: List[JobsType]
self.pending_jobs_lock = threading.Lock()
self.max_ram = int(psutil.virtual_memory().available / 2**20) # type: ignore[no-untyped-call]
self.max_cores = float(psutil.cpu_count())
self.allocated_ram = float(0)
self.allocated_cores = float(0)
def select_resources(
self, request: Dict[str, Union[int, float]], runtime_context: RuntimeContext
) -> Dict[str, Union[int, float]]: # pylint: disable=unused-argument
"""Naïve check for available cpu cores and memory."""
result: Dict[str, Union[int, float]] = {}
maxrsc = {"cores": self.max_cores, "ram": self.max_ram}
for rsc in ("cores", "ram"):
rsc_min = request[rsc + "Min"]
if rsc_min > maxrsc[rsc]:
raise WorkflowException(
f"Requested at least {rsc_min} {rsc} but only "
f"{maxrsc[rsc]} available"
)
rsc_max = request[rsc + "Max"]
if rsc_max < maxrsc[rsc]:
result[rsc] = math.ceil(rsc_max)
else:
result[rsc] = maxrsc[rsc]
result["tmpdirSize"] = math.ceil(request["tmpdirMin"])
result["outdirSize"] = math.ceil(request["outdirMin"])
if "cudaDeviceCount" in request:
result["cudaDeviceCount"] = request["cudaDeviceCount"]
return result
def _runner(self, job, runtime_context, TMPDIR_LOCK):
# type: (Union[JobBase, WorkflowJob, CallbackJob, ExpressionJob], RuntimeContext, threading.Lock) -> None
"""Job running thread."""
try:
_logger.debug(
"job: {}, runtime_context: {}, TMPDIR_LOCK: {}".format(
job, runtime_context, TMPDIR_LOCK
)
)
job.run(runtime_context, TMPDIR_LOCK)
except WorkflowException as err:
_logger.exception(f"Got workflow error: {err}")
self.exceptions.append(err)
except Exception as err: # pylint: disable=broad-except
_logger.exception(f"Got workflow error: {err}")
self.exceptions.append(WorkflowException(str(err)))
finally:
if runtime_context.workflow_eval_lock:
with runtime_context.workflow_eval_lock:
if isinstance(job, JobBase):
ram = job.builder.resources["ram"]
self.allocated_ram -= ram
cores = job.builder.resources["cores"]
self.allocated_cores -= cores
runtime_context.workflow_eval_lock.notifyAll()
def run_job(
self,
job: Optional[JobsType],
runtime_context: RuntimeContext,
) -> None:
"""Execute a single Job in a separate thread."""
if job is not None:
with self.pending_jobs_lock:
self.pending_jobs.append(job)
with self.pending_jobs_lock:
n = 0
while (n + 1) <= len(self.pending_jobs):
# Simple greedy resource allocation strategy. Go
# through pending jobs in the order they were
# generated and add them to the queue only if there
# are resources available.
job = self.pending_jobs[n]
if isinstance(job, JobBase):
ram = job.builder.resources["ram"]
cores = job.builder.resources["cores"]
if ram > self.max_ram or cores > self.max_cores:
                        _logger.error(
                            'Job "%s" cannot be run, requests more resources (%s) '
                            "than available on this host (already allocated ram is %d, "
                            "allocated cores is %d, max ram %d, max cores %d)",
                            job.name,
                            job.builder.resources,
                            self.allocated_ram,
                            self.allocated_cores,
                            self.max_ram,
                            self.max_cores,
                        )
self.pending_jobs.remove(job)
return
if (
self.allocated_ram + ram > self.max_ram
or self.allocated_cores + cores > self.max_cores
):
_logger.debug(
'Job "%s" cannot run yet, resources (%s) are not '
"available (already allocated ram is %d, allocated cores is %d, "
"max ram %d, max cores %d",
job.name,
job.builder.resources,
self.allocated_ram,
self.allocated_cores,
self.max_ram,
self.max_cores,
)
n += 1
continue
if isinstance(job, JobBase):
ram = job.builder.resources["ram"]
self.allocated_ram += ram
cores = job.builder.resources["cores"]
self.allocated_cores += cores
self.taskqueue.add(
functools.partial(self._runner, job, runtime_context, TMPDIR_LOCK),
runtime_context.workflow_eval_lock,
)
self.pending_jobs.remove(job)
def wait_for_next_completion(self, runtime_context):
# type: (RuntimeContext) -> None
"""Wait for jobs to finish."""
if runtime_context.workflow_eval_lock is not None:
runtime_context.workflow_eval_lock.wait(timeout=3)
if self.exceptions:
raise self.exceptions[0]
def run_jobs(
self,
process: Process,
job_order_object: CWLObjectType,
logger: logging.Logger,
runtime_context: RuntimeContext,
) -> None:
self.taskqueue = TaskQueue(
threading.Lock(), psutil.cpu_count()
) # type: TaskQueue
try:
jobiter = process.job(
job_order_object, self.output_callback, runtime_context
)
if runtime_context.workflow_eval_lock is None:
raise WorkflowException(
"runtimeContext.workflow_eval_lock must not be None"
)
runtime_context.workflow_eval_lock.acquire()
for job in jobiter:
if job is not None:
if isinstance(job, JobBase):
job.builder = runtime_context.builder or job.builder
if job.outdir is not None:
self.output_dirs.add(job.outdir)
self.run_job(job, runtime_context)
if job is None:
if self.taskqueue.in_flight > 0:
self.wait_for_next_completion(runtime_context)
else:
logger.error("Workflow cannot make any more progress.")
break
self.run_job(None, runtime_context)
while self.taskqueue.in_flight > 0:
self.wait_for_next_completion(runtime_context)
self.run_job(None, runtime_context)
runtime_context.workflow_eval_lock.release()
finally:
self.taskqueue.drain()
self.taskqueue.join()
class NoopJobExecutor(JobExecutor):
"""Do nothing executor, for testing purposes only."""
def run_jobs(
self,
process: Process,
job_order_object: CWLObjectType,
logger: logging.Logger,
runtime_context: RuntimeContext,
) -> None:
pass
def execute(
self,
process: Process,
job_order_object: CWLObjectType,
runtime_context: RuntimeContext,
logger: Optional[logging.Logger] = None,
) -> Tuple[Optional[CWLObjectType], str]:
return {}, "success"
|
common-workflow-language/cwltool
|
cwltool/executors.py
|
Python
|
apache-2.0
| 18,408
|
# Copyright 2014 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mistral.lang import types
from mistral.lang.v2 import base
from mistral_lib import utils
class ActionSpec(base.BaseSpec):
# See http://json-schema.org
_schema = {
"type": "object",
"properties": {
"base": types.NONEMPTY_STRING,
"base-input": types.NONEMPTY_DICT,
"input": types.UNIQUE_STRING_OR_ONE_KEY_DICT_LIST,
"output": types.ANY_NULLABLE,
},
"required": ["base"],
"additionalProperties": False
}
def __init__(self, data, validate):
super(ActionSpec, self).__init__(data, validate)
self._name = data['name']
self._description = data.get('description')
self._tags = data.get('tags', [])
self._base = data['base']
self._base_input = data.get('base-input', {})
self._input = utils.get_dict_from_entries(data.get('input', []))
self._output = data.get('output')
self._base, _input = self._parse_cmd_and_input(self._base)
utils.merge_dicts(self._base_input, _input)
def validate_schema(self):
super(ActionSpec, self).validate_schema()
# Validate YAQL expressions.
inline_params = self._parse_cmd_and_input(self._data.get('base'))[1]
self.validate_expr(inline_params)
self.validate_expr(self._data.get('base-input', {}))
if isinstance(self._data.get('output'), str):
self.validate_expr(self._data.get('output'))
def get_name(self):
return self._name
def get_description(self):
return self._description
def get_tags(self):
return self._tags
def get_base(self):
return self._base
def get_base_input(self):
return self._base_input
def get_input(self):
return self._input
def get_output(self):
return self._output
class ActionSpecList(base.BaseSpecList):
item_class = ActionSpec
class ActionListSpec(base.BaseListSpec):
item_class = ActionSpec
def get_actions(self):
return self.get_items()
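# Illustrative sketch (hypothetical data, not part of the module): the minimal dict shape
# ActionSpec expects -- a 'name' plus the schema fields above, with 'base' required. Any
# inline "key=value" pairs in 'base' are split off by _parse_cmd_and_input() and merged into
# the base-input dict, as done in __init__ above.
_example_action_data = {
    'name': 'greet',
    'base': 'std.echo output="Hello"',
    'input': [],
}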
|
openstack/mistral
|
mistral/lang/v2/actions.py
|
Python
|
apache-2.0
| 2,707
|
from model.group_address import Address_data
import random
import string
import os.path
import jsonpickle
import getopt
import sys
try:
opts, args = getopt.getopt(sys.argv[1:], "n:f:", ["number of groups", "file"])
except getopt.GetoptError as err:
    # getopt has no usage() helper; report the parsing error and exit instead
    print(err)
sys.exit(2)
n = 5
f = "data/contacts.json"
for o, a in opts:
if o == "-n":
n = int(a)
elif o == "-f":
f = a
def random_string(prefix, maxlen):
symbols = string.ascii_letters + string.digits + " "*7
return prefix + "".join([random.choice(symbols) for x in range(random.randrange(maxlen))])
def random_numbers(maxlen):
numbers = string.digits + " "*2 + "(" + ")" + "-"
return "".join([random.choice(numbers) for x in range(maxlen)])
def random_mail(domen, maxlen):
value = string.ascii_letters + string.digits
return "".join([random.choice(value) for x in range(random.randrange(maxlen))]) + domen
testdata = [
Address_data(firstname=random_string("firstname", 20), middlename=random_string("", 1),
lastname=random_string("lastname", 20), nickname=random_string("nickname", 20),
company=random_string("company", 20), address=random_string("address", 20),
home_phone=random_numbers(10), mobile_phone=random_numbers(10), work_phone=random_numbers(10),
fax_phone=random_numbers(10), email_1=random_mail("@mail.ru", 10), email_2=random_mail("@mail.ru", 10),
home_page=random_string("page", 15))
for x in range(n)
]
constant = [
Address_data(firstname="firstname", middlename="middlename", lastname="lastname", nickname="nickname",
company="company", address="address", home_phone="7874177", mobile_phone="784541212",
work_phone="8776464321", fax_phone="874845421", email_1="321@mail.ru", email_2="123@mail.ru",
home_page="www.page.com")
]
file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", f)
#with open(file, "w") as out:
# out.write(json.dumps(testdata, default=lambda x: x.__dict__, indent=2))
with open(file, "w") as out:
jsonpickle.set_encoder_options("json", indent=2)
out.write(jsonpickle.encode(testdata))
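# Round-trip sketch (illustrative only, not part of the original generator): the file written
# above can be decoded back into Address_data objects with jsonpickle.
with open(file) as inp:
    restored = jsonpickle.decode(inp.read())
assert len(restored) == n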
|
esemin83/python_training
|
generator/contact.py
|
Python
|
apache-2.0
| 2,227
|
# Copyright 2017 Google Inc. and Skytruth Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict, namedtuple
import csv
import datetime
import dateutil.parser
import pytz
import logging
import os
import sys
import tensorflow as tf
import yaml
import numpy as np
import hashlib
import six
from .feature_generation.file_iterator import GCSFile
""" The main column for vessel classification. """
PRIMARY_VESSEL_CLASS_COLUMN = 'label'
#TODO: (bitsofbits) think about extracting to config file
# The 'real' categories for multihotness are the fine categories, which 'coarse' and 'fishing'
# are defined in terms of. Any number of coarse categories, even with overlapping values can
# be defined in principle, although at present the interaction between the multihot and non-multihot
# versions makes that more complicated.
try:
yaml_load = yaml.safe_load
except:
yaml_load = yaml.load
raw_schema = '''
unknown:
non_fishing:
passenger:
gear:
fish_factory:
cargo_or_tanker:
bunker_or_tanker:
bunker:
tanker:
cargo_or_reefer:
cargo:
reefer:
specialized_reefer:
container_reefer:
fish_tender:
well_boat:
patrol_vessel:
research:
dive_vessel:
submarine:
dredge_non_fishing:
supply_vessel:
tug:
seismic_vessel:
helicopter:
other_not_fishing:
fishing:
squid_jigger:
drifting_longlines:
pole_and_line:
other_fishing:
trollers:
fixed_gear:
pots_and_traps:
set_longlines:
set_gillnets:
trawlers:
dredge_fishing:
seiners:
purse_seines:
tuna_purse_seines:
other_purse_seines:
other_seines:
driftnets:
'''
schema = yaml.safe_load(raw_schema)
def atomic(obj):
for k, v in obj.items():
if v is None or isinstance(v, str):
yield k
else:
for x in atomic(v):
yield x
def categories(obj, include_atomic=True):
for k, v in obj.items():
if v is None or isinstance(v, str):
if include_atomic:
yield k, [k]
else:
yield (k, list(atomic(v)))
for x in categories(v, include_atomic=include_atomic):
yield x
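# Illustrative example (toy schema; not part of the module): atomic() yields the leaf labels
# of a nested schema, while categories() maps every node to the leaf labels it covers.
_toy = {'seiners': {'purse_seines': None, 'other_seines': None}}
assert sorted(atomic(_toy)) == ['other_seines', 'purse_seines']
assert sorted(dict(categories(_toy))['seiners']) == ['other_seines', 'purse_seines']
assert dict(categories(_toy))['purse_seines'] == ['purse_seines']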
VESSEL_CLASS_DETAILED_NAMES = sorted(atomic(schema))
VESSEL_CATEGORIES = sorted(categories(schema))
TRAINING_SPLIT = 'Training'
TEST_SPLIT = 'Test'
FishingRange = namedtuple('FishingRange',
['start_time', 'end_time', 'is_fishing'])
def stable_hash(x):
x = six.ensure_binary(x)
digest = hashlib.blake2b(six.ensure_binary(x)).hexdigest()[-8:]
return int(digest, 16)
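# Usage note (illustrative only): stable_hash() is deterministic across interpreter runs
# because blake2b is unsalted, unlike the builtin hash(); str and bytes inputs hash
# identically thanks to six.ensure_binary().
assert stable_hash('ABC123') == stable_hash(b'ABC123')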
class VesselMetadata(object):
def __init__(self,
metadata_dict,
fishing_ranges_map):
self.metadata_by_split = metadata_dict
self.metadata_by_id = {}
self.fishing_ranges_map = fishing_ranges_map
self.id_map_int2bytes = {}
for split, vessels in metadata_dict.items():
for id_, data in vessels.items():
id_ = six.ensure_binary(id_)
self.metadata_by_id[id_] = data
idhash = stable_hash(id_)
self.id_map_int2bytes[idhash] = id_
intersection_ids = set(self.metadata_by_id.keys()).intersection(
set(fishing_ranges_map.keys()))
logging.info("Metadata for %d ids.", len(self.metadata_by_id))
logging.info("Fishing ranges for %d ids.", len(fishing_ranges_map))
logging.info("Vessels with both types of data: %d",
len(intersection_ids))
def vessel_weight(self, id_):
return self.metadata_by_id[id_][1]
def vessel_label(self, label_name, id_):
return self.metadata_by_id[id_][0][label_name]
def ids_for_split(self, split):
assert split in (TRAINING_SPLIT, TEST_SPLIT)
# Check to make sure we don't have leakage
if (set(self.metadata_by_split[TRAINING_SPLIT].keys()) &
set(self.metadata_by_split[TEST_SPLIT].keys())):
logging.warning('id in both training and test split')
return self.metadata_by_split[split].keys()
def weighted_training_list(self,
random_state,
split,
max_replication_factor,
row_filter=lambda row: True,
boundary=1):
replicated_ids = []
logging.info("Training ids: %d", len(self.ids_for_split(split)))
fishing_ranges_ids = []
for id_, (row, weight) in self.metadata_by_split[split].items():
if row_filter(row):
if id_ in self.fishing_ranges_map:
fishing_ranges_ids.append(id_)
weight = min(weight, max_replication_factor)
int_n = int(weight)
replicated_ids += ([id_] * int_n)
frac_n = weight - float(int_n)
if (random_state.uniform(0.0, 1.0) <= frac_n):
replicated_ids.append(id_)
missing = (-len(replicated_ids)) % boundary
if missing:
replicated_ids = np.concatenate(
[replicated_ids,
np.random.choice(replicated_ids, missing)])
random_state.shuffle(replicated_ids)
logging.info("Replicated training ids: %d", len(replicated_ids))
logging.info("Fishing range ids: %d", len(fishing_ranges_ids))
return replicated_ids
def fishing_range_only_list(self, random_state, split):
replicated_ids = []
fishing_id_set = set(
[k for (k, v) in self.fishing_ranges_map.items() if v])
fishing_range_only_ids = [id_
for id_ in self.ids_for_split(split)
if id_ in fishing_id_set]
logging.info("Fishing range training ids: %d / %d",
len(fishing_range_only_ids),
len(self.ids_for_split(split)))
return fishing_range_only_ids
def read_vessel_time_weighted_metadata_lines(available_ids, lines,
fishing_range_dict, split):
""" For a set of vessels, read metadata; use flat weights
Args:
available_ids: a set of all ids for which we have feature data.
lines: a list of comma-separated vessel metadata lines. Columns are
the id and a set of vessel type columns, containing at least one
called 'label' being the primary/coarse type of the vessel e.g.
(Longliner/Passenger etc.).
fishing_range_dict: dictionary of mapping id to lists of fishing ranges
Returns:
A VesselMetadata object with weights and labels for each vessel.
"""
metadata_dict = {TRAINING_SPLIT : {}, TEST_SPLIT : {}}
min_time_per_id = np.inf
for row in lines:
id_ = six.ensure_binary(row['id'].strip())
if id_ in available_ids:
if id_ not in fishing_range_dict:
continue
            # Is this id included only to suppress false positives?
            # Symptom: fishing score for this id never different from 0.
item_split = raw_item_split = row['split']
if raw_item_split in '0123456789':
if int(raw_item_split) == split:
item_split = TEST_SPLIT
else:
item_split = TRAINING_SPLIT
if item_split not in (TRAINING_SPLIT, TEST_SPLIT):
logging.warning(
'id %s has no valid split assigned (%s); using for Training',
                    id_, raw_item_split)
                item_split = TRAINING_SPLIT
time_for_this_id = 0
for rng in fishing_range_dict[id_]:
time_for_this_id += (
rng.end_time - rng.start_time).total_seconds()
metadata_dict[item_split][id_] = (row, time_for_this_id)
if split is None and raw_item_split in '0123456789':
# Test on everything even though we are training on everything
metadata_dict[TEST_SPLIT][id_] = (row, time_for_this_id)
if time_for_this_id:
min_time_per_id = min(min_time_per_id, time_for_this_id)
# This weighting is fiddly. We are keeping it for now to match up
# with older data, but should replace when we move to sets, etc.
MAX_WEIGHT = 100.0
for split_dict in metadata_dict.values():
for id_ in split_dict:
row, time = split_dict[id_]
split_dict[id_] = (row, min(MAX_WEIGHT, time / min_time_per_id))
return VesselMetadata(metadata_dict, fishing_range_dict)
def read_vessel_time_weighted_metadata(available_ids,
metadata_file,
fishing_range_dict={},
split=0):
reader = metadata_file_reader(metadata_file)
return read_vessel_time_weighted_metadata_lines(available_ids, reader,
fishing_range_dict,
split)
def read_vessel_multiclass_metadata_lines(available_ids, lines,
fishing_range_dict):
""" For a set of vessels, read metadata and calculate class weights.
Args:
available_ids: a set of all ids for which we have feature data.
lines: a list of comma-separated vessel metadata lines. Columns are
the id and a set of vessel type columns, containing at least one
called 'label' being the primary/coarse type of the vessel e.g.
(Longliner/Passenger etc.).
fishing_range_dict: dictionary of mapping id to lists of fishing ranges
Returns:
A VesselMetadata object with weights and labels for each vessel.
"""
vessel_type_set = set()
dataset_kind_counts = defaultdict(lambda: defaultdict(lambda: 0))
vessel_types = []
cat_map = {k: v for (k, v) in VESSEL_CATEGORIES}
available_ids = set(available_ids)
for row in lines:
id_ = six.ensure_binary(row['id'].strip())
if id_ not in available_ids:
continue
raw_vessel_type = row[PRIMARY_VESSEL_CLASS_COLUMN]
if not raw_vessel_type:
continue
atomic_types = set()
for kind in raw_vessel_type.split('|'):
try:
for atm in cat_map[kind]:
atomic_types.add(atm)
            except Exception as err:  # StandardError exists only on Python 2
logging.warning('unknown vessel type: {}\n{}'.format(kind, err))
if not atomic_types:
continue
scale = 1.0 / len(atomic_types)
split = row['split'].strip()
assert split in ('Training', 'Test'), repr(split)
vessel_types.append((id_, split, raw_vessel_type, row))
for atm in atomic_types:
dataset_kind_counts[split][atm] += scale
vessel_type_set |= atomic_types
# else:
# logging.warning('No training data for %s, (%s) %s %s', id_, sorted(available_ids)[:10],
# type(id_), type(sorted(available_ids)[0]))
# # Calculate weights for each vessel type per split, for
# # now use weights of sqrt(max_count / count)
dataset_kind_weights = defaultdict(lambda: {})
for split, counts in dataset_kind_counts.items():
max_count = max(counts.values())
for atomic_vessel_type, count in counts.items():
dataset_kind_weights[split][atomic_vessel_type] = np.sqrt(max_count / float(count))
metadata_dict = defaultdict(lambda: {})
for id_, split, raw_vessel_type, row in vessel_types:
if split == 'Training':
weights = []
for kind in raw_vessel_type.split('|'):
                for atm in cat_map.get(kind, ['unknown']):
weights.append(dataset_kind_weights[split][atm])
metadata_dict[split][id_] = (row, np.mean(weights))
elif split == "Test":
metadata_dict[split][id_] = (row, 1.0)
else:
logging.warning("unknown split {}".format(split))
if len(vessel_type_set) == 0:
logging.fatal('No vessel types found for training.')
sys.exit(-1)
logging.info("Vessel types: %s", list(vessel_type_set))
return VesselMetadata(
dict(metadata_dict), fishing_range_dict)
def metadata_file_reader(metadata_file):
"""
"""
with open(metadata_file, 'r') as f:
reader = csv.DictReader(f)
logging.info("Metadata columns: %s", reader.fieldnames)
for row in reader:
yield row
def read_vessel_multiclass_metadata(available_ids,
metadata_file,
fishing_range_dict={}):
reader = metadata_file_reader(metadata_file)
return read_vessel_multiclass_metadata_lines(
available_ids, reader, fishing_range_dict)
def find_available_ids(feature_path):
with tf.Session() as sess:
logging.info('Reading id list file.')
root_output_path = os.path.dirname(feature_path)
# The feature pipeline stage that outputs the id list is sharded to only
# produce a single file, so no need to glob or loop here.
id_path = os.path.join(root_output_path, 'ids/part-00000-of-00001.txt')
logging.info('Reading id list file from {}'.format(id_path))
with GCSFile(id_path) as f:
els = f.read().split(b'\n')
id_list = [id_.strip() for id_ in els if id_.strip() != '']
logging.info('Found %d ids.', len(id_list))
return set(id_list)
def parse_date(date):
try:
unix_timestamp = float(date)
return datetime.datetime.utcfromtimestamp(unix_timestamp).replace(
tzinfo=pytz.utc)
except:
try:
return dateutil.parser.parse(date)
except:
logging.fatal('could not parse date "{}"'.format(date))
raise
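# Illustrative example (not part of the module): both input styles accepted by parse_date()
# yield the same timezone-aware UTC datetime.
assert parse_date('1468800000') == parse_date('2016-07-18T00:00:00Z')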
def read_fishing_ranges(fishing_range_file):
""" Read vessel fishing ranges, return a dict of id to classified fishing
or non-fishing ranges for that vessel.
"""
fishing_range_dict = defaultdict(lambda: [])
with open(fishing_range_file, 'r') as f:
for l in f.readlines()[1:]:
els = l.split(',')
id_ = six.ensure_binary(els[0].strip())
start_time = parse_date(els[1]).replace(tzinfo=pytz.utc)
end_time = parse_date(els[2]).replace(tzinfo=pytz.utc)
is_fishing = float(els[3])
fishing_range_dict[id_].append(
FishingRange(start_time, end_time, is_fishing))
return dict(fishing_range_dict)
def build_multihot_lookup_table():
n_base = len(VESSEL_CLASS_DETAILED_NAMES)
n_categories = len(VESSEL_CATEGORIES)
#
table = np.zeros([n_categories, n_base], dtype=np.int32)
for i, (_, base_labels) in enumerate(VESSEL_CATEGORIES):
for lbl in base_labels:
j = VESSEL_CLASS_DETAILED_NAMES.index(lbl)
table[i, j] = 1
return table
multihot_lookup_table = build_multihot_lookup_table()
def multihot_encode(label):
"""Multihot encode based on fine, coarse and is_fishing label
Args:
label: Tensor (int)
Returns:
Tensor with bits set for every allowable vessel type based on the inputs
"""
tf_multihot_lookup_table = tf.convert_to_tensor(multihot_lookup_table)
return tf.gather(tf_multihot_lookup_table, label)
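# Illustrative check (not part of the module): each row of the lookup table is the multihot
# encoding of one category over VESSEL_CLASS_DETAILED_NAMES, so the 'fishing' row has exactly
# one bit set per fine fishing class.
_fishing_idx = [name for name, _ in VESSEL_CATEGORIES].index('fishing')
assert multihot_lookup_table[_fishing_idx].sum() == len(dict(VESSEL_CATEGORIES)['fishing'])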
|
GlobalFishingWatch/vessel-classification
|
classification/metadata.py
|
Python
|
apache-2.0
| 16,174
|
# Copyright (C) 2014 Universidad Politecnica de Madrid
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.shortcuts import redirect
from horizon import exceptions
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard import fiware_api
from openstack_dashboard.dashboards.idm import utils as idm_utils
from openstack_dashboard.dashboards.idm.home import tables as home_tables
LOG = logging.getLogger('idm_logger')
class IndexView(tables.MultiTableView):
table_classes = (home_tables.OrganizationsTable,
home_tables.ApplicationsTable)
template_name = 'idm/home/index.html'
def dispatch(self, request, *args, **kwargs):
if request.organization.id != request.user.default_project_id:
return redirect("/idm/home_orgs/")
return super(IndexView, self).dispatch(request, *args, **kwargs)
def has_more_data(self, table):
return False
def get_organizations_data(self):
organizations = []
# try:
# organizations = fiware_api.keystone.project_list(
# self.request,
# user=self.request.user.id)
# switchable_organizations = [org.id for org
# in self.request.organizations]
# organizations = sorted(organizations, key=lambda x: x.name.lower())
# for org in organizations:
# if org.id in switchable_organizations:
# setattr(org, 'switchable', True)
# except Exception:
# exceptions.handle(self.request,
# ("Unable to retrieve organization list."))
return idm_utils.filter_default(organizations)
def get_applications_data(self):
applications = []
# try:
# # TODO(garcianavalon) extract to fiware_api
# all_apps = fiware_api.keystone.application_list(self.request)
# apps_with_roles = [a.application_id for a
# in fiware_api.keystone.user_role_assignments(
# self.request,
# user=self.request.user.id,
# organization=self.request.organization)]
# applications = [app for app in all_apps
# if app.id in apps_with_roles]
# applications = sorted(applications, key=lambda x: x.name.lower())
# except Exception:
# exceptions.handle(self.request,
# ("Unable to retrieve application list."))
return idm_utils.filter_default(applications)
|
ging/horizon
|
openstack_dashboard/dashboards/idm/home/views.py
|
Python
|
apache-2.0
| 3,184
|
# -*- coding: utf-8 -*-
#
# Copyright 2015-2021 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Creating ensembles predictions
"""
from .world import world, setup_module, teardown_module
from . import create_source_steps as source_create
from . import create_dataset_steps as dataset_create
from . import create_ensemble_steps as ensemble_create
from . import create_prediction_steps as prediction_create
class TestEnsemblePrediction(object):
def setup(self):
"""
Debug information
"""
print("\n-------------------\nTests in: %s\n" % __name__)
def teardown(self):
"""
Debug information
"""
print("\nEnd of tests in: %s\n-------------------\n" % __name__)
def test_scenario1(self):
"""
Scenario: Successfully creating a prediction from an ensemble:
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <time_1> secs
And I create a dataset
And I wait until the dataset is ready less than <time_2> secs
And I create an ensemble of <number_of_models> models and <tlp> tlp
And I wait until the ensemble is ready less than <time_3> secs
When I create an ensemble prediction for "<data_input>"
And I wait until the prediction is ready less than <time_4> secs
Then the prediction for "<objective>" is "<prediction>"
Examples:
| data | time_1 | time_2 | time_3 | time_4 | number_of_models | tlp | data_input | objective | prediction |
| ../data/iris.csv | 10 | 10 | 50 | 20 | 5 | 1 | {"petal width": 0.5} | 000004 | Iris-versicolor |
| ../data/iris_sp_chars.csv | 10 | 10 | 50 | 20 | 5 | 1 | {"pétal&width\\u0000": 0.5} | 000004 | Iris-versicolor |
| ../data/grades.csv | 10 | 10 | 150 | 20 | 10 | 1 | {"Assignment": 81.22, "Tutorial": 91.95, "Midterm": 79.38, "TakeHome": 105.93} | 000005 | 88.205575 |
| ../data/grades.csv | 10 | 10 | 150 | 20 | 10 | 1 | {"Assignment": 97.33, "Tutorial": 106.74, "Midterm": 76.88, "TakeHome": 108.89} | 000005 | 84.29401 |
"""
print(self.test_scenario1.__doc__)
examples = [
['data/iris.csv', '30', '30', '50', '20', '5', '1', '{"petal width": 0.5}', '000004', 'Iris-versicolor'],
['data/iris_sp_chars.csv', '30', '30', '50', '20', '5', '1', '{"pétal&width\\u0000": 0.5}', '000004', 'Iris-versicolor'],
['data/grades.csv', '30', '30', '150', '20', '10', '1', '{"Assignment": 81.22, "Tutorial": 91.95, "Midterm": 79.38, "TakeHome": 105.93}', '000005', '84.556'],
['data/grades.csv', '30', '30', '150', '20', '10', '1', '{"Assignment": 97.33, "Tutorial": 106.74, "Midterm": 76.88, "TakeHome": 108.89}', '000005', '73.13558']]
for example in examples:
print("\nTesting with:\n", example)
source_create.i_upload_a_file(self, example[0])
source_create.the_source_is_finished(self, example[1])
dataset_create.i_create_a_dataset(self)
dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
ensemble_create.i_create_an_ensemble(self, example[5], example[6])
ensemble_create.the_ensemble_is_finished_in_less_than(self, example[3])
prediction_create.i_create_an_ensemble_prediction(self, example[7])
prediction_create.the_prediction_is_finished_in_less_than(self, example[4])
prediction_create.the_prediction_is(self, example[8], example[9])
|
jaor/python
|
bigml/tests/test_09_ensemble_prediction.py
|
Python
|
apache-2.0
| 4,365
|
# Copyright 2021, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Python tests originally created or extracted from other peoples work. The
# parts were too small to be protected.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def f(a):
global a
|
kayhayen/Nuitka
|
tests/syntax/GlobalForParameter.py
|
Python
|
apache-2.0
| 793
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import codecs
import os
import re
from pkg_resources import resource_string
from pygments.formatters.html import HtmlFormatter
from pygments.styles import get_all_styles
from pants.backend.docgen.targets.doc import Page
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.base.generator import Generator
from pants.base.workunit import WorkUnitLabel
from pants.binaries import binary_util
from pants.build_graph.address import Address
from pants.task.task import Task
from pants.util.dirutil import safe_mkdir
def util():
"""Indirection function so we can lazy-import our utils.
It's an expensive import that invokes re.compile a lot (via markdown and pygments),
so we don't want to incur that cost unless we must.
"""
from pants.backend.docgen.tasks import markdown_to_html_utils
return markdown_to_html_utils
class MarkdownToHtml(Task):
"""Generate HTML from Markdown docs."""
@classmethod
def register_options(cls, register):
register('--code-style', choices=list(get_all_styles()), default='friendly',
fingerprint=True,
help='Use this stylesheet for code highlights.')
register('--open', type=bool,
help='Open the generated documents in a browser.')
register('--fragment', type=bool,
fingerprint=True,
help='Generate a fragment of html to embed in a page.')
register('--ignore-failure', type=bool,
fingerprint=True,
help='Do not consider rendering errors to be build errors.')
@classmethod
def product_types(cls):
return ['markdown_html', 'wiki_html']
def __init__(self, *args, **kwargs):
super(MarkdownToHtml, self).__init__(*args, **kwargs)
self._templates_dir = os.path.join('templates', 'markdown')
self.open = self.get_options().open
self.fragment = self.get_options().fragment
self.code_style = self.get_options().code_style
def execute(self):
# TODO(John Sirois): consider adding change detection
outdir = os.path.join(self.get_options().pants_distdir, 'markdown')
css_path = os.path.join(outdir, 'css', 'codehighlight.css')
css = util().emit_codehighlight_css(css_path, self.code_style)
if css:
self.context.log.info('Emitted {}'.format(css))
def is_page(target):
return isinstance(target, Page)
roots = set()
interior_nodes = set()
if self.open:
dependencies_by_page = self.context.dependents(on_predicate=is_page, from_predicate=is_page)
roots.update(dependencies_by_page.keys())
for dependencies in dependencies_by_page.values():
interior_nodes.update(dependencies)
roots.difference_update(dependencies)
for page in self.context.targets(is_page):
      # There are no in or out edges so we need to show this isolated page.
if not page.dependencies and page not in interior_nodes:
roots.add(page)
with self.context.new_workunit(name='render', labels=[WorkUnitLabel.MULTITOOL]):
plaingenmap = self.context.products.get('markdown_html')
wikigenmap = self.context.products.get('wiki_html')
show = []
for page in self.context.targets(is_page):
def process_page(key, outdir, url_builder, genmap, fragment=False):
if page.format == 'rst':
with self.context.new_workunit(name='rst') as workunit:
html_path = self.process_rst(
workunit,
page,
os.path.join(outdir, util().page_to_html_path(page)),
os.path.join(page.payload.sources.rel_path, page.source),
self.fragment or fragment,
)
else:
with self.context.new_workunit(name='md'):
html_path = self.process_md(
os.path.join(outdir, util().page_to_html_path(page)),
os.path.join(page.payload.sources.rel_path, page.source),
self.fragment or fragment,
url_builder,
css=css,
)
self.context.log.info('Processed {} to {}'.format(page.source, html_path))
relpath = os.path.relpath(html_path, outdir)
genmap.add(key, outdir, [relpath])
return html_path
def url_builder(linked_page):
dest = util().page_to_html_path(linked_page)
src_dir = os.path.dirname(util().page_to_html_path(page))
return linked_page.name, os.path.relpath(dest, src_dir)
page_path = os.path.join(outdir, 'html')
html = process_page(page, page_path, url_builder, plaingenmap)
if css and not self.fragment:
plaingenmap.add(page, self.workdir, list(css_path))
if self.open and page in roots:
show.append(html)
if page.provides:
for wiki in page.provides:
basedir = os.path.join(self.workdir, str(hash(wiki)))
process_page((wiki, page), basedir, wiki.wiki.url_builder, wikigenmap, fragment=True)
if show:
binary_util.ui_open(*show)
PANTS_LINK = re.compile(r'''pants\(['"]([^)]+)['"]\)(#.*)?''')
def process_md(self, output_path, source, fragmented, url_builder, css=None):
def parse_url(spec):
match = self.PANTS_LINK.match(spec)
if match:
address = Address.parse(match.group(1), relative_to=get_buildroot())
page = self.context.build_graph.get_target(address)
anchor = match.group(2) or ''
if not page:
raise TaskError('Invalid markdown link to pants target: "{}". '.format(match.group(1)) +
'Is your page missing a dependency on this target?')
alias, url = url_builder(page)
return alias, url + anchor
else:
return spec, spec
def build_url(label):
components = label.split('|', 1)
if len(components) == 1:
return parse_url(label.strip())
else:
alias, link = components
_, url = parse_url(link.strip())
return alias, url
wikilinks = util().WikilinksExtension(build_url)
safe_mkdir(os.path.dirname(output_path))
with codecs.open(output_path, 'w', 'utf-8') as output:
source_path = os.path.join(get_buildroot(), source)
with codecs.open(source_path, 'r', 'utf-8') as source_stream:
md_html = util().markdown.markdown(
source_stream.read(),
extensions=['codehilite(guess_lang=False)',
'extra',
'tables',
'toc',
wikilinks,
util().IncludeExcerptExtension(source_path)],
)
if fragmented:
style_css = (HtmlFormatter(style=self.code_style)).get_style_defs('.codehilite')
template = resource_string(__name__,
os.path.join(self._templates_dir, 'fragment.mustache'))
generator = Generator(template, style_css=style_css, md_html=md_html)
generator.write(output)
else:
style_link = os.path.relpath(css, os.path.dirname(output_path))
template = resource_string(__name__, os.path.join(self._templates_dir, 'page.mustache'))
generator = Generator(template, style_link=style_link, md_html=md_html)
generator.write(output)
return output.name
def process_rst(self, workunit, page, output_path, source, fragmented):
source_path = os.path.join(get_buildroot(), source)
with codecs.open(source_path, 'r', 'utf-8') as source_stream:
rst_html, returncode = util().rst_to_html(source_stream.read(),
stderr=workunit.output('stderr'))
if returncode != 0:
message = '{} rendered with errors.'.format(source_path)
if self.get_options().ignore_failure:
self.context.log.warn(message)
else:
raise TaskError(message, exit_code=returncode, failed_targets=[page])
template_path = os.path.join(self._templates_dir,
'fragment.mustache' if fragmented else 'page.mustache')
template = resource_string(__name__, template_path)
generator = Generator(template, md_html=rst_html)
safe_mkdir(os.path.dirname(output_path))
with codecs.open(output_path, 'w', 'utf-8') as output:
generator.write(output)
return output.name
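# Illustrative check (not part of the task): what the PANTS_LINK pattern above extracts from a
# wiki-style pants(...) link with an optional anchor.
_m = MarkdownToHtml.PANTS_LINK.match("pants('src/docs:readme')#usage")
assert _m.group(1) == 'src/docs:readme' and _m.group(2) == '#usage'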
|
dbentley/pants
|
src/python/pants/backend/docgen/tasks/markdown_to_html.py
|
Python
|
apache-2.0
| 8,705
|
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from jacket.compute import test
from jacket.compute.virt.disk.mount import api
from jacket.compute.virt.disk.mount import block
from jacket.compute.virt.disk.mount import loop
from jacket.compute.virt.disk.mount import nbd
from jacket.compute.virt.image import model as imgmodel
PARTITION = 77
ORIG_DEVICE = "/dev/null"
AUTOMAP_PARTITION = "/dev/nullp77"
MAP_PARTITION = "/dev/mapper/nullp77"
class MountTestCase(test.NoDBTestCase):
def setUp(self):
super(MountTestCase, self).setUp()
def _test_map_dev(self, partition):
mount = api.Mount(mock.sentinel.image, mock.sentinel.mount_dir)
mount.device = ORIG_DEVICE
mount.partition = partition
mount.map_dev()
return mount
@mock.patch('compute.utils.trycmd')
def _test_map_dev_with_trycmd(self, partition, trycmd):
trycmd.return_value = [None, None]
mount = self._test_map_dev(partition)
self.assertEqual(1, trycmd.call_count) # don't care about args
return mount
def _exists_effect(self, data):
def exists_effect(filename):
try:
v = data[filename]
if isinstance(v, list):
if len(v) > 0:
return v.pop(0)
self.fail("Out of items for: %s" % filename)
return v
except KeyError:
self.fail("Unexpected call with: %s" % filename)
return exists_effect
def _check_calls(self, exists, filenames):
self.assertEqual([mock.call(x) for x in filenames],
exists.call_args_list)
@mock.patch('os.path.exists')
def test_map_dev_partition_search(self, exists):
exists.side_effect = self._exists_effect({
ORIG_DEVICE: True})
mount = self._test_map_dev(-1)
self._check_calls(exists, [ORIG_DEVICE])
self.assertNotEqual("", mount.error)
self.assertFalse(mount.mapped)
@mock.patch('os.path.exists')
def test_map_dev_good(self, exists):
exists.side_effect = self._exists_effect({
ORIG_DEVICE: True,
AUTOMAP_PARTITION: False,
MAP_PARTITION: [False, True]})
mount = self._test_map_dev_with_trycmd(PARTITION)
self._check_calls(exists,
[ORIG_DEVICE, AUTOMAP_PARTITION, MAP_PARTITION, MAP_PARTITION])
self.assertEqual("", mount.error)
self.assertTrue(mount.mapped)
@mock.patch('os.path.exists')
def test_map_dev_error(self, exists):
exists.side_effect = self._exists_effect({
ORIG_DEVICE: True,
AUTOMAP_PARTITION: False,
MAP_PARTITION: False})
mount = self._test_map_dev_with_trycmd(PARTITION)
self._check_calls(exists,
[ORIG_DEVICE, AUTOMAP_PARTITION, MAP_PARTITION, MAP_PARTITION])
self.assertNotEqual("", mount.error)
self.assertFalse(mount.mapped)
@mock.patch('os.path.exists')
def test_map_dev_automap(self, exists):
exists.side_effect = self._exists_effect({
ORIG_DEVICE: True,
AUTOMAP_PARTITION: True})
mount = self._test_map_dev(PARTITION)
self._check_calls(exists,
[ORIG_DEVICE, AUTOMAP_PARTITION, AUTOMAP_PARTITION])
self.assertEqual(AUTOMAP_PARTITION, mount.mapped_device)
self.assertTrue(mount.automapped)
self.assertTrue(mount.mapped)
@mock.patch('os.path.exists')
def test_map_dev_else(self, exists):
exists.side_effect = self._exists_effect({
ORIG_DEVICE: True,
AUTOMAP_PARTITION: True})
mount = self._test_map_dev(None)
self._check_calls(exists, [ORIG_DEVICE])
self.assertEqual(ORIG_DEVICE, mount.mapped_device)
self.assertFalse(mount.automapped)
self.assertTrue(mount.mapped)
def test_instance_for_format_raw(self):
image = imgmodel.LocalFileImage("/some/file.raw",
imgmodel.FORMAT_RAW)
mount_dir = '/mount/dir'
partition = -1
inst = api.Mount.instance_for_format(image, mount_dir, partition)
self.assertIsInstance(inst, loop.LoopMount)
def test_instance_for_format_qcow2(self):
image = imgmodel.LocalFileImage("/some/file.qcows",
imgmodel.FORMAT_QCOW2)
mount_dir = '/mount/dir'
partition = -1
inst = api.Mount.instance_for_format(image, mount_dir, partition)
self.assertIsInstance(inst, nbd.NbdMount)
def test_instance_for_format_block(self):
image = imgmodel.LocalBlockImage(
"/dev/mapper/instances--instance-0000001_disk",)
mount_dir = '/mount/dir'
partition = -1
inst = api.Mount.instance_for_format(image, mount_dir, partition)
self.assertIsInstance(inst, block.BlockMount)
def test_instance_for_device_loop(self):
image = mock.MagicMock()
mount_dir = '/mount/dir'
partition = -1
device = '/dev/loop0'
inst = api.Mount.instance_for_device(image, mount_dir, partition,
device)
self.assertIsInstance(inst, loop.LoopMount)
def test_instance_for_device_loop_partition(self):
image = mock.MagicMock()
mount_dir = '/mount/dir'
partition = 1
device = '/dev/mapper/loop0p1'
inst = api.Mount.instance_for_device(image, mount_dir, partition,
device)
self.assertIsInstance(inst, loop.LoopMount)
def test_instance_for_device_nbd(self):
image = mock.MagicMock()
mount_dir = '/mount/dir'
partition = -1
device = '/dev/nbd0'
inst = api.Mount.instance_for_device(image, mount_dir, partition,
device)
self.assertIsInstance(inst, nbd.NbdMount)
def test_instance_for_device_nbd_partition(self):
image = mock.MagicMock()
mount_dir = '/mount/dir'
partition = 1
device = '/dev/mapper/nbd0p1'
inst = api.Mount.instance_for_device(image, mount_dir, partition,
device)
self.assertIsInstance(inst, nbd.NbdMount)
def test_instance_for_device_block(self):
image = mock.MagicMock()
mount_dir = '/mount/dir'
partition = -1
device = '/dev/mapper/instances--instance-0000001_disk'
inst = api.Mount.instance_for_device(image, mount_dir, partition,
device)
self.assertIsInstance(inst, block.BlockMount)
    def test_instance_for_device_block_partition(self):
image = mock.MagicMock()
mount_dir = '/mount/dir'
partition = 1
device = '/dev/mapper/instances--instance-0000001_diskp1'
inst = api.Mount.instance_for_device(image, mount_dir, partition,
device)
self.assertIsInstance(inst, block.BlockMount)
|
HybridF5/jacket
|
jacket/tests/compute/unit/virt/disk/mount/test_api.py
|
Python
|
apache-2.0
| 7,718
|
#!/usr/bin/env python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example updates a creative set by adding a companion creative.
To determine which creative sets exist, run get_all_creative_sets.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
# Set the ID of the creative set to update.
CREATIVE_SET_ID = 'INSERT_CREATIVE_SET_ID_HERE'
COMPANION_CREATIVE_ID = 'INSERT_COMPANION_CREATIVE_ID_HERE'
def main(client, creative_set_id, companion_creative_id):
# Initialize appropriate service.
creative_set_service = client.GetService('CreativeSetService',
version='v202108')
# Create statement to select a single creative set by ID.
statement = (ad_manager.StatementBuilder(version='v202108')
.Where('id = :creativeSetId')
.WithBindVariable('creativeSetId', int(creative_set_id)))
# Get creative set.
response = creative_set_service.getCreativeSetsByStatement(
statement.ToStatement())
if 'results' in response and len(response['results']):
    updated_creative_sets = []
    for creative_set in response['results']:
      creative_set['companionCreativeIds'].append(companion_creative_id)
      updated_creative_sets.append(creative_set)
    # Update the creative sets on the server.
    creative_sets = creative_set_service.updateCreativeSet(updated_creative_sets)
# Display results.
for creative_set in creative_sets:
print(('Creative set with ID "%s", master creative ID "%s", and '
'companion creative IDs {%s} was updated.')
% (creative_set['id'], creative_set['masterCreativeId'],
','.join(creative_set['companionCreativeIds'])))
else:
print('No creative sets found to update.')
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client, CREATIVE_SET_ID, COMPANION_CREATIVE_ID)
|
googleads/googleads-python-lib
|
examples/ad_manager/v202108/creative_set_service/update_creative_set.py
|
Python
|
apache-2.0
| 2,796
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Defines interface for DB access.
The underlying driver is loaded as a :class:`LazyPluggable`.
Functions in this module are imported into the nova.db namespace. Call these
functions from nova.db namespace, not the nova.db.api namespace.
All functions in this module return objects that implement a dictionary-like
interface. Currently, many of these objects are sqlalchemy objects that
implement a dictionary interface. However, a future goal is to have all of
these objects be simple dictionaries.
**Related Flags**
:db_backend: string to lookup in the list of LazyPluggable backends.
`sqlalchemy` is the only supported backend right now.
:sql_connection: string specifying the sqlalchemy connection to use, like:
`sqlite:///var/lib/nova/nova.sqlite`.
:enable_new_services: when adding a new service to the database, is it in the
pool of available hardware (Default: True)
"""
from oslo.config import cfg
from nova.cells import rpcapi as cells_rpcapi
from nova import exception
from nova.openstack.common.db import api as db_api
from nova.openstack.common import log as logging
db_opts = [
cfg.BoolOpt('enable_new_services',
default=True,
help='Services to be added to the available pool on create'),
cfg.StrOpt('instance_name_template',
default='instance-%08x',
help='Template string to be used to generate instance names'),
cfg.StrOpt('snapshot_name_template',
default='snapshot-%s',
help='Template string to be used to generate snapshot names'),
]
CONF = cfg.CONF
CONF.register_opts(db_opts)
_BACKEND_MAPPING = {'sqlalchemy': 'nova.db.sqlalchemy.api'}
IMPL = db_api.DBAPI(backend_mapping=_BACKEND_MAPPING)
LOG = logging.getLogger(__name__)
class NoMoreNetworks(exception.NovaException):
"""No more available networks."""
pass
class NoMoreTargets(exception.NovaException):
"""No more available targets."""
pass
###################
def constraint(**conditions):
"""Return a constraint object suitable for use with some updates."""
return IMPL.constraint(**conditions)
def equal_any(*values):
"""Return an equality condition object suitable for use in a constraint.
Equal_any conditions require that a model object's attribute equal any
one of the given values.
"""
return IMPL.equal_any(*values)
def not_equal(*values):
"""Return an inequality condition object suitable for use in a constraint.
Not_equal conditions require that a model object's attribute differs from
all of the given values.
"""
return IMPL.not_equal(*values)
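# Illustrative constraint usage (sketch only; the column name and values
# are hypothetical):
#
#     cons = constraint(task_state=equal_any(None, 'deleting'))
#     instance_destroy(ctxt, instance_uuid, constraint=cons)
#
# The destroy only proceeds while the instance still satisfies the given
# condition; otherwise the backend raises a constraint-not-met error.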
###################
def service_destroy(context, instance_id):
"""Destroy the service or raise if it does not exist."""
return IMPL.service_destroy(context, instance_id)
def service_get(context, service_id):
"""Get a service or raise if it does not exist."""
return IMPL.service_get(context, service_id)
def service_get_by_host_and_topic(context, host, topic):
"""Get a service by host it's on and topic it listens to."""
return IMPL.service_get_by_host_and_topic(context, host, topic)
def service_get_all(context, disabled=None):
"""Get all services."""
return IMPL.service_get_all(context, disabled)
def service_get_all_by_topic(context, topic):
"""Get all services for a given topic."""
return IMPL.service_get_all_by_topic(context, topic)
def service_get_all_by_host(context, host):
"""Get all services for a given host."""
return IMPL.service_get_all_by_host(context, host)
def service_get_by_compute_host(context, host):
"""Get the service entry for a given compute host.
Returns the service entry joined with the compute_node entry.
"""
return IMPL.service_get_by_compute_host(context, host)
def service_get_by_args(context, host, binary):
"""Get the state of a service by node name and binary."""
return IMPL.service_get_by_args(context, host, binary)
def service_create(context, values):
"""Create a service from the values dictionary."""
return IMPL.service_create(context, values)
def service_update(context, service_id, values):
"""Set the given properties on a service and update it.
Raises NotFound if service does not exist.
"""
return IMPL.service_update(context, service_id, values)
###################
def compute_node_get(context, compute_id):
"""Get a computeNode."""
return IMPL.compute_node_get(context, compute_id)
def compute_node_get_all(context):
"""Get all computeNodes."""
return IMPL.compute_node_get_all(context)
def compute_node_search_by_hypervisor(context, hypervisor_match):
"""Get computeNodes given a hypervisor hostname match string."""
return IMPL.compute_node_search_by_hypervisor(context, hypervisor_match)
def compute_node_create(context, values):
"""Create a computeNode from the values dictionary."""
return IMPL.compute_node_create(context, values)
def compute_node_update(context, compute_id, values, prune_stats=False):
"""Set the given properties on a computeNode and update it.
Raises ComputeHostNotFound if computeNode does not exist.
"""
return IMPL.compute_node_update(context, compute_id, values, prune_stats)
def compute_node_delete(context, compute_id):
"""Delete a computeNode from the database.
Raises ComputeHostNotFound if computeNode does not exist.
"""
return IMPL.compute_node_delete(context, compute_id)
def compute_node_statistics(context):
return IMPL.compute_node_statistics(context)
###################
def certificate_create(context, values):
"""Create a certificate from the values dictionary."""
return IMPL.certificate_create(context, values)
def certificate_get_all_by_project(context, project_id):
"""Get all certificates for a project."""
return IMPL.certificate_get_all_by_project(context, project_id)
def certificate_get_all_by_user(context, user_id):
"""Get all certificates for a user."""
return IMPL.certificate_get_all_by_user(context, user_id)
def certificate_get_all_by_user_and_project(context, user_id, project_id):
"""Get all certificates for a user and project."""
return IMPL.certificate_get_all_by_user_and_project(context,
user_id,
project_id)
###################
def floating_ip_get(context, id):
return IMPL.floating_ip_get(context, id)
def floating_ip_get_pools(context):
"""Returns a list of floating ip pools."""
return IMPL.floating_ip_get_pools(context)
def floating_ip_allocate_address(context, project_id, pool):
"""Allocate free floating ip from specified pool and return the address.
Raises if one is not available.
"""
return IMPL.floating_ip_allocate_address(context, project_id, pool)
def floating_ip_bulk_create(context, ips):
"""Create a lot of floating ips from the values dictionary."""
return IMPL.floating_ip_bulk_create(context, ips)
def floating_ip_bulk_destroy(context, ips):
"""Destroy a lot of floating ips from the values dictionary."""
return IMPL.floating_ip_bulk_destroy(context, ips)
def floating_ip_create(context, values):
"""Create a floating ip from the values dictionary."""
return IMPL.floating_ip_create(context, values)
def floating_ip_count_by_project(context, project_id, session=None):
"""Count floating ips used by project."""
return IMPL.floating_ip_count_by_project(context, project_id,
session=session)
def floating_ip_deallocate(context, address):
"""Deallocate a floating ip by address."""
return IMPL.floating_ip_deallocate(context, address)
def floating_ip_destroy(context, address):
"""Destroy the floating_ip or raise if it does not exist."""
return IMPL.floating_ip_destroy(context, address)
def floating_ip_disassociate(context, address):
"""Disassociate a floating ip from a fixed ip by address.
:returns: the fixed ip record joined to network record or None
              if the floating ip was not associated with a fixed ip.
"""
return IMPL.floating_ip_disassociate(context, address)
def floating_ip_fixed_ip_associate(context, floating_address,
fixed_address, host):
"""Associate a floating ip to a fixed_ip by address.
:returns: the fixed ip record joined to network record or None
if the ip was already associated to the fixed ip.
"""
return IMPL.floating_ip_fixed_ip_associate(context,
floating_address,
fixed_address,
host)
def floating_ip_get_all(context):
"""Get all floating ips."""
return IMPL.floating_ip_get_all(context)
def floating_ip_get_all_by_host(context, host):
"""Get all floating ips by host."""
return IMPL.floating_ip_get_all_by_host(context, host)
def floating_ip_get_all_by_project(context, project_id):
"""Get all floating ips by project."""
return IMPL.floating_ip_get_all_by_project(context, project_id)
def floating_ip_get_by_address(context, address):
"""Get a floating ip by address or raise if it doesn't exist."""
return IMPL.floating_ip_get_by_address(context, address)
def floating_ip_get_by_fixed_address(context, fixed_address):
"""Get a floating ips by fixed address."""
return IMPL.floating_ip_get_by_fixed_address(context, fixed_address)
def floating_ip_get_by_fixed_ip_id(context, fixed_ip_id):
"""Get a floating ips by fixed address."""
return IMPL.floating_ip_get_by_fixed_ip_id(context, fixed_ip_id)
def floating_ip_update(context, address, values):
"""Update a floating ip by address or raise if it doesn't exist."""
return IMPL.floating_ip_update(context, address, values)
def floating_ip_set_auto_assigned(context, address):
"""Set auto_assigned flag to floating ip."""
return IMPL.floating_ip_set_auto_assigned(context, address)
def dnsdomain_list(context):
"""Get a list of all zones in our database, public and private."""
return IMPL.dnsdomain_list(context)
def dnsdomain_register_for_zone(context, fqdomain, zone):
"""Associated a DNS domain with an availability zone."""
return IMPL.dnsdomain_register_for_zone(context, fqdomain, zone)
def dnsdomain_register_for_project(context, fqdomain, project):
"""Associated a DNS domain with a project id."""
return IMPL.dnsdomain_register_for_project(context, fqdomain, project)
def dnsdomain_unregister(context, fqdomain):
"""Purge associations for the specified DNS zone."""
return IMPL.dnsdomain_unregister(context, fqdomain)
def dnsdomain_get(context, fqdomain):
"""Get the db record for the specified domain."""
return IMPL.dnsdomain_get(context, fqdomain)
####################
def migration_update(context, id, values):
"""Update a migration instance."""
return IMPL.migration_update(context, id, values)
def migration_create(context, values):
"""Create a migration record."""
return IMPL.migration_create(context, values)
def migration_get(context, migration_id):
"""Finds a migration by the id."""
return IMPL.migration_get(context, migration_id)
def migration_get_by_instance_and_status(context, instance_uuid, status):
"""Finds a migration by the instance uuid its migrating."""
return IMPL.migration_get_by_instance_and_status(context, instance_uuid,
status)
def migration_get_unconfirmed_by_dest_compute(context, confirm_window,
dest_compute):
"""
Finds all unconfirmed migrations within the confirmation window for
a specific destination compute host.
"""
return IMPL.migration_get_unconfirmed_by_dest_compute(context,
confirm_window, dest_compute)
def migration_get_in_progress_by_host_and_node(context, host, node):
"""Finds all migrations for the given host + node that are not yet
confirmed or reverted.
"""
return IMPL.migration_get_in_progress_by_host_and_node(context, host, node)
####################
def fixed_ip_associate(context, address, instance_uuid, network_id=None,
reserved=False):
"""Associate fixed ip to instance.
Raises if fixed ip is not available.
"""
return IMPL.fixed_ip_associate(context, address, instance_uuid, network_id,
reserved)
def fixed_ip_associate_pool(context, network_id, instance_uuid=None,
host=None):
"""Find free ip in network and associate it to instance or host.
Raises if one is not available.
"""
return IMPL.fixed_ip_associate_pool(context, network_id,
instance_uuid, host)
def fixed_ip_create(context, values):
"""Create a fixed ip from the values dictionary."""
return IMPL.fixed_ip_create(context, values)
def fixed_ip_bulk_create(context, ips):
"""Create a lot of fixed ips from the values dictionary."""
return IMPL.fixed_ip_bulk_create(context, ips)
def fixed_ip_disassociate(context, address):
"""Disassociate a fixed ip from an instance by address."""
return IMPL.fixed_ip_disassociate(context, address)
def fixed_ip_disassociate_all_by_timeout(context, host, time):
"""Disassociate old fixed ips from host."""
return IMPL.fixed_ip_disassociate_all_by_timeout(context, host, time)
def fixed_ip_get(context, id, get_network=False):
"""Get fixed ip by id or raise if it does not exist.
    If get_network is true, also return the associated network.
"""
return IMPL.fixed_ip_get(context, id, get_network)
def fixed_ip_get_all(context):
"""Get all defined fixed ips."""
return IMPL.fixed_ip_get_all(context)
def fixed_ip_get_by_address(context, address):
"""Get a fixed ip by address or raise if it does not exist."""
return IMPL.fixed_ip_get_by_address(context, address)
def fixed_ip_get_by_address_detailed(context, address):
"""Get detailed fixed ip info by address or raise if it does not exist."""
return IMPL.fixed_ip_get_by_address_detailed(context, address)
def fixed_ip_get_by_floating_address(context, floating_address):
"""Get a fixed ip by a floating address."""
return IMPL.fixed_ip_get_by_floating_address(context, floating_address)
def fixed_ip_get_by_instance(context, instance_uuid):
"""Get fixed ips by instance or raise if none exist."""
return IMPL.fixed_ip_get_by_instance(context, instance_uuid)
def fixed_ip_get_by_network_host(context, network_uuid, host):
"""Get fixed ip for a host in a network."""
return IMPL.fixed_ip_get_by_network_host(context, network_uuid, host)
def fixed_ips_by_virtual_interface(context, vif_id):
"""Get fixed ips by virtual interface or raise if none exist."""
return IMPL.fixed_ips_by_virtual_interface(context, vif_id)
def fixed_ip_update(context, address, values):
"""Create a fixed ip from the values dictionary."""
return IMPL.fixed_ip_update(context, address, values)
def fixed_ip_count_by_project(context, project_id, session=None):
"""Count fixed ips used by project."""
return IMPL.fixed_ip_count_by_project(context, project_id,
session=session)
####################
def virtual_interface_create(context, values):
"""Create a virtual interface record in the database."""
return IMPL.virtual_interface_create(context, values)
def virtual_interface_get(context, vif_id):
"""Gets a virtual interface from the table."""
return IMPL.virtual_interface_get(context, vif_id)
def virtual_interface_get_by_address(context, address):
"""Gets a virtual interface from the table filtering on address."""
return IMPL.virtual_interface_get_by_address(context, address)
def virtual_interface_get_by_uuid(context, vif_uuid):
"""Gets a virtual interface from the table filtering on vif uuid."""
return IMPL.virtual_interface_get_by_uuid(context, vif_uuid)
def virtual_interface_get_by_instance(context, instance_id):
"""Gets all virtual_interfaces for instance."""
return IMPL.virtual_interface_get_by_instance(context, instance_id)
def virtual_interface_get_by_instance_and_network(context, instance_id,
network_id):
"""Gets all virtual interfaces for instance."""
return IMPL.virtual_interface_get_by_instance_and_network(context,
instance_id,
network_id)
def virtual_interface_delete_by_instance(context, instance_id):
"""Delete virtual interface records associated with instance."""
return IMPL.virtual_interface_delete_by_instance(context, instance_id)
def virtual_interface_get_all(context):
"""Gets all virtual interfaces from the table."""
return IMPL.virtual_interface_get_all(context)
####################
def instance_create(context, values):
"""Create an instance from the values dictionary."""
return IMPL.instance_create(context, values)
def instance_data_get_for_project(context, project_id, session=None):
"""Get (instance_count, total_cores, total_ram) for project."""
return IMPL.instance_data_get_for_project(context, project_id,
session=session)
def instance_destroy(context, instance_uuid, constraint=None,
update_cells=True):
"""Destroy the instance or raise if it does not exist."""
rv = IMPL.instance_destroy(context, instance_uuid, constraint)
if update_cells:
try:
cells_rpcapi.CellsAPI().instance_destroy_at_top(context, rv)
except Exception:
LOG.exception(_("Failed to notify cells of instance destroy"))
return rv
def instance_get_by_uuid(context, uuid):
"""Get an instance or raise if it does not exist."""
return IMPL.instance_get_by_uuid(context, uuid)
def instance_get(context, instance_id):
"""Get an instance or raise if it does not exist."""
return IMPL.instance_get(context, instance_id)
def instance_get_all(context, columns_to_join=None):
"""Get all instances."""
return IMPL.instance_get_all(context, columns_to_join=columns_to_join)
def instance_get_all_by_filters(context, filters, sort_key='created_at',
sort_dir='desc', limit=None, marker=None,
columns_to_join=None):
"""Get all instances that match all filters."""
return IMPL.instance_get_all_by_filters(context, filters, sort_key,
sort_dir, limit=limit,
marker=marker,
columns_to_join=columns_to_join)
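# Illustrative call with hypothetical filter values:
#
#     instance_get_all_by_filters(ctxt,
#                                 {'host': 'compute-01', 'deleted': False},
#                                 sort_key='created_at', sort_dir='desc',
#                                 limit=50)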
def instance_get_active_by_window_joined(context, begin, end=None,
project_id=None, host=None):
"""Get instances and joins active during a certain time window.
Specifying a project_id will filter for a certain project.
Specifying a host will filter for instances on a given compute host.
"""
return IMPL.instance_get_active_by_window_joined(context, begin, end,
project_id, host)
def instance_get_all_by_host(context, host, columns_to_join=None):
"""Get all instances belonging to a host."""
return IMPL.instance_get_all_by_host(context, host, columns_to_join)
def instance_get_all_by_host_and_node(context, host, node):
"""Get all instances belonging to a node."""
return IMPL.instance_get_all_by_host_and_node(context, host, node)
def instance_get_all_by_host_and_not_type(context, host, type_id=None):
"""Get all instances belonging to a host with a different type_id."""
return IMPL.instance_get_all_by_host_and_not_type(context, host, type_id)
def instance_get_floating_address(context, instance_id):
"""Get the first floating ip address of an instance."""
return IMPL.instance_get_floating_address(context, instance_id)
def instance_floating_address_get_all(context, instance_uuid):
"""Get all floating ip addresses of an instance."""
return IMPL.instance_floating_address_get_all(context, instance_uuid)
def instance_get_all_hung_in_rebooting(context, reboot_window):
"""Get all instances stuck in a rebooting state."""
return IMPL.instance_get_all_hung_in_rebooting(context, reboot_window)
def instance_update(context, instance_uuid, values, update_cells=True):
"""Set the given properties on an instance and update it.
Raises NotFound if instance does not exist.
"""
rv = IMPL.instance_update(context, instance_uuid, values)
if update_cells:
try:
cells_rpcapi.CellsAPI().instance_update_at_top(context, rv)
except Exception:
LOG.exception(_("Failed to notify cells of instance update"))
return rv
def instance_update_and_get_original(context, instance_uuid, values):
"""Set the given properties on an instance and update it. Return
a shallow copy of the original instance reference, as well as the
updated one.
:param context: = request context object
:param instance_id: = instance id or uuid
:param values: = dict containing column values
:returns: a tuple of the form (old_instance_ref, new_instance_ref)
Raises NotFound if instance does not exist.
"""
rv = IMPL.instance_update_and_get_original(context, instance_uuid, values)
try:
cells_rpcapi.CellsAPI().instance_update_at_top(context, rv[1])
except Exception:
LOG.exception(_("Failed to notify cells of instance update"))
return rv
def instance_add_security_group(context, instance_id, security_group_id):
"""Associate the given security group with the given instance."""
return IMPL.instance_add_security_group(context, instance_id,
security_group_id)
def instance_remove_security_group(context, instance_id, security_group_id):
"""Disassociate the given security group from the given instance."""
return IMPL.instance_remove_security_group(context, instance_id,
security_group_id)
###################
def instance_info_cache_get(context, instance_uuid):
"""Gets an instance info cache from the table.
:param instance_uuid: = uuid of the info cache's instance
"""
return IMPL.instance_info_cache_get(context, instance_uuid)
def instance_info_cache_update(context, instance_uuid, values,
update_cells=True):
"""Update an instance info cache record in the table.
:param instance_uuid: = uuid of info cache's instance
:param values: = dict containing column values to update
"""
rv = IMPL.instance_info_cache_update(context, instance_uuid, values)
if update_cells:
try:
cells_rpcapi.CellsAPI().instance_info_cache_update_at_top(
context, rv)
except Exception:
LOG.exception(_("Failed to notify cells of instance info "
"cache update"))
return rv
def instance_info_cache_delete(context, instance_uuid):
"""Deletes an existing instance_info_cache record
:param instance_uuid: = uuid of the instance tied to the cache record
"""
return IMPL.instance_info_cache_delete(context, instance_uuid)
###################
def key_pair_create(context, values):
"""Create a key_pair from the values dictionary."""
return IMPL.key_pair_create(context, values)
def key_pair_destroy(context, user_id, name):
"""Destroy the key_pair or raise if it does not exist."""
return IMPL.key_pair_destroy(context, user_id, name)
def key_pair_get(context, user_id, name):
"""Get a key_pair or raise if it does not exist."""
return IMPL.key_pair_get(context, user_id, name)
def key_pair_get_all_by_user(context, user_id):
"""Get all key_pairs by user."""
return IMPL.key_pair_get_all_by_user(context, user_id)
def key_pair_count_by_user(context, user_id):
"""Count number of key pairs for the given user ID."""
return IMPL.key_pair_count_by_user(context, user_id)
####################
def network_associate(context, project_id, network_id=None, force=False):
"""Associate a free network to a project."""
return IMPL.network_associate(context, project_id, network_id, force)
def network_count_reserved_ips(context, network_id):
"""Return the number of reserved ips in the network."""
return IMPL.network_count_reserved_ips(context, network_id)
def network_create_safe(context, values):
"""Create a network from the values dict.
The network is only returned if the create succeeds. If the create violates
constraints because the network already exists, no exception is raised.
"""
return IMPL.network_create_safe(context, values)
def network_delete_safe(context, network_id):
"""Delete network with key network_id.
This method assumes that the network is not associated with any project
"""
return IMPL.network_delete_safe(context, network_id)
def network_disassociate(context, network_id, disassociate_host=True,
disassociate_project=True):
"""Disassociate the network from project or host and raise if it does
not exist."""
return IMPL.network_disassociate(context, network_id, disassociate_host,
disassociate_project)
def network_get(context, network_id, project_only="allow_none"):
"""Get a network or raise if it does not exist."""
return IMPL.network_get(context, network_id, project_only=project_only)
def network_get_all(context):
"""Return all defined networks."""
return IMPL.network_get_all(context)
def network_get_all_by_uuids(context, network_uuids,
project_only="allow_none"):
"""Return networks by ids."""
return IMPL.network_get_all_by_uuids(context, network_uuids,
project_only=project_only)
# pylint: disable=C0103
def network_in_use_on_host(context, network_id, host=None):
"""Indicates if a network is currently in use on host."""
return IMPL.network_in_use_on_host(context, network_id, host)
def network_get_associated_fixed_ips(context, network_id, host=None):
"""Get all network's ips that have been associated."""
return IMPL.network_get_associated_fixed_ips(context, network_id, host)
def network_get_by_uuid(context, uuid):
"""Get a network by uuid or raise if it does not exist."""
return IMPL.network_get_by_uuid(context, uuid)
def network_get_by_cidr(context, cidr):
"""Get a network by cidr or raise if it does not exist."""
return IMPL.network_get_by_cidr(context, cidr)
def network_get_all_by_instance(context, instance_id):
"""Get all networks by instance id or raise if none exist."""
return IMPL.network_get_all_by_instance(context, instance_id)
def network_get_all_by_host(context, host):
"""All networks for which the given host is the network host."""
return IMPL.network_get_all_by_host(context, host)
def network_set_host(context, network_id, host_id):
"""Safely set the host for network."""
return IMPL.network_set_host(context, network_id, host_id)
def network_update(context, network_id, values):
"""Set the given properties on a network and update it.
Raises NotFound if network does not exist.
"""
return IMPL.network_update(context, network_id, values)
###############
def quota_create(context, project_id, resource, limit):
"""Create a quota for the given project and resource."""
return IMPL.quota_create(context, project_id, resource, limit)
def quota_get(context, project_id, resource):
"""Retrieve a quota or raise if it does not exist."""
return IMPL.quota_get(context, project_id, resource)
def quota_get_all_by_project(context, project_id):
"""Retrieve all quotas associated with a given project."""
return IMPL.quota_get_all_by_project(context, project_id)
def quota_update(context, project_id, resource, limit):
"""Update a quota or raise if it does not exist."""
return IMPL.quota_update(context, project_id, resource, limit)
###################
def quota_class_create(context, class_name, resource, limit):
"""Create a quota class for the given name and resource."""
return IMPL.quota_class_create(context, class_name, resource, limit)
def quota_class_get(context, class_name, resource):
"""Retrieve a quota class or raise if it does not exist."""
return IMPL.quota_class_get(context, class_name, resource)
def quota_class_get_all_by_name(context, class_name):
"""Retrieve all quotas associated with a given quota class."""
return IMPL.quota_class_get_all_by_name(context, class_name)
def quota_class_update(context, class_name, resource, limit):
"""Update a quota class or raise if it does not exist."""
return IMPL.quota_class_update(context, class_name, resource, limit)
###################
def quota_usage_get(context, project_id, resource):
"""Retrieve a quota usage or raise if it does not exist."""
return IMPL.quota_usage_get(context, project_id, resource)
def quota_usage_get_all_by_project(context, project_id):
"""Retrieve all usage associated with a given resource."""
return IMPL.quota_usage_get_all_by_project(context, project_id)
def quota_usage_update(context, project_id, resource, **kwargs):
"""Update a quota usage or raise if it does not exist."""
return IMPL.quota_usage_update(context, project_id, resource, **kwargs)
###################
def reservation_create(context, uuid, usage, project_id, resource, delta,
expire):
"""Create a reservation for the given project and resource."""
return IMPL.reservation_create(context, uuid, usage, project_id,
resource, delta, expire)
def reservation_get(context, uuid):
"""Retrieve a reservation or raise if it does not exist."""
return IMPL.reservation_get(context, uuid)
###################
def quota_reserve(context, resources, quotas, deltas, expire,
until_refresh, max_age, project_id=None):
"""Check quotas and create appropriate reservations."""
return IMPL.quota_reserve(context, resources, quotas, deltas, expire,
until_refresh, max_age, project_id=project_id)
def reservation_commit(context, reservations, project_id=None):
"""Commit quota reservations."""
return IMPL.reservation_commit(context, reservations,
project_id=project_id)
def reservation_rollback(context, reservations, project_id=None):
"""Roll back quota reservations."""
return IMPL.reservation_rollback(context, reservations,
project_id=project_id)
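# Typical reserve/commit flow (sketch only; `resources`, `quotas` and
# `expire` are assumed to come from the quota engine, e.g. nova.quota):
#
#     rsvns = quota_reserve(ctxt, resources, quotas, {'instances': 1},
#                           expire, until_refresh=None, max_age=0)
#     try:
#         ...  # perform the work that consumes the quota
#         reservation_commit(ctxt, rsvns)
#     except Exception:
#         reservation_rollback(ctxt, rsvns)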
def quota_destroy_all_by_project(context, project_id):
"""Destroy all quotas associated with a given project."""
return IMPL.quota_destroy_all_by_project(context, project_id)
def reservation_expire(context):
"""Roll back any expired reservations."""
return IMPL.reservation_expire(context)
###################
def get_ec2_volume_id_by_uuid(context, volume_id):
return IMPL.get_ec2_volume_id_by_uuid(context, volume_id)
def get_volume_uuid_by_ec2_id(context, ec2_id):
return IMPL.get_volume_uuid_by_ec2_id(context, ec2_id)
def ec2_volume_create(context, volume_id, forced_id=None):
return IMPL.ec2_volume_create(context, volume_id, forced_id)
def get_snapshot_uuid_by_ec2_id(context, ec2_id):
return IMPL.get_snapshot_uuid_by_ec2_id(context, ec2_id)
def get_ec2_snapshot_id_by_uuid(context, snapshot_id):
return IMPL.get_ec2_snapshot_id_by_uuid(context, snapshot_id)
def ec2_snapshot_create(context, snapshot_id, forced_id=None):
return IMPL.ec2_snapshot_create(context, snapshot_id, forced_id)
####################
def block_device_mapping_create(context, values):
"""Create an entry of block device mapping."""
return IMPL.block_device_mapping_create(context, values)
def block_device_mapping_update(context, bdm_id, values):
"""Update an entry of block device mapping."""
return IMPL.block_device_mapping_update(context, bdm_id, values)
def block_device_mapping_update_or_create(context, values):
"""Update an entry of block device mapping.
If not existed, create a new entry"""
return IMPL.block_device_mapping_update_or_create(context, values)
def block_device_mapping_get_all_by_instance(context, instance_uuid):
"""Get all block device mapping belonging to an instance."""
return IMPL.block_device_mapping_get_all_by_instance(context,
instance_uuid)
def block_device_mapping_destroy(context, bdm_id):
"""Destroy the block device mapping."""
return IMPL.block_device_mapping_destroy(context, bdm_id)
def block_device_mapping_destroy_by_instance_and_device(context, instance_uuid,
device_name):
"""Destroy the block device mapping."""
return IMPL.block_device_mapping_destroy_by_instance_and_device(
context, instance_uuid, device_name)
def block_device_mapping_destroy_by_instance_and_volume(context, instance_uuid,
volume_id):
"""Destroy the block device mapping."""
return IMPL.block_device_mapping_destroy_by_instance_and_volume(
context, instance_uuid, volume_id)
####################
def security_group_get_all(context):
"""Get all security groups."""
return IMPL.security_group_get_all(context)
def security_group_get(context, security_group_id):
"""Get security group by its id."""
return IMPL.security_group_get(context, security_group_id)
def security_group_get_by_name(context, project_id, group_name):
"""Returns a security group with the specified name from a project."""
return IMPL.security_group_get_by_name(context, project_id, group_name)
def security_group_get_by_project(context, project_id):
"""Get all security groups belonging to a project."""
return IMPL.security_group_get_by_project(context, project_id)
def security_group_get_by_instance(context, instance_id):
"""Get security groups to which the instance is assigned."""
return IMPL.security_group_get_by_instance(context, instance_id)
def security_group_exists(context, project_id, group_name):
"""Indicates if a group name exists in a project."""
return IMPL.security_group_exists(context, project_id, group_name)
def security_group_in_use(context, group_id):
"""Indicates if a security group is currently in use."""
return IMPL.security_group_in_use(context, group_id)
def security_group_create(context, values):
"""Create a new security group."""
return IMPL.security_group_create(context, values)
def security_group_ensure_default(context):
"""Ensure default security group exists for a project_id.
Returns a tuple with the first element being a bool indicating
if the default security group previously existed. Second
element is the dict used to create the default security group.
"""
return IMPL.security_group_ensure_default(context)
def security_group_destroy(context, security_group_id):
"""Deletes a security group."""
return IMPL.security_group_destroy(context, security_group_id)
def security_group_count_by_project(context, project_id, session=None):
"""Count number of security groups in a project."""
return IMPL.security_group_count_by_project(context, project_id,
session=session)
####################
def security_group_rule_create(context, values):
"""Create a new security group."""
return IMPL.security_group_rule_create(context, values)
def security_group_rule_get_by_security_group(context, security_group_id):
"""Get all rules for a given security group."""
return IMPL.security_group_rule_get_by_security_group(context,
security_group_id)
def security_group_rule_get_by_security_group_grantee(context,
security_group_id):
"""Get all rules that grant access to the given security group."""
return IMPL.security_group_rule_get_by_security_group_grantee(context,
security_group_id)
def security_group_rule_destroy(context, security_group_rule_id):
"""Deletes a security group rule."""
return IMPL.security_group_rule_destroy(context, security_group_rule_id)
def security_group_rule_get(context, security_group_rule_id):
"""Gets a security group rule."""
return IMPL.security_group_rule_get(context, security_group_rule_id)
def security_group_rule_count_by_group(context, security_group_id):
"""Count rules in a given security group."""
return IMPL.security_group_rule_count_by_group(context, security_group_id)
###################
def security_group_default_rule_get(context, security_group_rule_default_id):
return IMPL.security_group_default_rule_get(context,
security_group_rule_default_id)
def security_group_default_rule_destroy(context,
security_group_rule_default_id):
return IMPL.security_group_default_rule_destroy(
context, security_group_rule_default_id)
def security_group_default_rule_create(context, values):
return IMPL.security_group_default_rule_create(context, values)
def security_group_default_rule_list(context):
return IMPL.security_group_default_rule_list(context)
###################
def provider_fw_rule_create(context, rule):
"""Add a firewall rule at the provider level (all hosts & instances)."""
return IMPL.provider_fw_rule_create(context, rule)
def provider_fw_rule_get_all(context):
"""Get all provider-level firewall rules."""
return IMPL.provider_fw_rule_get_all(context)
def provider_fw_rule_destroy(context, rule_id):
"""Delete a provider firewall rule from the database."""
return IMPL.provider_fw_rule_destroy(context, rule_id)
###################
def project_get_networks(context, project_id, associate=True):
"""Return the network associated with the project.
If associate is true, it will attempt to associate a new
network if one is not found, otherwise it returns None.
"""
return IMPL.project_get_networks(context, project_id, associate)
###################
def console_pool_create(context, values):
"""Create console pool."""
return IMPL.console_pool_create(context, values)
def console_pool_get_by_host_type(context, compute_host, proxy_host,
console_type):
"""Fetch a console pool for a given proxy host, compute host, and type."""
return IMPL.console_pool_get_by_host_type(context,
compute_host,
proxy_host,
console_type)
def console_pool_get_all_by_host_type(context, host, console_type):
"""Fetch all pools for given proxy host and type."""
return IMPL.console_pool_get_all_by_host_type(context,
host,
console_type)
def console_create(context, values):
"""Create a console."""
return IMPL.console_create(context, values)
def console_delete(context, console_id):
"""Delete a console."""
return IMPL.console_delete(context, console_id)
def console_get_by_pool_instance(context, pool_id, instance_uuid):
"""Get console entry for a given instance and pool."""
return IMPL.console_get_by_pool_instance(context, pool_id, instance_uuid)
def console_get_all_by_instance(context, instance_uuid):
"""Get consoles for a given instance."""
return IMPL.console_get_all_by_instance(context, instance_uuid)
def console_get(context, console_id, instance_uuid=None):
"""Get a specific console (possibly on a given instance)."""
return IMPL.console_get(context, console_id, instance_uuid)
##################
def instance_type_create(context, values):
"""Create a new instance type."""
return IMPL.instance_type_create(context, values)
def instance_type_get_all(context, inactive=False, filters=None):
"""Get all instance types."""
return IMPL.instance_type_get_all(
context, inactive=inactive, filters=filters)
def instance_type_get(context, id):
"""Get instance type by id."""
return IMPL.instance_type_get(context, id)
def instance_type_get_by_name(context, name):
"""Get instance type by name."""
return IMPL.instance_type_get_by_name(context, name)
def instance_type_get_by_flavor_id(context, id):
"""Get instance type by flavor id."""
return IMPL.instance_type_get_by_flavor_id(context, id)
def instance_type_destroy(context, name):
"""Delete an instance type."""
return IMPL.instance_type_destroy(context, name)
def instance_type_access_get_by_flavor_id(context, flavor_id):
"""Get flavor access by flavor id."""
return IMPL.instance_type_access_get_by_flavor_id(context, flavor_id)
def instance_type_access_add(context, flavor_id, project_id):
"""Add flavor access for project."""
return IMPL.instance_type_access_add(context, flavor_id, project_id)
def instance_type_access_remove(context, flavor_id, project_id):
"""Remove flavor access for project."""
return IMPL.instance_type_access_remove(context, flavor_id, project_id)
####################
def cell_create(context, values):
"""Create a new child Cell entry."""
return IMPL.cell_create(context, values)
def cell_update(context, cell_name, values):
"""Update a child Cell entry."""
return IMPL.cell_update(context, cell_name, values)
def cell_delete(context, cell_name):
"""Delete a child Cell."""
return IMPL.cell_delete(context, cell_name)
def cell_get(context, cell_name):
"""Get a specific child Cell."""
return IMPL.cell_get(context, cell_name)
def cell_get_all(context):
"""Get all child Cells."""
return IMPL.cell_get_all(context)
####################
def instance_metadata_get_all(context, search_filts):
"""Get all metadata for an instance."""
return IMPL.instance_metadata_get_all(context, search_filts)
def instance_metadata_get(context, instance_uuid):
"""Get all metadata for an instance."""
return IMPL.instance_metadata_get(context, instance_uuid)
def instance_metadata_delete(context, instance_uuid, key):
"""Delete the given metadata item."""
IMPL.instance_metadata_delete(context, instance_uuid, key)
def instance_metadata_update(context, instance_uuid, metadata, delete):
"""Update metadata if it exists, otherwise create it."""
return IMPL.instance_metadata_update(context, instance_uuid,
metadata, delete)
####################
def instance_system_metadata_get(context, instance_uuid):
"""Get all system metadata for an instance."""
return IMPL.instance_system_metadata_get(context, instance_uuid)
def instance_system_metadata_update(context, instance_uuid, metadata, delete):
"""Update metadata if it exists, otherwise create it."""
IMPL.instance_system_metadata_update(
context, instance_uuid, metadata, delete)
####################
def agent_build_create(context, values):
"""Create a new agent build entry."""
return IMPL.agent_build_create(context, values)
def agent_build_get_by_triple(context, hypervisor, os, architecture):
"""Get agent build by hypervisor/OS/architecture triple."""
return IMPL.agent_build_get_by_triple(context, hypervisor, os,
architecture)
def agent_build_get_all(context, hypervisor=None):
"""Get all agent builds."""
return IMPL.agent_build_get_all(context, hypervisor)
def agent_build_destroy(context, agent_update_id):
"""Destroy agent build entry."""
IMPL.agent_build_destroy(context, agent_update_id)
def agent_build_update(context, agent_build_id, values):
"""Update agent build entry."""
IMPL.agent_build_update(context, agent_build_id, values)
####################
def bw_usage_get(context, uuid, start_period, mac):
"""Return bw usage for instance and mac in a given audit period."""
return IMPL.bw_usage_get(context, uuid, start_period, mac)
def bw_usage_get_by_uuids(context, uuids, start_period):
"""Return bw usages for instance(s) in a given audit period."""
return IMPL.bw_usage_get_by_uuids(context, uuids, start_period)
def bw_usage_update(context, uuid, mac, start_period, bw_in, bw_out,
last_ctr_in, last_ctr_out, last_refreshed=None,
update_cells=True):
"""Update cached bandwidth usage for an instance's network based on mac
address. Creates new record if needed.
"""
rv = IMPL.bw_usage_update(context, uuid, mac, start_period, bw_in,
bw_out, last_ctr_in, last_ctr_out, last_refreshed=last_refreshed)
if update_cells:
try:
cells_rpcapi.CellsAPI().bw_usage_update_at_top(context,
uuid, mac, start_period, bw_in, bw_out,
last_ctr_in, last_ctr_out, last_refreshed)
except Exception:
LOG.exception(_("Failed to notify cells of bw_usage update"))
return rv
####################
def instance_type_extra_specs_get(context, flavor_id):
"""Get all extra specs for an instance type."""
return IMPL.instance_type_extra_specs_get(context, flavor_id)
def instance_type_extra_specs_delete(context, flavor_id, key):
"""Delete the given extra specs item."""
IMPL.instance_type_extra_specs_delete(context, flavor_id, key)
def instance_type_extra_specs_update_or_create(context, flavor_id,
extra_specs):
"""Create or update instance type extra specs. This adds or modifies the
key/value pairs specified in the extra specs dict argument"""
IMPL.instance_type_extra_specs_update_or_create(context, flavor_id,
extra_specs)
###################
def vol_get_usage_by_time(context, begin):
"""Return volumes usage that have been updated after a specified time."""
return IMPL.vol_get_usage_by_time(context, begin)
def vol_usage_update(context, id, rd_req, rd_bytes, wr_req, wr_bytes,
instance_id, project_id, user_id,
last_refreshed=None, update_totals=False):
"""Update cached volume usage for a volume
Creates new record if needed."""
return IMPL.vol_usage_update(context, id, rd_req, rd_bytes, wr_req,
wr_bytes, instance_id, project_id, user_id,
last_refreshed=last_refreshed,
update_totals=update_totals)
###################
def s3_image_get(context, image_id):
"""Find local s3 image represented by the provided id."""
return IMPL.s3_image_get(context, image_id)
def s3_image_get_by_uuid(context, image_uuid):
"""Find local s3 image represented by the provided uuid."""
return IMPL.s3_image_get_by_uuid(context, image_uuid)
def s3_image_create(context, image_uuid):
"""Create local s3 image represented by provided uuid."""
return IMPL.s3_image_create(context, image_uuid)
####################
def aggregate_create(context, values, metadata=None):
"""Create a new aggregate with metadata."""
return IMPL.aggregate_create(context, values, metadata)
def aggregate_get(context, aggregate_id):
"""Get a specific aggregate by id."""
return IMPL.aggregate_get(context, aggregate_id)
def aggregate_get_by_host(context, host, key=None):
"""Get a list of aggregates that host belongs to."""
return IMPL.aggregate_get_by_host(context, host, key)
def aggregate_metadata_get_by_host(context, host, key=None):
"""Get metadata for all aggregates that host belongs to.
    Returns a dictionary where each value is a set; this covers the case
    where two aggregates have different values for the same key.
    Optional key filter."""
return IMPL.aggregate_metadata_get_by_host(context, host, key)
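# Illustrative return shape (hypothetical keys and values); each value is a
# set because different aggregates may define different values for a key:
#
#     {'availability_zone': set(['az1', 'az2']), 'ssd': set(['true'])}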
def aggregate_host_get_by_metadata_key(context, key):
"""Get hosts with a specific metadata key metadata for all aggregates.
Returns a dictionary where each key is a hostname and each value is a set
of the key values
return value: {machine: set( az1, az2 )}
"""
return IMPL.aggregate_host_get_by_metadata_key(context, key)
def aggregate_update(context, aggregate_id, values):
"""Update the attributes of an aggregates. If values contains a metadata
key, it updates the aggregate metadata too."""
return IMPL.aggregate_update(context, aggregate_id, values)
def aggregate_delete(context, aggregate_id):
"""Delete an aggregate."""
return IMPL.aggregate_delete(context, aggregate_id)
def aggregate_get_all(context):
"""Get all aggregates."""
return IMPL.aggregate_get_all(context)
def aggregate_metadata_add(context, aggregate_id, metadata, set_delete=False):
"""Add/update metadata. If set_delete=True, it adds only."""
IMPL.aggregate_metadata_add(context, aggregate_id, metadata, set_delete)
def aggregate_metadata_get(context, aggregate_id):
"""Get metadata for the specified aggregate."""
return IMPL.aggregate_metadata_get(context, aggregate_id)
def aggregate_metadata_delete(context, aggregate_id, key):
"""Delete the given metadata key."""
IMPL.aggregate_metadata_delete(context, aggregate_id, key)
def aggregate_host_add(context, aggregate_id, host):
"""Add host to the aggregate."""
IMPL.aggregate_host_add(context, aggregate_id, host)
def aggregate_host_get_all(context, aggregate_id):
"""Get hosts for the specified aggregate."""
return IMPL.aggregate_host_get_all(context, aggregate_id)
def aggregate_host_delete(context, aggregate_id, host):
"""Delete the given host from the aggregate."""
IMPL.aggregate_host_delete(context, aggregate_id, host)
####################
def instance_fault_create(context, values, update_cells=True):
"""Create a new Instance Fault."""
rv = IMPL.instance_fault_create(context, values)
if update_cells:
try:
cells_rpcapi.CellsAPI().instance_fault_create_at_top(context, rv)
except Exception:
LOG.exception(_("Failed to notify cells of instance fault"))
return rv
def instance_fault_get_by_instance_uuids(context, instance_uuids):
"""Get all instance faults for the provided instance_uuids."""
return IMPL.instance_fault_get_by_instance_uuids(context, instance_uuids)
####################
def action_start(context, values):
"""Start an action for an instance."""
return IMPL.action_start(context, values)
def action_finish(context, values):
"""Finish an action for an instance."""
return IMPL.action_finish(context, values)
def actions_get(context, uuid):
"""Get all instance actions for the provided instance."""
return IMPL.actions_get(context, uuid)
def action_get_by_request_id(context, uuid, request_id):
"""Get the action by request_id and given instance."""
return IMPL.action_get_by_request_id(context, uuid, request_id)
def action_event_start(context, values):
"""Start an event on an instance action."""
return IMPL.action_event_start(context, values)
def action_event_finish(context, values):
"""Finish an event on an instance action."""
return IMPL.action_event_finish(context, values)
def action_events_get(context, action_id):
"""Get the events by action id."""
return IMPL.action_events_get(context, action_id)
def action_event_get_by_id(context, action_id, event_id):
return IMPL.action_event_get_by_id(context, action_id, event_id)
####################
def get_ec2_instance_id_by_uuid(context, instance_id):
"""Get ec2 id through uuid from instance_id_mappings table."""
return IMPL.get_ec2_instance_id_by_uuid(context, instance_id)
def get_instance_uuid_by_ec2_id(context, ec2_id):
"""Get uuid through ec2 id from instance_id_mappings table."""
return IMPL.get_instance_uuid_by_ec2_id(context, ec2_id)
def ec2_instance_create(context, instance_uuid, id=None):
"""Create the ec2 id to instance uuid mapping on demand."""
return IMPL.ec2_instance_create(context, instance_uuid, id)
####################
def task_log_end_task(context, task_name,
period_beginning,
period_ending,
host,
errors,
message=None):
"""Mark a task as complete for a given host/time period."""
return IMPL.task_log_end_task(context, task_name,
period_beginning,
period_ending,
host,
errors,
message)
def task_log_begin_task(context, task_name,
period_beginning,
period_ending,
host,
task_items=None,
message=None):
"""Mark a task as started for a given host/time period."""
return IMPL.task_log_begin_task(context, task_name,
period_beginning,
period_ending,
host,
task_items,
message)
def task_log_get_all(context, task_name, period_beginning,
period_ending, host=None, state=None):
return IMPL.task_log_get_all(context, task_name, period_beginning,
period_ending, host, state)
def task_log_get(context, task_name, period_beginning,
period_ending, host, state=None):
return IMPL.task_log_get(context, task_name, period_beginning,
period_ending, host, state)
####################
def archive_deleted_rows(context, max_rows=None):
"""Move up to max_rows rows from production tables to corresponding shadow
tables.
:returns: number of rows archived.
"""
return IMPL.archive_deleted_rows(context, max_rows=max_rows)
def archive_deleted_rows_for_table(context, tablename, max_rows=None):
"""Move up to max_rows rows from tablename to corresponding shadow
table.
:returns: number of rows archived.
"""
return IMPL.archive_deleted_rows_for_table(context, tablename,
max_rows=max_rows)
|
zestrada/nova-cs498cc
|
nova/db/api.py
|
Python
|
apache-2.0
| 56,325
|
import sys, os
import shutil
sys.path.append(os.path.abspath("../utils/"))
from utils_io_list import *
from test_utils_io_folder import *
def test_generate_pairs_for_each_folder():
images_folder_path= "folder/path/example"
num_of_frames = 2
pairs = generate_pairs_for_each_folder(images_folder_path, num_of_frames)
expected_pair = [("example", 0), ("example", 1)]
if expected_pair == pairs:
return True
else:
return False
def test_generate_num_of_frames_list():
folders_paths_list = ['../temp_folder_1', '../temp_folder_2']
for folder_path in folders_paths_list:
create_folder(folder_path)
create_dummy_files_in_folder(folder_path)
num_of_frames_list = generate_num_of_frames_list(folders_paths_list)
for folder_path in folders_paths_list:
shutil.rmtree(folder_path)
expected_list = [10, 10]
if expected_list == num_of_frames_list:
return True
else:
return False
def test_generate_pairs_with_two_lists():
folders_paths_list = ['../temp_folder_1', '../temp_folder_2']
num_of_frames_list = [1, 2]
pairs_list = generate_pairs_with_two_lists(folders_paths_list, num_of_frames_list)
expected_list = [('temp_folder_1', 0), ('temp_folder_2', 0), ('temp_folder_2', 1)]
if expected_list == pairs_list:
return True
else:
return False
def test_generate_pairs_list_for_training():
dataset_folder_path = '/home/ngh/dev/ROLO-dev/benchmark/ILSVRC2015/Data/VID/train/ILSVRC2015_VID_train_0000/'
output_folder_path = '/home/ngh/dev/ROLO-TRACK/training_list/'
create_folder(output_folder_path)
txt_file_path = os.path.join(output_folder_path, 'list_0.txt')
numpy_file_path = os.path.join(output_folder_path, 'list_0')
finished = generate_pairs_list_for_training(dataset_folder_path, numpy_file_path, txt_file_path)
if finished is True:
return True
else:
return False
def main():
print("Testing: utils_io_list")
passed = test_generate_num_of_frames_list()
if passed is False:
print("test_generate_num_of_frames_list failed")
passed = test_generate_pairs_for_each_folder()
if passed is False:
print("test_generate_pairs_for_each_folder failed")
passed = test_generate_pairs_with_two_lists()
if passed is False:
print("test_generate_pairs_with_two_lists failed")
passed = test_generate_pairs_list_for_training()
if passed is False:
print("test_generate_pairs_list_for_training failed")
if __name__ == "__main__":
main()
|
Guanghan/ROLO
|
update/unit_test/test_utils_io_list.py
|
Python
|
apache-2.0
| 2,578
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import datetime
import caching.base
class Migration(migrations.Migration):
dependencies = [
('opendebates', '0018_flag_note'),
]
operations = [
migrations.CreateModel(
name='SiteMode',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('show_question_votes', models.BooleanField(default=True)),
('show_total_votes', models.BooleanField(default=True)),
('allow_sorting_by_votes', models.BooleanField(default=True)),
('allow_voting_and_submitting_questions', models.BooleanField(default=True)),
('debate_time', models.DateTimeField(default=datetime.datetime(2099, 1, 1, 0, 0), help_text=b'Enter time that debate starts in timezone America/New_York')),
],
bases=(caching.base.CachingMixin, models.Model),
),
]
|
ejucovy/django-opendebates
|
opendebates/migrations/0019_sitemode.py
|
Python
|
apache-2.0
| 1,059
|
# Copyright (c) 2013 Red Hat, Inc.
# Author: William Benton (willb@redhat.com)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from proxy import Proxy, proxied_attr
from proxy import proxied_attr_get as pag, proxied_attr_set as pas, proxied_attr_getset as pags
from arc_utils import arcmethod, uniq
from singleton import v as store_singleton
import errors
from errors import not_implemented, fail
from constants import PARTITION_GROUP, LABEL_SENTINEL_PARAM, LABEL_SENTINEL_PARAM_ATTR
from datetime import datetime
import calendar
import urllib
def ts():
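    """Return the current UTC time as microseconds since the Unix epoch."""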
now = datetime.utcnow()
return (calendar.timegm(now.utctimetuple()) * 1000000) + now.microsecond
class node(Proxy):
name = property(pag("name"))
memberships = property(*pags("memberships"))
identity_group = property(lambda self : self.cm.make_proxy_object("group", self.attr_vals["identity_group"], refresh=True))
provisioned = property(*pags("provisioned"))
last_updated_version = property(pag("last_updated_version"))
modifyMemberships = arcmethod(pag("memberships"), pas("memberships"), heterogeneous=True, preserve_order=True)
def getConfig(self, **options):
if options.has_key("version"):
return self.cm.fetch_json_resource("/config/node/%s" % urllib.quote_plus(self.name), {"commit":options["version"]}, {})
return self.cm.fetch_json_resource("/config/node/%s" % urllib.quote_plus(self.name))
def makeProvisioned(self):
self.provisioned = True
self.update()
def explain(self):
not_implemented()
def checkin(self):
metapath = "/meta/node/%s" % self.name
# now = datetime.utcnow().isoformat()
now = ts()
meta = self.cm.fetch_json_resource(metapath, False, default={})
meta["last-checkin"] = now
self.cm.put_json_resource(metapath, meta, False)
return now
def last_checkin(self):
metapath = "/meta/node/%s" % self.name
meta = self.cm.fetch_json_resource(metapath, False, default={})
return meta.has_key("last-checkin") and meta["last-checkin"] or 0
def whatChanged(self, old, new):
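        """Compare this node's configuration between commits `old` and `new`.
        Returns [params, restart, reconfig]: the parameter names whose values
        differ between the two commits, the subsystems that must be restarted
        because a must-change parameter is among them, and the subsystems that
        only need to be reconfigured.
        """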
oc = self.cm.fetch_json_resource("/config/node/%s" % urllib.quote_plus(self.name), {"commit":old}, {})
nc = self.cm.fetch_json_resource("/config/node/%s" % urllib.quote_plus(self.name), {"commit":new}, {})
ock = set(oc)
nck = set(nc)
params = set([p for p in (ock | nck) if p not in ock or p not in nck or oc[p] != nc[p]]) - set(["WALLABY_CONFIG_VERSION"])
mc_params = set([p for p in params if store_singleton().getParam(p).must_change])
subsystems = [store_singleton().getSubsys(sub) for sub in self.cm.list_objects("subsystem")]
restart, reconfig = [], []
for ss in subsystems:
ss.refresh
ssp = set(ss.parameters)
if ssp.intersection(mc_params):
restart.append(ss.name)
elif ssp.intersection(params):
reconfig.append(ss.name)
return [list(params), restart, reconfig]
# labeling support below
def getLabels(self):
memberships = self.memberships
if not PARTITION_GROUP in memberships:
return []
else:
partition = memberships.index(PARTITION_GROUP)
return memberships[partition+1:]
labels=property(getLabels)
def modifyLabels(self, op, labels, **options):
thestore = store_singleton()
memberships = self.memberships
current_labels = self.getLabels()
label_set = set(current_labels + [PARTITION_GROUP])
new_labels = []
if op == "ADD":
new_labels = current_labels + labels
pass
elif op == "REPLACE":
new_labels = labels
pass
elif op == "REMOVE":
new_labels = [label for label in current_labels if label not in labels]
else:
raise NotImplementedError("modifyLabels: operation " + op + " not understood")
just_memberships = [grp for grp in memberships if grp not in label_set]
new_memberships = uniq(just_memberships + [PARTITION_GROUP] + new_labels)
if "ensure_partition_group" in options and options["ensure_partition_group"] is not False:
if thestore is None:
raise RuntimeError("store singleton must be initialized before using the ensure_partition_group option")
thestore.getPartitionGroup()
if "create_missing_labels" in options and options["create_missing_labels"] is not False:
if thestore is None:
raise RuntimeError("store singleton must be initialized before using the create_missing_labels option")
for missing_label in thestore.checkGroupValidity(new_labels):
thestore.addLabel(missing_label)
return self.modifyMemberships("REPLACE", new_memberships, {})
proxied_attr(node, "name")
proxied_attr(node, "memberships")
proxied_attr(node, "identity_group")
proxied_attr(node, "provisioned")
|
willb/wallaroo
|
clients/python-wallaroo/wallaroo/client/node.py
|
Python
|
apache-2.0
| 5,705
|
# coding=utf-8
# Copyright 2022 The TensorFlow GAN Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Layers for a progressive GAN model.
This module contains basic building blocks to build a progressive GAN model.
See https://arxiv.org/abs/1710.10196 for details about the model.
See https://github.com/tkarras/progressive_growing_of_gans for the original
theano implementation.
"""
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow_gan.examples import compat_utils
def pixel_norm(images, epsilon=1.0e-8):
"""Pixel normalization.
For each pixel a[i,j,k] of image in HWC format, normalize its value to
b[i,j,k] = a[i,j,k] / SQRT(SUM_k(a[i,j,k]^2) / C + eps).
Args:
images: A 4D `Tensor` of NHWC format.
epsilon: A small positive number to avoid division by zero.
Returns:
A 4D `Tensor` with pixel-wise normalized channels.
"""
return images * tf.math.rsqrt(
tf.reduce_mean(input_tensor=tf.square(images), axis=3, keepdims=True) +
epsilon)
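# Minimal usage sketch (illustrative shapes): for `x` of shape [N, H, W, C],
# pixel_norm(x) rescales each pixel's channel vector to roughly unit RMS, e.g.
#   y = pixel_norm(tf.random.normal([4, 8, 8, 16]))
#   tf.reduce_mean(tf.square(y), axis=3)  # close to 1.0 at every pixel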
def _get_validated_scale(scale):
"""Returns the scale guaranteed to be a positive integer."""
scale = int(scale)
if scale <= 0:
raise ValueError('`scale` must be a positive integer.')
return scale
def downscale(images, scale):
"""Box downscaling of images.
Args:
images: A 4D `Tensor` in NHWC format.
scale: A positive integer scale.
Returns:
A 4D `Tensor` of `images` down scaled by a factor `scale`.
Raises:
ValueError: If `scale` is not a positive integer.
"""
scale = _get_validated_scale(scale)
if scale == 1:
return images
return compat_utils.nn_avg_pool2d(
input=images,
ksize=[1, scale, scale, 1],
strides=[1, scale, scale, 1],
padding='VALID')
def upscale(images, scale):
"""Box upscaling (also called nearest neighbors) of images.
Args:
images: A 4D `Tensor` in NHWC format.
scale: A positive integer scale.
Returns:
A 4D `Tensor` of `images` up scaled by a factor `scale`.
Raises:
ValueError: If `scale` is not a positive integer.
"""
scale = _get_validated_scale(scale)
if scale == 1:
return images
return compat_utils.batch_to_space(
input=tf.tile(images, [scale**2, 1, 1, 1]),
crops=[[0, 0], [0, 0]],
block_shape=scale)
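# Minimal shape sketch (illustrative, H and W divisible by the scale):
#   downscale(images, 2) -> [N, H/2, W/2, C]   (box average over 2x2 blocks)
#   upscale(images, 2)   -> [N, 2*H, 2*W, C]   (each pixel repeated 2x2)
# so upscale(downscale(images, 2), 2) restores the original shape, not the values.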
def minibatch_mean_stddev(x):
"""Computes the standard deviation average.
This is used by the discriminator as a form of batch discrimination.
Args:
x: A `Tensor` for which to compute the standard deviation average. The first
dimension must be batch size.
Returns:
    A scalar `Tensor`: the average standard deviation of `x` over the batch.
"""
mean, var = tf.nn.moments(x=x, axes=[0])
del mean
return tf.reduce_mean(input_tensor=tf.sqrt(var))
def scalar_concat(tensor, scalar):
"""Concatenates a scalar to the last dimension of a tensor.
Args:
tensor: A `Tensor`.
scalar: a scalar `Tensor` to concatenate to tensor `tensor`.
Returns:
A `Tensor`. If `tensor` has shape [...,N], the result R has shape
[...,N+1] and R[...,N] = scalar.
Raises:
ValueError: If `tensor` is a scalar `Tensor`.
"""
ndims = tensor.shape.ndims
if ndims < 1:
raise ValueError('`tensor` must have number of dimensions >= 1.')
shape = tf.shape(input=tensor)
return tf.concat(
[tensor,
tf.ones([shape[i] for i in range(ndims - 1)] + [1]) * scalar],
axis=ndims - 1)
def he_initializer_scale(shape, slope=1.0):
"""The scale of He neural network initializer.
Args:
shape: A list of ints representing the dimensions of a tensor.
slope: A float representing the slope of the ReLu following the layer.
Returns:
A float of he initializer scale.
"""
fan_in = np.prod(shape[:-1])
return np.sqrt(2. / ((1. + slope**2) * fan_in))
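# Worked example (illustrative): for a 3x3 conv kernel of shape [3, 3, 16, 32]
# with slope=1.0, fan_in = 3 * 3 * 16 = 144, so the returned scale is
# sqrt(2 / (2 * 144)) = 1 / 12 ~= 0.0833.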
def _custom_layer_impl(apply_kernel, kernel_shape, bias_shape, activation,
he_initializer_slope, use_weight_scaling):
"""Helper function to implement custom_xxx layer.
Args:
apply_kernel: A function that transforms kernel to output.
kernel_shape: An integer tuple or list of the kernel shape.
bias_shape: An integer tuple or list of the bias shape.
activation: An activation function to be applied. None means no activation.
he_initializer_slope: A float slope for the He initializer.
use_weight_scaling: Whether to apply weight scaling.
Returns:
A `Tensor` computed as apply_kernel(kernel) + bias where kernel is a
`Tensor` variable with shape `kernel_shape`, bias is a `Tensor` variable
with shape `bias_shape`.
"""
kernel_scale = he_initializer_scale(kernel_shape, he_initializer_slope)
init_scale, post_scale = kernel_scale, 1.0
if use_weight_scaling:
init_scale, post_scale = post_scale, init_scale
kernel_initializer = tf.random_normal_initializer(stddev=init_scale)
bias = tf.get_variable(
'bias', shape=bias_shape, initializer=tf.zeros_initializer())
output = post_scale * apply_kernel(kernel_shape, kernel_initializer) + bias
if activation is not None:
output = activation(output)
return output
def custom_conv2d(x,
filters,
kernel_size,
strides=(1, 1),
padding='SAME',
activation=None,
he_initializer_slope=1.0,
use_weight_scaling=True,
scope='custom_conv2d',
reuse=None):
"""Custom conv2d layer.
  In comparison with tf.layers.conv2d, this implementation uses the He
  initializer to initialize the convolutional kernel and the weight scaling
  trick (if `use_weight_scaling` is True) to equalize learning rates. See
https://arxiv.org/abs/1710.10196 for more details.
Args:
x: A `Tensor` of NHWC format.
filters: An int of output channels.
kernel_size: An integer or a int tuple of [kernel_height, kernel_width].
strides: A list of strides.
padding: One of "VALID" or "SAME".
activation: An activation function to be applied. None means no activation.
Defaults to None.
he_initializer_slope: A float slope for the He initializer. Defaults to 1.0.
use_weight_scaling: Whether to apply weight scaling. Defaults to True.
scope: A string or variable scope.
reuse: Whether to reuse the weights. Defaults to None.
Returns:
A `Tensor` of NHWC format where the last dimension has size `filters`.
"""
if not isinstance(kernel_size, (list, tuple)):
kernel_size = [kernel_size] * 2
kernel_size = list(kernel_size)
def _apply_kernel(kernel_shape, kernel_initializer):
return tf.layers.conv2d(
x,
filters=filters,
kernel_size=kernel_shape[0:2],
strides=strides,
padding=padding,
use_bias=False,
kernel_initializer=kernel_initializer)
with tf.variable_scope(scope, reuse=reuse):
return _custom_layer_impl(
_apply_kernel,
kernel_shape=kernel_size + [x.shape.as_list()[3], filters],
bias_shape=(filters,),
activation=activation,
he_initializer_slope=he_initializer_slope,
use_weight_scaling=use_weight_scaling)
def custom_dense(x,
units,
activation=None,
he_initializer_slope=1.0,
use_weight_scaling=True,
scope='custom_dense',
reuse=None):
"""Custom dense layer.
  In comparison with tf.layers.dense, this implementation uses the He
  initializer to initialize weights and the weight scaling trick
(if `use_weight_scaling` is True) to equalize learning rates. See
https://arxiv.org/abs/1710.10196 for more details.
Args:
x: A `Tensor`.
units: An int of the last dimension size of output.
activation: An activation function to be applied. None means no activation.
Defaults to None.
he_initializer_slope: A float slope for the He initializer. Defaults to 1.0.
use_weight_scaling: Whether to apply weight scaling. Defaults to True.
scope: A string or variable scope.
reuse: Whether to reuse the weights. Defaults to None.
Returns:
A `Tensor` where the last dimension has size `units`.
"""
x = tf.layers.flatten(x)
def _apply_kernel(kernel_shape, kernel_initializer):
return tf.layers.dense(
x,
kernel_shape[1],
use_bias=False,
kernel_initializer=kernel_initializer)
with tf.variable_scope(scope, reuse=reuse):
return _custom_layer_impl(
_apply_kernel,
kernel_shape=(x.shape.as_list()[-1], units),
bias_shape=(units,),
activation=activation,
he_initializer_slope=he_initializer_slope,
use_weight_scaling=use_weight_scaling)
|
tensorflow/gan
|
tensorflow_gan/examples/progressive_gan/layers.py
|
Python
|
apache-2.0
| 9,208
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import uuid
import mock
import yaml
from mistralclient.api.v2 import action_executions
from mistralclient.api.v2 import executions
from mistralclient.api.v2 import workflows
from oslo_config import cfg
# XXX: actionsensor import depends on config being setup.
import st2tests.config as tests_config
tests_config.parse_args()
from mistral_v2 import MistralRunner
import st2common
from st2common.bootstrap import actionsregistrar
from st2common.bootstrap import policiesregistrar
from st2common.bootstrap import runnersregistrar
from st2common.constants import action as action_constants
from st2common.models.db.liveaction import LiveActionDB
from st2common.persistence.liveaction import LiveAction
from st2common.persistence.policy import Policy
from st2common.runners import base as runners
from st2common.services import action as action_service
from st2common.transport.liveaction import LiveActionPublisher
from st2common.transport.publishers import CUDPublisher
from st2common.util import loader
from st2tests import DbTestCase
from st2tests import fixturesloader
from st2tests.mocks.liveaction import MockLiveActionPublisher
MISTRAL_RUNNER_NAME = 'mistral_v2'
TEST_PACK = 'mistral_tests'
TEST_PACK_PATH = fixturesloader.get_fixtures_packs_base_path() + '/' + TEST_PACK
PACKS = [
TEST_PACK_PATH,
fixturesloader.get_fixtures_packs_base_path() + '/core'
]
# Non-workbook with a single workflow
WF1_META_FILE_NAME = 'workflow_v2.yaml'
WF1_META_FILE_PATH = TEST_PACK_PATH + '/actions/' + WF1_META_FILE_NAME
WF1_META_CONTENT = loader.load_meta_file(WF1_META_FILE_PATH)
WF1_NAME = WF1_META_CONTENT['pack'] + '.' + WF1_META_CONTENT['name']
WF1_ENTRY_POINT = TEST_PACK_PATH + '/actions/' + WF1_META_CONTENT['entry_point']
WF1_ENTRY_POINT_X = WF1_ENTRY_POINT.replace(WF1_META_FILE_NAME, 'xformed_' + WF1_META_FILE_NAME)
WF1_SPEC = yaml.safe_load(MistralRunner.get_workflow_definition(WF1_ENTRY_POINT_X))
WF1_YAML = yaml.safe_dump(WF1_SPEC, default_flow_style=False)
WF1 = workflows.Workflow(None, {'name': WF1_NAME, 'definition': WF1_YAML})
MISTRAL_EXECUTION = {'id': str(uuid.uuid4()), 'state': 'RUNNING', 'workflow_name': WF1_NAME}
WF1_EXEC = copy.deepcopy(MISTRAL_EXECUTION)
@mock.patch.object(
CUDPublisher,
'publish_update',
mock.MagicMock(return_value=None))
@mock.patch.object(
CUDPublisher,
'publish_create',
mock.MagicMock(side_effect=MockLiveActionPublisher.publish_create))
@mock.patch.object(
LiveActionPublisher,
'publish_state',
mock.MagicMock(side_effect=MockLiveActionPublisher.publish_state))
class MistralRunnerPolicyTest(DbTestCase):
@classmethod
def setUpClass(cls):
super(MistralRunnerPolicyTest, cls).setUpClass()
# Override the retry configuration here otherwise st2tests.config.parse_args
# in DbTestCase.setUpClass will reset these overrides.
cfg.CONF.set_override('retry_exp_msec', 100, group='mistral')
cfg.CONF.set_override('retry_exp_max_msec', 200, group='mistral')
cfg.CONF.set_override('retry_stop_max_msec', 200, group='mistral')
cfg.CONF.set_override('api_url', 'http://0.0.0.0:9101', group='auth')
def setUp(self):
super(MistralRunnerPolicyTest, self).setUp()
# Start with a clean database for each test.
self._establish_connection_and_re_create_db()
# Register runners.
runnersregistrar.register_runners()
actions_registrar = actionsregistrar.ActionsRegistrar(
use_pack_cache=False,
fail_on_failure=True
)
for pack in PACKS:
actions_registrar.register_from_pack(pack)
# Register policies required for the tests.
policiesregistrar.register_policy_types(st2common)
policies_registrar = policiesregistrar.PolicyRegistrar(
use_pack_cache=False,
fail_on_failure=True
)
for pack in PACKS:
policies_registrar.register_from_pack(pack)
@classmethod
def get_runner_class(cls, runner_name):
return runners.get_runner(runner_name).__class__
def _drop_all_other_policies(self, test_policy):
policy_dbs = [policy_db for policy_db in Policy.get_all() if policy_db.ref != test_policy]
for policy_db in policy_dbs:
Policy.delete(policy_db, publish=False)
@mock.patch.object(
workflows.WorkflowManager, 'list',
mock.MagicMock(return_value=[]))
@mock.patch.object(
workflows.WorkflowManager, 'get',
mock.MagicMock(return_value=WF1))
@mock.patch.object(
workflows.WorkflowManager, 'create',
mock.MagicMock(return_value=[WF1]))
@mock.patch.object(
executions.ExecutionManager, 'create',
mock.MagicMock(return_value=executions.Execution(None, WF1_EXEC)))
@mock.patch.object(
action_executions.ActionExecutionManager, 'update',
mock.MagicMock(return_value=None))
def test_cancel_on_task_action_concurrency(self):
# Delete other policies in the test pack to avoid conflicts.
required_policy = 'mistral_tests.cancel_on_concurrency'
self._drop_all_other_policies(required_policy)
# Get threshold from the policy.
policy = Policy.get_by_ref(required_policy)
threshold = policy.parameters.get('threshold', 0)
self.assertGreater(threshold, 0)
# Launch instances of the workflow up to threshold.
for i in range(0, threshold):
liveaction = LiveActionDB(action=WF1_NAME, parameters={'friend': 'friend' + str(i)})
liveaction, execution1 = action_service.request(liveaction)
liveaction = LiveAction.get_by_id(str(liveaction.id))
self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_RUNNING)
# Check number of running instances
running = LiveAction.count(
action=WF1_NAME, status=action_constants.LIVEACTION_STATUS_RUNNING)
self.assertEqual(running, threshold)
# Mock the mistral runner cancel method to assert cancel is called.
mistral_runner_cls = self.get_runner_class('mistral_v2')
with mock.patch.object(mistral_runner_cls, 'cancel', mock.MagicMock(return_value=None)):
# Launch another instance of the workflow with mistral callback defined
# to indicate that this is executed under a workflow.
callback = {
'source': MISTRAL_RUNNER_NAME,
'url': 'http://127.0.0.1:8989/v2/action_executions/12345'
}
params = {'friend': 'grande animalerie'}
liveaction2 = LiveActionDB(action=WF1_NAME, parameters=params, callback=callback)
liveaction2, execution2 = action_service.request(liveaction2)
action_executions.ActionExecutionManager.update.assert_called_once_with(
'12345',
output='{"error": "Execution canceled by user."}',
state='CANCELLED'
)
liveaction2 = LiveAction.get_by_id(str(liveaction2.id))
self.assertEqual(liveaction2.status, action_constants.LIVEACTION_STATUS_CANCELED)
# Assert cancel has been called.
mistral_runner_cls.cancel.assert_called_once_with()
@mock.patch.object(
workflows.WorkflowManager, 'list',
mock.MagicMock(return_value=[]))
@mock.patch.object(
workflows.WorkflowManager, 'get',
mock.MagicMock(return_value=WF1))
@mock.patch.object(
workflows.WorkflowManager, 'create',
mock.MagicMock(return_value=[WF1]))
@mock.patch.object(
executions.ExecutionManager, 'create',
mock.MagicMock(return_value=executions.Execution(None, WF1_EXEC)))
@mock.patch.object(
action_executions.ActionExecutionManager, 'update',
mock.MagicMock(return_value=None))
def test_cancel_on_task_action_concurrency_by_attr(self):
# Delete other policies in the test pack to avoid conflicts.
required_policy = 'mistral_tests.cancel_on_concurrency_by_attr'
self._drop_all_other_policies(required_policy)
# Get threshold from the policy.
policy = Policy.get_by_ref(required_policy)
threshold = policy.parameters.get('threshold', 0)
self.assertGreater(threshold, 0)
params = {'friend': 'grande animalerie'}
# Launch instances of the workflow up to threshold.
for i in range(0, threshold):
liveaction = LiveActionDB(action=WF1_NAME, parameters=params)
liveaction, execution1 = action_service.request(liveaction)
liveaction = LiveAction.get_by_id(str(liveaction.id))
self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_RUNNING)
# Check number of running instances
running = LiveAction.count(
action=WF1_NAME, status=action_constants.LIVEACTION_STATUS_RUNNING,
parameters__friend=params['friend'])
self.assertEqual(running, threshold)
# Mock the mistral runner cancel method to assert cancel is called.
mistral_runner_cls = self.get_runner_class('mistral_v2')
with mock.patch.object(mistral_runner_cls, 'cancel', mock.MagicMock(return_value=None)):
# Launch another instance of the workflow with mistral callback defined
# to indicate that this is executed under a workflow.
callback = {
'source': MISTRAL_RUNNER_NAME,
'url': 'http://127.0.0.1:8989/v2/action_executions/12345'
}
liveaction2 = LiveActionDB(action=WF1_NAME, parameters=params, callback=callback)
liveaction2, execution2 = action_service.request(liveaction2)
action_executions.ActionExecutionManager.update.assert_called_once_with(
'12345',
output='{"error": "Execution canceled by user."}',
state='CANCELLED'
)
liveaction2 = LiveAction.get_by_id(str(liveaction2.id))
self.assertEqual(liveaction2.status, action_constants.LIVEACTION_STATUS_CANCELED)
# Assert cancel has been called.
mistral_runner_cls.cancel.assert_called_once_with()
|
tonybaloney/st2
|
contrib/runners/mistral_v2/tests/unit/test_mistral_v2_policy.py
|
Python
|
apache-2.0
| 11,045
|
class Solution(object):
def count_bits(self, n):
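        # Parallel bit count (HAKMEM-style): the octal masks sum the bits of n
        # in groups of three, then in groups of six, and the final "% 63" folds
        # the six-bit partial sums into the total number of set bits.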
c = (n - ((n >> 1) & 0o33333333333) - ((n >> 2) & 0o11111111111))
return ((c + (c >> 3)) & 0o30707070707) % 63
def countBits(self, num):
"""
:type num: int
:rtype: List[int]
"""
return map(self.count_bits, xrange(num + 1))
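# Minimal usage sketch (Python 2, matching the xrange/map calls above):
#   Solution().countBits(5)  # -> [0, 1, 1, 2, 1, 2]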
|
ckclark/leetcode
|
py/counting-bits.py
|
Python
|
apache-2.0
| 336
|
# Copyright 2021, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Nodes for try/except/finally handling.
This is the unified low level solution to trying a block, and executing code
when it returns, break, continues, or raises an exception. See Developer
Manual for how this maps to try/finally and try/except as in Python.
"""
from nuitka.Errors import NuitkaOptimizationError
from nuitka.optimizations.TraceCollections import TraceCollectionBranch
from .Checkers import checkStatementsSequence, checkStatementsSequenceOrNone
from .NodeBases import StatementChildrenHavingBase
from .StatementNodes import StatementsSequence
class StatementTry(StatementChildrenHavingBase):
kind = "STATEMENT_TRY"
named_children = (
"tried",
"except_handler",
"break_handler",
"continue_handler",
"return_handler",
)
checkers = {
"tried": checkStatementsSequence,
"except_handler": checkStatementsSequenceOrNone,
"break_handler": checkStatementsSequenceOrNone,
"continue_handler": checkStatementsSequenceOrNone,
"return_handler": checkStatementsSequenceOrNone,
}
def __init__(
self,
tried,
except_handler,
break_handler,
continue_handler,
return_handler,
source_ref,
):
StatementChildrenHavingBase.__init__(
self,
values={
"tried": tried,
"except_handler": except_handler,
"break_handler": break_handler,
"continue_handler": continue_handler,
"return_handler": return_handler,
},
source_ref=source_ref,
)
def computeStatement(self, trace_collection):
# This node has many children to handle, pylint: disable=I0021,too-many-branches,too-many-locals,too-many-statements
tried = self.subnode_tried
except_handler = self.subnode_except_handler
break_handler = self.subnode_break_handler
continue_handler = self.subnode_continue_handler
return_handler = self.subnode_return_handler
# The tried block must be considered as a branch, if it is not empty
# already.
collection_start = TraceCollectionBranch(
parent=trace_collection, name="try start"
)
abort_context = trace_collection.makeAbortStackContext(
catch_breaks=break_handler is not None,
catch_continues=continue_handler is not None,
catch_returns=return_handler is not None,
catch_exceptions=True,
)
with abort_context:
# As a branch point for the many types of handlers.
result = tried.computeStatementsSequence(trace_collection=trace_collection)
# We might be done entirely already.
if result is None:
return None, "new_statements", "Removed now empty try statement."
# Might be changed.
if result is not tried:
self.setChild("tried", result)
tried = result
break_collections = trace_collection.getLoopBreakCollections()
continue_collections = trace_collection.getLoopContinueCollections()
return_collections = trace_collection.getFunctionReturnCollections()
exception_collections = trace_collection.getExceptionRaiseCollections()
tried_may_raise = tried.mayRaiseException(BaseException)
# Exception handling is useless if no exception is to be raised.
if not tried_may_raise:
if except_handler is not None:
except_handler.finalize()
self.clearChild("except_handler")
trace_collection.signalChange(
tags="new_statements",
message="Removed useless exception handler.",
source_ref=except_handler.source_ref,
)
except_handler = None
        # If tried may raise, even an empty exception handler has a meaning:
        # it swallows that exception.
if tried_may_raise:
collection_exception_handling = TraceCollectionBranch(
parent=collection_start, name="except handler"
)
# When no exception exits are there, this is a problem, we just
# found an inconsistency that is a bug.
if not exception_collections:
for statement in tried.subnode_statements:
if statement.mayRaiseException(BaseException):
raise NuitkaOptimizationError(
"This statement does raise but didn't annotate an exception exit.",
statement,
)
raise NuitkaOptimizationError(
"Falsely assuming tried block may raise, but no statement says so.",
tried,
)
collection_exception_handling.mergeMultipleBranches(exception_collections)
if except_handler is not None:
result = except_handler.computeStatementsSequence(
trace_collection=collection_exception_handling
)
# Might be changed.
if result is not except_handler:
self.setChild("except_handler", result)
except_handler = result
if break_handler is not None:
if not tried.mayBreak():
break_handler.finalize()
self.clearChild("break_handler")
break_handler = None
if break_handler is not None:
collection_break = TraceCollectionBranch(
parent=collection_start, name="break handler"
)
collection_break.mergeMultipleBranches(break_collections)
result = break_handler.computeStatementsSequence(
trace_collection=collection_break
)
# Might be changed.
if result is not break_handler:
self.setChild("break_handler", result)
break_handler = result
if continue_handler is not None:
if not tried.mayContinue():
continue_handler.finalize()
self.clearChild("continue_handler")
continue_handler = None
if continue_handler is not None:
collection_continue = TraceCollectionBranch(
parent=collection_start, name="continue handler"
)
collection_continue.mergeMultipleBranches(continue_collections)
result = continue_handler.computeStatementsSequence(
trace_collection=collection_continue
)
# Might be changed.
if result is not continue_handler:
self.setChild("continue_handler", result)
continue_handler = result
if return_handler is not None:
if not tried.mayReturn():
return_handler.finalize()
self.clearChild("return_handler")
return_handler = None
if return_handler is not None:
collection_return = TraceCollectionBranch(
parent=collection_start, name="return handler"
)
collection_return.mergeMultipleBranches(return_collections)
result = return_handler.computeStatementsSequence(
trace_collection=collection_return
)
# Might be changed.
if result is not return_handler:
self.setChild("return_handler", result)
return_handler = result
        # Check for trivial return handlers that immediately return; they can
        # just be removed.
if return_handler is not None:
if return_handler.subnode_statements[0].isStatementReturnReturnedValue():
return_handler.finalize()
self.clearChild("return_handler")
return_handler = None
# Merge exception handler only if it is used. Empty means it is not
# aborting, as it swallows the exception.
if tried_may_raise and (
except_handler is None or not except_handler.isStatementAborting()
):
trace_collection.mergeBranches(
collection_yes=collection_exception_handling, collection_no=None
)
        # An empty exception handler means we have to swallow the exception.
if (
(
not tried_may_raise
or (
except_handler is not None
and except_handler.subnode_statements[
0
].isStatementReraiseException()
)
)
and break_handler is None
and continue_handler is None
and return_handler is None
):
return tried, "new_statements", "Removed useless try, all handlers removed."
tried_statements = tried.subnode_statements
pre_statements = []
while tried_statements:
tried_statement = tried_statements[0]
if tried_statement.mayRaiseException(BaseException):
break
if break_handler is not None and tried_statement.mayBreak():
break
if continue_handler is not None and tried_statement.mayContinue():
break
if return_handler is not None and tried_statement.mayReturn():
break
pre_statements.append(tried_statement)
tried_statements = list(tried_statements)
del tried_statements[0]
post_statements = []
if except_handler is not None and except_handler.isStatementAborting():
while tried_statements:
tried_statement = tried_statements[-1]
if tried_statement.mayRaiseException(BaseException):
break
if break_handler is not None and tried_statement.mayBreak():
break
if continue_handler is not None and tried_statement.mayContinue():
break
if return_handler is not None and tried_statement.mayReturn():
break
post_statements.insert(0, tried_statement)
tried_statements = list(tried_statements)
del tried_statements[-1]
if pre_statements or post_statements:
assert tried_statements # Should be dealt with already
tried.setChild("statements", tried_statements)
result = StatementsSequence(
statements=pre_statements + [self] + post_statements,
source_ref=self.source_ref,
)
def explain():
# TODO: We probably don't want to say this for re-formulation ones.
result = "Reduced scope of tried block."
if pre_statements:
result += " Leading statements at %s." % (
",".join(
x.getSourceReference().getAsString() + "/" + str(x)
for x in pre_statements
)
)
if post_statements:
result += " Trailing statements at %s." % (
",".join(
x.getSourceReference().getAsString() + "/" + str(x)
for x in post_statements
)
)
return result
return (result, "new_statements", explain)
return self, None, None
def mayReturn(self):
# TODO: If we optimized return handler away, this would be not needed
# or even non-optimal.
if self.subnode_tried.mayReturn():
return True
except_handler = self.subnode_except_handler
if except_handler is not None and except_handler.mayReturn():
return True
break_handler = self.subnode_break_handler
if break_handler is not None and break_handler.mayReturn():
return True
continue_handler = self.subnode_continue_handler
if continue_handler is not None and continue_handler.mayReturn():
return True
return_handler = self.subnode_return_handler
if return_handler is not None and return_handler.mayReturn():
return True
return False
def mayBreak(self):
# TODO: If we optimized return handler away, this would be not needed
# or even non-optimal.
if self.subnode_tried.mayBreak():
return True
except_handler = self.subnode_except_handler
if except_handler is not None and except_handler.mayBreak():
return True
break_handler = self.subnode_break_handler
if break_handler is not None and break_handler.mayBreak():
return True
continue_handler = self.subnode_continue_handler
if continue_handler is not None and continue_handler.mayBreak():
return True
return_handler = self.subnode_return_handler
if return_handler is not None and return_handler.mayBreak():
return True
return False
def mayContinue(self):
# TODO: If we optimized return handler away, this would be not needed
# or even non-optimal.
if self.subnode_tried.mayContinue():
return True
except_handler = self.subnode_except_handler
if except_handler is not None and except_handler.mayContinue():
return True
break_handler = self.subnode_break_handler
if break_handler is not None and break_handler.mayContinue():
return True
continue_handler = self.subnode_continue_handler
if continue_handler is not None and continue_handler.mayContinue():
return True
return_handler = self.subnode_return_handler
if return_handler is not None and return_handler.mayContinue():
return True
return False
def isStatementAborting(self):
except_handler = self.subnode_except_handler
if except_handler is None or not except_handler.isStatementAborting():
return False
break_handler = self.subnode_break_handler
if break_handler is not None and not break_handler.isStatementAborting():
return False
continue_handler = self.subnode_continue_handler
if continue_handler is not None and not continue_handler.isStatementAborting():
return False
return_handler = self.subnode_return_handler
if return_handler is not None and not return_handler.isStatementAborting():
return False
return self.subnode_tried.isStatementAborting()
def mayRaiseException(self, exception_type):
tried = self.subnode_tried
if tried.mayRaiseException(exception_type):
except_handler = self.subnode_except_handler
if except_handler is not None and except_handler.mayRaiseException(
exception_type
):
return True
break_handler = self.subnode_break_handler
if break_handler is not None and break_handler.mayRaiseException(
exception_type
):
return True
continue_handler = self.subnode_continue_handler
if continue_handler is not None and continue_handler.mayRaiseException(
exception_type
):
return True
return_handler = self.subnode_return_handler
if return_handler is not None and return_handler.mayRaiseException(
exception_type
):
return True
return False
def needsFrame(self):
except_handler = self.subnode_except_handler
if except_handler is not None and except_handler.needsFrame():
return True
break_handler = self.subnode_break_handler
if break_handler is not None and break_handler.needsFrame():
return True
continue_handler = self.subnode_continue_handler
if continue_handler is not None and continue_handler.needsFrame():
return True
return_handler = self.subnode_return_handler
if return_handler is not None and return_handler.needsFrame():
return True
return self.subnode_tried.needsFrame()
@staticmethod
def getStatementNiceName():
return "tried block statement"
|
kayhayen/Nuitka
|
nuitka/nodes/TryNodes.py
|
Python
|
apache-2.0
| 17,486
|
# Copyright (C) 2015 zulily, llc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""node_status version"""
__version__ = '0.1.0'
|
zulily/node_status
|
node_status/version.py
|
Python
|
apache-2.0
| 623
|
#!/usr/bin/env python
#==============================================================================
# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#==============================================================================
import sys
from distutils.core import setup, Distribution
from aws.cfn import bridge
name = 'aws-cfn-resource-bridge'
if sys.version_info[0] == 2 and sys.version_info[1] < 6:
print >> sys.stderr, "Python 2.6+ is required"
sys.exit(1)
rpm_requires = ['python >= 2.6', 'python-daemon', 'python-botocore >= 0.17.0']
dependencies = ['python-daemon>=1.5.2', 'botocore>=0.17.0']
if sys.version_info[:2] == (2, 6):
# For python2.6 we have to require argparse
rpm_requires.append('python-argparse >= 1.1')
dependencies.append('argparse>=1.1')
_opts = {
'build_scripts': {'executable': '/usr/bin/env python'},
'bdist_rpm': {'requires': rpm_requires}
}
_data_files = [('share/doc/%s-%s' % (name, bridge.__version__), ['NOTICE.txt', 'LICENSE']),
('init/redhat', ['init/redhat/cfn-resource-bridge']),
('init/ubuntu', ['init/ubuntu/cfn-resource-bridge'])]
try:
import py2exe
_opts['py2exe'] = {
# TODO: Need to update this for this package
'typelibs': [('{000C1092-0000-0000-C000-000000000046}', 1033, 1, 0),
('{E34CB9F1-C7F7-424C-BE29-027DCC09363A}', 0, 1, 0)],
'excludes': ['certifi', 'pyreadline', 'difflib', 'distutils', 'doctest', 'pdb', 'inspect', 'unittest',
'adodbapi'],
'includes': ['chardet', 'dbhash', 'dumbdbm'],
'dll_excludes': ['msvcr71.dll', 'w9xpopen.exe', ''],
'compressed': True,
'com_server': [],
'ctypes_com_server': [],
'service': ["aws.cfn.bridge.winbridge"],
'isapi': [],
'windows': [],
'zipfile': 'library.zip',
'console': ['bin/cfn-resource-bridge']
}
_data_files = [('', ['license/win/NOTICE.txt', 'license/win/LICENSE.rtf'])]
except ImportError:
pass
setup_options = dict(
name=name,
version=bridge.__version__,
description='A custom resource framework for AWS CloudFormation',
long_description=open('README.md').read(),
author='AWS CloudFormation',
url='http://aws.amazon.com/cloudformation/',
license='Apache License 2.0',
scripts=['bin/cfn-resource-bridge'],
classifiers=[],
packages=[
'aws',
'aws.cfn',
'aws.cfn.bridge'
],
install_requires=dependencies,
data_files=_data_files,
options=_opts
)
setup(**setup_options)
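# Typical invocations (illustrative): "python setup.py sdist" for a source
# distribution, "python setup.py bdist_rpm" using the rpm_requires above, or
# "python setup.py py2exe" on Windows when py2exe is importable.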
|
dldinternet/aws-cfn-resource-bridge
|
setup.py
|
Python
|
apache-2.0
| 3,161
|
__version__ = "1.3.15"
|
F5Networks/f5-icontrol-rest-python
|
icontrol/__init__.py
|
Python
|
apache-2.0
| 23
|
from __future__ import absolute_import, print_function
import logging
from datetime import datetime
from changes.config import db
from changes.models import Repository, RepositoryStatus
from changes.queue.task import tracked_task
logger = logging.getLogger('repo.sync')
@tracked_task(max_retries=None)
def import_repo(repo_id, parent=None):
repo = Repository.query.get(repo_id)
if not repo:
logger.error('Repository %s not found', repo_id)
return
vcs = repo.get_vcs()
if vcs is None:
logger.warning('Repository %s has no VCS backend set', repo.id)
return
if repo.status == RepositoryStatus.inactive:
logger.info('Repository %s is inactive', repo.id)
return
Repository.query.filter(
Repository.id == repo.id,
).update({
'last_update_attempt': datetime.utcnow(),
}, synchronize_session=False)
db.session.commit()
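    # Make sure a local clone exists and is up to date before walking the log.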
if vcs.exists():
vcs.update()
else:
vcs.clone()
for commit in vcs.log(parent=parent):
revision, created = commit.save(repo)
db.session.commit()
parent = commit.id
Repository.query.filter(
Repository.id == repo.id,
).update({
'last_update': datetime.utcnow(),
'status': RepositoryStatus.active,
}, synchronize_session=False)
db.session.commit()
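    # Chain another task so the import continues from the last processed commit.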
if parent:
import_repo.delay(
repo_id=repo.id.hex,
task_id=repo.id.hex,
parent=parent,
)
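# Minimal usage sketch (illustrative): an initial import is queued without a
# parent, e.g.
#   import_repo.delay(repo_id=repo.id.hex, task_id=repo.id.hex)
# and the task then re-queues itself (above) until the history is imported.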
|
alex/changes
|
changes/jobs/import_repo.py
|
Python
|
apache-2.0
| 1,505
|