| code (stringlengths, 2-1.05M) | repo_name (stringlengths, 5-104) | path (stringlengths, 4-251) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int32, 2-1.05M) |
|---|---|---|---|---|---|
"""Create a KAF file for each act in a FoLiA file
Usage: python folia2kaf.py <file in> <output dir>
Or: ./generate_kaf.sh <dir in> <dir out>
"""
from lxml import etree
from bs4 import BeautifulSoup
from embem.emotools.bs4_helpers import act, sentence, word, speaker_turn, note
import argparse
import os
_folia_pos2kaf_pos = {
'WW': 'V', # Werkwoord -> Verb
'VNW': 'Q', # Voornaamwoord -> Pronoun
'VZ': 'P', # Voorzetsel -> Preposition
'LID': 'D', # Lidwoord -> Determiner
'N': 'N', # Zelfstandig naamwoord -> Common noun
'VG': 'C', # Voegwoord -> Conjunction
    'TW': 'O',  # Telwoord (numeral) -> Other
'BW': 'A', # Bijwoord -> Adverb
'LET': 'O', # Interpunctie -> Other
    'TSW': 'O',  # Tussenwerpsel (interjection) -> Other
'ADJ': 'G', # Bijvoegelijk naamwoord -> Adjective
    'SPEC': 'O'  # Special (not further specified) -> Other
    # There is no FoLiA POS tag that maps to Proper Noun (R)
}
def kaf_file_name(input_file, act_number):
head, tail = os.path.split(input_file)
p = tail.split('.')
return '{n}__act-0{a}.kaf'.format(a=act_number, n=p[0])
def add_word2kaf(elem, w_id, s_id, term_id, text, terms):
w_id = elem.get('xml:id')
w = etree.SubElement(text, 'wf', wid=w_id, sent=s_id)
w.text = unicode(elem.t.string)
lemma = elem.lemma.get('class')
pos = _folia_pos2kaf_pos[elem.pos.get('head', 'SPEC')]
t_id = 't{wid}'.format(wid=term_id)
t = etree.SubElement(terms, 'term', tid=t_id, type='open', lemma=lemma,
pos=pos)
s = etree.SubElement(t, 'span')
target = etree.SubElement(s, 'target', id=w_id)
def act2kaf(act_xml, sentence_id):
"""Convert act to kaf xml. Returns an XML tree that can be written to file.
"""
print 'act:', act_xml.find('div', 'act').attrs.get('xml:id')
kaf_document = None
subacts = act_xml.find_all(act)
# act_xml should contain exactly one act; if it contains more acts, these
# acts are sub acts, that will be processed later
if len(subacts) == 1:
term_id = 1
# create output kaf xml tree for act
root = etree.Element('KAF')
kaf_document = etree.ElementTree(root)
text = etree.SubElement(root, 'text')
terms = etree.SubElement(root, 'terms')
for elem in act_xml.descendants:
if sentence(elem) and not note(elem.parent):
sentence_id += 1
elif word(elem) and not note(elem.parent.parent):
add_word2kaf(elem, w_id, str(sentence_id), term_id, text,
terms)
term_id += 1
return kaf_document, sentence_id
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('file', help='the name of the FoLiA XML file to '
'generate KAF files for')
parser.add_argument('output_dir', help='the directory where the '
'generated KAF files should be saved')
args = parser.parse_args()
file_name = args.file
output_dir = args.output_dir
act_tag = '{http://ilk.uvt.nl/folia}div'
# Load document
context = etree.iterparse(file_name, events=('end',), tag=act_tag)
act_number = 0
s_id = 0 # in KAF, sentence numbers must be integers
w_id = None
for event, elem in context:
if elem.tag == act_tag and elem.get('class') == 'act':
# load act into memory
act_xml = BeautifulSoup(etree.tostring(elem), 'xml')
kaf_document, s_id = act2kaf(act_xml, s_id)
if kaf_document:
act_number += 1
# write kaf xml tree to file
kaf_file = '{}{}{}'.format(output_dir, os.sep,
kaf_file_name(file_name,
act_number))
print kaf_file
with open(kaf_file, 'w') as f:
kaf_document.write(f,
xml_declaration=True,
encoding='utf-8',
method='xml',
pretty_print=True)
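# A hypothetical Python equivalent of the generate_kaf.sh wrapper mentioned in the
# module docstring -- a sketch only, assuming the wrapper simply invokes this script
# once per FoLiA XML file in the input directory (the original shell script is not
# part of this file, and _batch_convert is an invented name):
def _batch_convert(dir_in, dir_out):
    import glob
    import subprocess
    for folia_file in sorted(glob.glob(os.path.join(dir_in, '*.xml'))):
        subprocess.check_call(['python', 'folia2kaf.py', folia_file, dir_out])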
| NLeSC/embodied-emotions-scripts | embem/kaf-tag/folia2kaf.py | Python | apache-2.0 | 4,195 |
from prime import isPrime
summ = 0 # count = 0
i = 1
while i <= 100:
    if isPrime(i):
        summ = summ + i # count = count + 1
    i = i + 1
print 'Sum of primes from 1 to 100 :', summ # count
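# count.py imports isPrime from a local prime module that is not included in this
# row. A minimal sketch of what such a helper could look like (an assumption about
# the repository's prime.py, not its actual contents), using plain trial division:
#
# def isPrime(n):
#     if n < 2:
#         return False
#     d = 2
#     while d * d <= n:
#         if n % d == 0:
#             return False
#         d = d + 1
#     return True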
| brijeshrakholia/iCode | day3/count.py | Python | apache-2.0 | 195 |
from math import sqrt
def convert_product_id_to_name(id):
"""
Given product id number, return product name
"""
product_id_to_name = {}
if id in product_id_to_name:
return product_id_to_name[id]
else:
return id
def cosine_similarity(rating1, rating2):
dot_product = 0
length_of_vector_x = 0
length_of_vector_y = 0
for key in rating1:
if key in rating2:
dot_product += (rating1[key] * rating2[key])
length_of_vector_x += pow(rating1[key], 2)
length_of_vector_y += pow(rating2[key], 2)
if dot_product == 0 or length_of_vector_x == 0 or length_of_vector_y == 0:
return 0
else:
return dot_product / ((sqrt(length_of_vector_x)) * sqrt(length_of_vector_y))
def compute_nearest_neighbor(username):
"""
Creates a sorted list of users based on their distance to the username
"""
distances = []
for instance in users:
if instance != username:
distance = cosine_similarity(users[username], users[instance])
distances.append((instance, distance))
distances.sort(key=lambda artist_tuple: artist_tuple[1], reverse=True)
return distances
def recommend(user):
"""Give list of recommendations"""
user = user
k_nearest_neighbor_value = 3
recommendations = {}
nearest_neighbors = compute_nearest_neighbor(user)
# now get the ratings for the user
user_ratings = users[user]
# determine the total distance
total_distance = 0.0
for i in range(k_nearest_neighbor_value):
total_distance += nearest_neighbors[i][1]
# Iterate through the k nearest neighbors accumulating their ratings
for i in range(k_nearest_neighbor_value):
weight = nearest_neighbors[i][1] / total_distance
nearest_neighbor_name = nearest_neighbors[i][0]
nearest_neighbor_ratings = users[nearest_neighbor_name]
# now find bands neighbor rated that user didn't
for neighbor in nearest_neighbor_ratings:
if not neighbor in user_ratings:
if neighbor not in recommendations:
recommendations[neighbor] = (nearest_neighbor_ratings[neighbor] * weight)
else:
recommendations[neighbor] = (recommendations[neighbor] + nearest_neighbor_ratings[neighbor] * weight)
# now make list from dictionary
recommendations = list(recommendations.items())
recommendations = [(convert_product_id_to_name(k), v) for (k, v) in recommendations]
# finally sort and return
recommendations.sort(key=lambda neighborTuple: neighborTuple[1], reverse=True)
return recommendations
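# Usage sketch: the functions above read a module-level `users` dict mapping each
# user to a {product_id: rating} dict, which is not defined in this file. The data
# below is invented purely for illustration.
if __name__ == '__main__':
    users = {
        'Ann': {'p1': 5, 'p2': 3, 'p3': 4},
        'Ben': {'p1': 4, 'p2': 5, 'p4': 2},
        'Cara': {'p2': 4, 'p3': 5, 'p4': 5},
        'Dave': {'p1': 2, 'p3': 3, 'p4': 4},
    }
    # Products Ann has not rated, weighted by her three nearest neighbours
    # (k_nearest_neighbor_value is fixed to 3 in recommend above).
    print(recommend('Ann'))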
| dolel13/GeekNightRecommendationEngine | algorithms/recommend.py | Python | apache-2.0 | 2,701 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import sys
import pytest
import tvm
import tvm.testing
from tvm import tir
from tvm.script import tir as T
from tvm.tir.schedule.testing import verify_trace_roundtrip
# fmt: off
# pylint: disable=no-member,invalid-name,unused-variable,line-too-long,redefined-outer-name,unexpected-keyword-arg,too-many-nested-blocks
@T.prim_func
def mma_desc(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (16, 16), align=128, offset_factor=1)
B = T.match_buffer(b, (16, 16), align=128, offset_factor=1)
C = T.match_buffer(c, (16, 16), align=128, offset_factor=1)
with T.block("root"):
T.reads(C[0 : 16, 0 : 16], A[0 : 16, 0 : 16], B[0 : 16, 0 : 16])
T.writes(C[0 : 16, 0 : 16])
for i, j, k in T.grid(16, 16, 16):
with T.block("update"):
vii, vjj, vkk = T.axis.remap("SSR", [i, j, k])
C[vii, vjj] = C[vii, vjj] + A[vii, vkk] * B[vjj, vkk]
@T.prim_func
def mma_intrin(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (16, 16), align=128, offset_factor=1)
B = T.match_buffer(b, (16, 16), align=128, offset_factor=1)
C = T.match_buffer(c, (16, 16), align=128, offset_factor=1)
with T.block("root"):
T.reads(C[0 : 16, 0 : 16], A[0 : 16, 0 : 16], B[0 : 16, 0 : 16])
T.writes(C[0 : 16, 0 : 16])
T.evaluate(
T.tvm_mma_sync(
C.data,
C.elem_offset // 256,
A.data,
A.elem_offset // 256,
B.data,
B.elem_offset // 256,
C.data,
C.elem_offset // 256,
dtype="handle",
)
)
@T.prim_func
def dot_product_desc(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (4,))
B = T.match_buffer(b, (4,))
C = T.match_buffer(c, ())
with T.block("root"):
T.reads(C[()], A[0 : 4], B[0 : 4])
T.writes(C[()])
for i in range(0, 4):
with T.block("update"):
vi = T.axis.remap("R", [i])
C[()] = C[()] + A[vi] * B[vi]
@T.prim_func
def dot_product_intrin(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (4,), offset_factor=1)
B = T.match_buffer(b, (4,), offset_factor=1)
C = T.match_buffer(c, (), offset_factor=1)
with T.block("root"):
T.reads(C[()], A[0 : 4], B[0 : 4])
T.writes(C[()])
T.evaluate(
T.call_extern(
"vec4add",
C.data,
C.elem_offset,
A.data,
A.elem_offset,
B.data,
B.elem_offset,
dtype="int32",
)
)
@T.prim_func
def outer_product_desc(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (16, 1), offset_factor=1)
B = T.match_buffer(b, (16, 1), offset_factor=1)
C = T.match_buffer(c, (16, 16), offset_factor=1)
with T.block("root"):
T.reads(
C[0 : 16, 0 : 16],
A[0 : 16, 0 : 1],
B[0 : 16, 0 : 1],
)
T.writes(C[0 : 16, 0 : 16])
for i, j in T.grid(16, 16):
with T.block("update"):
vii, vjj = T.axis.remap("SS", [i, j])
C[vii, vjj] = C[vii, vjj] + A[vii, 0] * B[vjj, 0]
@T.prim_func
def outer_product_intrin(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (16, 1), offset_factor=1)
B = T.match_buffer(b, (16, 1), offset_factor=1)
C = T.match_buffer(c, (16, 16), offset_factor=1)
with T.block("root"):
T.reads(
C[0 : 16, 0 : 16],
A[0 : 16, 0 : 1],
B[0 : 16, 0 : 1],
)
T.writes(C[0 : 16, 0 : 16])
T.evaluate(
T.call_extern(
"outer_product",
C.data,
C.elem_offset,
A.data,
A.elem_offset,
B.data,
B.elem_offset,
dtype="int32",
)
)
@T.prim_func
def matmul(
A: T.Buffer[(128, 128), "float32"],
B: T.Buffer[(128, 128), "float32"],
C: T.Buffer[(128, 128), "float32"],
) -> None:
for i, j, k in T.grid(128, 128, 128):
with T.block("update"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
with T.init():
C[vi, vj] = T.float32(0)
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
@T.prim_func
def tensorized_matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
C = T.match_buffer(c, [128, 128], elem_offset=0, align=128, offset_factor=1)
B = T.match_buffer(b, [128, 128], elem_offset=0, align=128, offset_factor=1)
A = T.match_buffer(a, [128, 128], elem_offset=0, align=128, offset_factor=1)
for i_outer, j_outer in T.grid(8, 8):
for i_inner_init, j_inner_init in T.grid(16, 16):
with T.block("init"):
vi_init = T.axis.S(128, ((i_outer * 16) + i_inner_init))
vj_init = T.axis.S(128, ((j_outer * 16) + j_inner_init))
C[vi_init, vj_init] = T.float32(0)
for k_outer in T.grid(8):
with T.block("update"):
vi, vj, vk = T.axis.remap("SSR", [i_outer, j_outer, k_outer])
T.reads(
[
C[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16],
A[vi * 16 : vi * 16 + 16, vk * 16 : vk * 16 + 16],
B[vj * 16 : vj * 16 + 16, vk * 16 : vk * 16 + 16],
]
)
T.writes(C[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
A_elem_offset = T.var("int32")
B_elem_offset = T.var("int32")
C_elem_offset = T.var("int32")
A_sub = T.match_buffer(
A[vi * 16 : vi * 16 + 16, vk * 16 : vk * 16 + 16],
[16, 16],
elem_offset=A_elem_offset,
)
B_sub = T.match_buffer(
B[vj * 16 : vj * 16 + 16, vk * 16 : vk * 16 + 16],
[16, 16],
elem_offset=B_elem_offset,
)
C_sub = T.match_buffer(
C[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16],
[16, 16],
elem_offset=C_elem_offset,
)
T.evaluate(
T.tvm_mma_sync(
C_sub.data,
T.floordiv(C_sub.elem_offset, 256),
A_sub.data,
T.floordiv(A_sub.elem_offset, 256),
B_sub.data,
T.floordiv(B_sub.elem_offset, 256),
C_sub.data,
T.floordiv(C_sub.elem_offset, 256),
dtype="handle",
)
)
@T.prim_func
def batch_matmul(
A: T.Buffer[(16, 128, 128), "float32"],
B: T.Buffer[(16, 128, 128), "float32"],
C: T.Buffer[(16, 128, 128), "float32"],
) -> None:
for n, i, j in T.grid(16, 128, 128):
with T.block("init"):
vn, vi, vj = T.axis.remap("SSS", [n, i, j])
C[vn, vi, vj] = T.float32(0)
for n, i, j, k in T.grid(16, 128, 128, 128):
with T.block("update"):
vn, vi, vj, vk = T.axis.remap("SSSR", [n, i, j, k])
C[vn, vi, vj] = C[vn, vi, vj] + A[vn, vi, vk] * B[vn, vj, vk]
@T.prim_func
def tensorized_batch_matmul_mma(
A: T.Buffer[(16, 128, 128), "float32"],
B: T.Buffer[(16, 128, 128), "float32"],
C: T.Buffer[(16, 128, 128), "float32"],
) -> None:
for n, i, j in T.grid(16, 128, 128):
with T.block("init"):
vn, vi, vj = T.axis.remap("SSS", [n, i, j])
T.reads()
T.writes(C[vn, vi, vj])
C[vn, vi, vj] = T.float32(0)
for n in range(0, 16):
for i, j, k in T.grid(8, 8, 8):
with T.block("update"):
vn, vi, vj, vk = T.axis.remap("SSSR", [n, i, j, k])
T.reads(
C[vn : vn + 1, vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16],
A[vn : vn + 1, vi * 16 : vi * 16 + 16, vk * 16 : vk * 16 + 16],
B[vn : vn + 1, vj * 16 : vj * 16 + 16, vk * 16 : vk * 16 + 16],
)
T.writes(C[vn : vn + 1, vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
A_elem_offset = T.var("int32")
B_elem_offset = T.var("int32")
C_elem_offset = T.var("int32")
A_sub = T.match_buffer(
A[vn : vn + 1, vi * 16 : vi * 16 + 16, vk * 16 : vk * 16 + 16],
(16, 16),
elem_offset=A_elem_offset,
)
B_sub = T.match_buffer(
B[vn : vn + 1, vj * 16 : vj * 16 + 16, vk * 16 : vk * 16 + 16],
(16, 16),
elem_offset=B_elem_offset,
)
C_sub = T.match_buffer(
C[vn : vn + 1, vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16],
(16, 16),
elem_offset=C_elem_offset,
)
T.evaluate(
T.tvm_mma_sync(
C_sub.data,
T.floordiv(C_sub.elem_offset, 256),
A_sub.data,
T.floordiv(A_sub.elem_offset, 256),
B_sub.data,
T.floordiv(B_sub.elem_offset, 256),
C_sub.data,
T.floordiv(C_sub.elem_offset, 256),
dtype="handle",
)
)
@T.prim_func
def tensorized_batch_matmul_dot_product(
A: T.Buffer[(16, 128, 128), "float32"],
B: T.Buffer[(16, 128, 128), "float32"],
C: T.Buffer[(16, 128, 128), "float32"],
) -> None:
for n, i, j in T.grid(16, 128, 128):
with T.block("init"):
vn, vi, vj = T.axis.remap("SSS", [n, i, j])
T.reads()
T.writes(C[vn, vi, vj])
C[vn, vi, vj] = T.float32(0)
for n, i, j, k_0 in T.grid(16, 128, 128, 32):
with T.block("blockized_update"):
vn, vi, vj, vko = T.axis.remap("SSSR", [n, i, j, k_0])
T.reads(
C[vn, vi, vj], A[vn, vi, vko * 4 : vko * 4 + 4], B[vn, vj, vko * 4 : vko * 4 + 4]
)
T.writes(C[vn, vi, vj])
A_1 = T.match_buffer(
A[vn, vi, vko * 4 : vko * 4 + 4], [4], dtype="float32", offset_factor=1
)
B_1 = T.match_buffer(
B[vn, vj, vko * 4 : vko * 4 + 4], [4], dtype="float32", offset_factor=1
)
C_1 = T.match_buffer(C[vn, vi, vj], [], dtype="float32", offset_factor=1)
T.evaluate(
T.call_extern(
"vec4add",
C_1.data,
C_1.elem_offset,
A_1.data,
A_1.elem_offset,
B_1.data,
B_1.elem_offset,
dtype="int32",
)
)
@T.prim_func
def tensorized_batch_matmul_outer_product(
A: T.Buffer[(16, 128, 128), "float32"],
B: T.Buffer[(16, 128, 128), "float32"],
C: T.Buffer[(16, 128, 128), "float32"],
) -> None:
for n, i, j in T.grid(16, 128, 128):
with T.block("init"):
vn, vi, vj = T.axis.remap("SSS", [n, i, j])
T.reads()
T.writes(C[vn, vi, vj])
C[vn, vi, vj] = T.float32(0)
for n, i_0, j_0, k in T.grid(16, 8, 8, 128):
with T.block("blockized_update"):
vn, vio, vjo, vk = T.axis.remap("SSSR", [n, i_0, j_0, k])
T.reads(
C[vn, vio * 16 : vio * 16 + 16, vjo * 16 : vjo * 16 + 16],
A[vn, vio * 16 : vio * 16 + 16, vk],
B[vn, vjo * 16 : vjo * 16 + 16, vk],
)
T.writes(C[vn, vio * 16 : vio * 16 + 16, vjo * 16 : vjo * 16 + 16])
A_1 = T.match_buffer(A[vn, vio * 16 : vio * 16 + 16, vk], [16, 1], dtype="float32", offset_factor=1)
B_1 = T.match_buffer(B[vn, vjo * 16 : vjo * 16 + 16, vk], [16, 1], dtype="float32", offset_factor=1
)
C_1 = T.match_buffer(
C[vn, vio * 16 : vio * 16 + 16, vjo * 16 : vjo * 16 + 16], [16, 16], dtype="float32", offset_factor=1
)
T.evaluate(
T.call_extern("outer_product", C_1.data, C_1.elem_offset, A_1.data, A_1.elem_offset,
B_1.data, B_1.elem_offset, dtype="int32"
)
)
# fmt: on
# pylint: enable=no-member,invalid-name,unused-variable,line-too-long,redefined-outer-name,unexpected-keyword-arg,too-many-nested-blocks
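# Each register() call below pairs a description PrimFunc (the block pattern that
# tensorize must match) with an implementation PrimFunc (the body that replaces it);
# the tests then refer to a pair by its registered name when calling s.tensorize().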
tir.TensorIntrin.register("test_mma_intrin", mma_desc, mma_intrin)
tir.TensorIntrin.register("test_dot_product_intrin", dot_product_desc, dot_product_intrin)
tir.TensorIntrin.register("test_outer_product_intrin", outer_product_desc, outer_product_intrin)
def test_tensorize_matmul():
func = matmul
# schedule
s = tir.Schedule(func, debug_mask="all")
update = s.get_block("update")
i, j, k = s.get_loops(update)
io, ii = s.split(i, factors=[None, 16])
jo, ji = s.split(j, factors=[None, 16])
ko, ki = s.split(k, factors=[None, 16])
s.reorder(io, jo, ko, ii, ji, ki)
s.decompose_reduction(update, ko)
s.tensorize(ii, "test_mma_intrin")
tvm.ir.assert_structural_equal(tensorized_matmul, s.mod["main"])
verify_trace_roundtrip(sch=s, mod=func)
def test_tensorize_batch_matmul():
func = batch_matmul
s = tir.Schedule(func, debug_mask="all")
update = s.get_block("update")
_, i, j, k = s.get_loops(update)
io, ii = s.split(i, factors=[None, 16])
jo, ji = s.split(j, factors=[None, 16])
ko, ki = s.split(k, factors=[None, 16])
s.reorder(io, jo, ko, ii, ji, ki)
s.tensorize(ii, "test_mma_intrin")
tvm.ir.assert_structural_equal(tensorized_batch_matmul_mma, s.mod["main"])
verify_trace_roundtrip(sch=s, mod=batch_matmul)
def test_tensorize_dot_product():
func = batch_matmul
s = tir.Schedule(func, debug_mask="all")
C = s.get_block("update")
_, _, _, k = s.get_loops(C)
_, ki = s.split(k, factors=[None, 4])
s.tensorize(ki, "test_dot_product_intrin")
tvm.ir.assert_structural_equal(tensorized_batch_matmul_dot_product, s.mod["main"])
verify_trace_roundtrip(sch=s, mod=func)
def test_tensorize_outer_product():
func = batch_matmul
s = tir.Schedule(func, debug_mask="all")
C = s.get_block("update")
_, i, j, k = s.get_loops(C)
io, ii = s.split(i, factors=[None, 16])
jo, ji = s.split(j, factors=[None, 16])
s.reorder(io, jo, k, ii, ji)
s.tensorize(ii, "test_outer_product_intrin")
tvm.ir.assert_structural_equal(tensorized_batch_matmul_outer_product, s.mod["main"])
verify_trace_roundtrip(sch=s, mod=func)
if __name__ == "__main__":
sys.exit(pytest.main([__file__] + sys.argv[1:]))
| dmlc/tvm | tests/python/unittest/test_tir_schedule_tensorize.py | Python | apache-2.0 | 16,079 |
# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinderclient.v2.volume_encryption_types import VolumeEncryptionType
from cinderclient.tests.unit import utils
from cinderclient.tests.unit.v2 import fakes
cs = fakes.FakeClient()
class VolumeEncryptionTypesTest(utils.TestCase):
"""
Test suite for the Volume Encryption Types Resource and Manager.
"""
def test_list(self):
"""
Unit test for VolumeEncryptionTypesManager.list
Verify that a series of GET requests are made:
- one GET request for the list of volume types
- one GET request per volume type for encryption type information
Verify that all returned information is :class: VolumeEncryptionType
"""
encryption_types = cs.volume_encryption_types.list()
cs.assert_called_anytime('GET', '/types')
cs.assert_called_anytime('GET', '/types/2/encryption')
cs.assert_called_anytime('GET', '/types/1/encryption')
for encryption_type in encryption_types:
self.assertIsInstance(encryption_type, VolumeEncryptionType)
def test_get(self):
"""
Unit test for VolumeEncryptionTypesManager.get
Verify that one GET request is made for the volume type encryption
type information. Verify that returned information is :class:
VolumeEncryptionType
"""
encryption_type = cs.volume_encryption_types.get(1)
cs.assert_called('GET', '/types/1/encryption')
self.assertIsInstance(encryption_type, VolumeEncryptionType)
def test_get_no_encryption(self):
"""
Unit test for VolumeEncryptionTypesManager.get
Verify that a request on a volume type with no associated encryption
type information returns a VolumeEncryptionType with no attributes.
"""
encryption_type = cs.volume_encryption_types.get(2)
self.assertIsInstance(encryption_type, VolumeEncryptionType)
self.assertFalse(hasattr(encryption_type, 'id'),
'encryption type has an id')
def test_create(self):
"""
Unit test for VolumeEncryptionTypesManager.create
Verify that one POST request is made for the encryption type creation.
Verify that encryption type creation returns a VolumeEncryptionType.
"""
result = cs.volume_encryption_types.create(2, {'provider': 'Test',
'key_size': None,
'cipher': None,
'control_location':
None})
cs.assert_called('POST', '/types/2/encryption')
self.assertIsInstance(result, VolumeEncryptionType)
def test_update(self):
"""
Unit test for VolumeEncryptionTypesManager.update
"""
self.skipTest("Not implemented")
def test_delete(self):
"""
Unit test for VolumeEncryptionTypesManager.delete
Verify that one DELETE request is made for encryption type deletion
Verify that encryption type deletion returns None
"""
result = cs.volume_encryption_types.delete(1)
cs.assert_called('DELETE', '/types/1/encryption/provider')
self.assertIsNone(result, "delete result must be None")
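# Note (an assumption about tooling, not part of the original file): because
# fakes.FakeClient records requests instead of issuing real HTTP calls, this module
# can be run on its own with a standard test runner, e.g.
#   python -m pytest cinderclient/tests/unit/v2/test_volume_encryption_types.py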
| eayunstack/python-cinderclient | cinderclient/tests/unit/v2/test_volume_encryption_types.py | Python | apache-2.0 | 4,019 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cloud Controller: Implementation of EC2 REST API calls, which are
dispatched to other nodes via AMQP RPC. State is via distributed
datastore.
"""
import base64
import time
from oslo.config import cfg
from nova.api.ec2 import ec2utils
from nova.api.ec2 import inst_state
from nova.api.metadata import password
from nova.api import validator
from nova import availability_zones
from nova import block_device
from nova.cloudpipe import pipelib
from nova import compute
from nova.compute import api as compute_api
from nova.compute import flavors
from nova.compute import vm_states
from nova import db
from nova import exception
from nova.image import s3
from nova import network
from nova.network.security_group import quantum_driver
from nova.objects import instance as instance_obj
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import quota
from nova import servicegroup
from nova import utils
from nova import volume
ec2_opts = [
cfg.StrOpt('ec2_host',
default='$my_ip',
help='the ip of the ec2 api server'),
cfg.StrOpt('ec2_dmz_host',
default='$my_ip',
help='the internal ip of the ec2 api server'),
cfg.IntOpt('ec2_port',
default=8773,
help='the port of the ec2 api server'),
cfg.StrOpt('ec2_scheme',
default='http',
help='the protocol to use when connecting to the ec2 api '
'server (http, https)'),
cfg.StrOpt('ec2_path',
default='/services/Cloud',
help='the path prefix used to call the ec2 api server'),
cfg.ListOpt('region_list',
default=[],
help='list of region=fqdn pairs separated by commas'),
]
CONF = cfg.CONF
CONF.register_opts(ec2_opts)
CONF.import_opt('my_ip', 'nova.netconf')
CONF.import_opt('vpn_key_suffix', 'nova.cloudpipe.pipelib')
CONF.import_opt('internal_service_availability_zone',
'nova.availability_zones')
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
def validate_ec2_id(val):
if not validator.validate_str()(val):
raise exception.InvalidInstanceIDMalformed(val=val)
try:
ec2utils.ec2_id_to_id(val)
except exception.InvalidEc2Id:
raise exception.InvalidInstanceIDMalformed(val=val)
# EC2 API can return the following values as documented in the EC2 API
# http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/
# ApiReference-ItemType-InstanceStateType.html
# pending 0 | running 16 | shutting-down 32 | terminated 48 | stopping 64 |
# stopped 80
_STATE_DESCRIPTION_MAP = {
None: inst_state.PENDING,
vm_states.ACTIVE: inst_state.RUNNING,
vm_states.BUILDING: inst_state.PENDING,
vm_states.DELETED: inst_state.TERMINATED,
vm_states.SOFT_DELETED: inst_state.TERMINATED,
vm_states.STOPPED: inst_state.STOPPED,
vm_states.PAUSED: inst_state.PAUSE,
vm_states.SUSPENDED: inst_state.SUSPEND,
vm_states.RESCUED: inst_state.RESCUE,
vm_states.RESIZED: inst_state.RESIZE,
}
def _state_description(vm_state, _shutdown_terminate):
"""Map the vm state to the server status string."""
# Note(maoy): We do not provide EC2 compatibility
# in shutdown_terminate flag behavior. So we ignore
# it here.
name = _STATE_DESCRIPTION_MAP.get(vm_state, vm_state)
return {'code': inst_state.name_to_code(name),
'name': name}
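# Illustrative note (not part of the original module): an ACTIVE instance maps to
# the EC2 'running' state, so _state_description(vm_states.ACTIVE, False) yields
# {'code': 16, 'name': 'running'}, 16 being the EC2 code listed in the comment above.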
def _parse_block_device_mapping(bdm):
"""Parse BlockDeviceMappingItemType into flat hash
    BlockDeviceMapping.<N>.DeviceName
    BlockDeviceMapping.<N>.Ebs.SnapshotId
    BlockDeviceMapping.<N>.Ebs.VolumeSize
    BlockDeviceMapping.<N>.Ebs.DeleteOnTermination
    BlockDeviceMapping.<N>.Ebs.NoDevice
    BlockDeviceMapping.<N>.VirtualName
=> remove .Ebs and allow volume id in SnapshotId
"""
ebs = bdm.pop('ebs', None)
if ebs:
ec2_id = ebs.pop('snapshot_id', None)
if ec2_id:
if ec2_id.startswith('snap-'):
bdm['snapshot_id'] = ec2utils.ec2_snap_id_to_uuid(ec2_id)
elif ec2_id.startswith('vol-'):
bdm['volume_id'] = ec2utils.ec2_vol_id_to_uuid(ec2_id)
ebs.setdefault('delete_on_termination', True)
bdm.update(ebs)
return bdm
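# Illustrative note (not part of the original module): an item such as
#   {'device_name': '/dev/vdb', 'ebs': {'snapshot_id': 'snap-<id>', 'volume_size': 10}}
# is flattened so that the 'ebs' sub-dict disappears, 'snapshot_id' is replaced by the
# corresponding internal UUID, 'delete_on_termination' defaults to True, and
# 'volume_size' is carried over unchanged.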
def _properties_get_mappings(properties):
return block_device.mappings_prepend_dev(properties.get('mappings', []))
def _format_block_device_mapping(bdm):
"""Construct BlockDeviceMappingItemType
{'device_name': '...', 'snapshot_id': , ...}
=> BlockDeviceMappingItemType
"""
keys = (('deviceName', 'device_name'),
('virtualName', 'virtual_name'))
item = {}
for name, k in keys:
if k in bdm:
item[name] = bdm[k]
if bdm.get('no_device'):
item['noDevice'] = True
if ('snapshot_id' in bdm) or ('volume_id' in bdm):
ebs_keys = (('snapshotId', 'snapshot_id'),
('snapshotId', 'volume_id'), # snapshotId is abused
('volumeSize', 'volume_size'),
('deleteOnTermination', 'delete_on_termination'))
ebs = {}
for name, k in ebs_keys:
if k in bdm:
if k == 'snapshot_id':
ebs[name] = ec2utils.id_to_ec2_snap_id(bdm[k])
elif k == 'volume_id':
ebs[name] = ec2utils.id_to_ec2_vol_id(bdm[k])
else:
ebs[name] = bdm[k]
assert 'snapshotId' in ebs
item['ebs'] = ebs
return item
def _format_mappings(properties, result):
"""Format multiple BlockDeviceMappingItemType."""
mappings = [{'virtualName': m['virtual'], 'deviceName': m['device']}
for m in _properties_get_mappings(properties)
if block_device.is_swap_or_ephemeral(m['virtual'])]
block_device_mapping = [_format_block_device_mapping(bdm) for bdm in
properties.get('block_device_mapping', [])]
# NOTE(yamahata): overwrite mappings with block_device_mapping
for bdm in block_device_mapping:
for i in range(len(mappings)):
if bdm['deviceName'] == mappings[i]['deviceName']:
del mappings[i]
break
mappings.append(bdm)
# NOTE(yamahata): trim ebs.no_device == true. Is this necessary?
mappings = [bdm for bdm in mappings if not (bdm.get('noDevice', False))]
if mappings:
result['blockDeviceMapping'] = mappings
def db_to_inst_obj(context, db_instance):
# NOTE(danms): This is a temporary helper method for converting
# Instance DB objects to NovaObjects without needing to re-query.
inst_obj = instance_obj.Instance._from_db_object(
instance_obj.Instance(), db_instance,
expected_attrs=['system_metadata', 'metadata'])
inst_obj._context = context
return inst_obj
class CloudController(object):
"""CloudController provides the critical dispatch between
inbound API calls through the endpoint and messages
sent to the other nodes.
"""
def __init__(self):
self.image_service = s3.S3ImageService()
self.network_api = network.API()
self.volume_api = volume.API()
self.security_group_api = get_cloud_security_group_api()
self.compute_api = compute.API(network_api=self.network_api,
volume_api=self.volume_api,
security_group_api=self.security_group_api)
self.keypair_api = compute_api.KeypairAPI()
self.servicegroup_api = servicegroup.API()
def __str__(self):
return 'CloudController'
def _enforce_valid_instance_ids(self, context, instance_ids):
# NOTE(mikal): Amazon's implementation of the EC2 API requires that
# _all_ instance ids passed in be valid.
instances = {}
if instance_ids:
for ec2_id in instance_ids:
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_id)
instance = self.compute_api.get(context, instance_uuid)
instances[ec2_id] = instance
return instances
def _get_image_state(self, image):
# NOTE(vish): fallback status if image_state isn't set
state = image.get('status')
if state == 'active':
state = 'available'
return image['properties'].get('image_state', state)
def describe_availability_zones(self, context, **kwargs):
if ('zone_name' in kwargs and
'verbose' in kwargs['zone_name'] and
context.is_admin):
return self._describe_availability_zones_verbose(context,
**kwargs)
else:
return self._describe_availability_zones(context, **kwargs)
def _describe_availability_zones(self, context, **kwargs):
ctxt = context.elevated()
available_zones, not_available_zones = \
availability_zones.get_availability_zones(ctxt)
result = []
for zone in available_zones:
# Hide internal_service_availability_zone
if zone == CONF.internal_service_availability_zone:
continue
result.append({'zoneName': zone,
'zoneState': "available"})
for zone in not_available_zones:
result.append({'zoneName': zone,
'zoneState': "not available"})
return {'availabilityZoneInfo': result}
def _describe_availability_zones_verbose(self, context, **kwargs):
ctxt = context.elevated()
available_zones, not_available_zones = \
availability_zones.get_availability_zones(ctxt)
# Available services
enabled_services = db.service_get_all(context, False)
enabled_services = availability_zones.set_availability_zones(context,
enabled_services)
zone_hosts = {}
host_services = {}
for service in enabled_services:
zone_hosts.setdefault(service['availability_zone'], [])
if service['host'] not in zone_hosts[service['availability_zone']]:
zone_hosts[service['availability_zone']].append(
service['host'])
host_services.setdefault(service['availability_zone'] +
service['host'], [])
host_services[service['availability_zone'] + service['host']].\
append(service)
result = []
for zone in available_zones:
result.append({'zoneName': zone,
'zoneState': "available"})
for host in zone_hosts[zone]:
result.append({'zoneName': '|- %s' % host,
'zoneState': ''})
for service in host_services[zone + host]:
alive = self.servicegroup_api.service_is_up(service)
art = (alive and ":-)") or "XXX"
active = 'enabled'
if service['disabled']:
active = 'disabled'
result.append({'zoneName': '| |- %s' % service['binary'],
'zoneState': ('%s %s %s'
% (active, art,
service['updated_at']))})
for zone in not_available_zones:
result.append({'zoneName': zone,
'zoneState': "not available"})
return {'availabilityZoneInfo': result}
def describe_regions(self, context, region_name=None, **kwargs):
if CONF.region_list:
regions = []
for region in CONF.region_list:
name, _sep, host = region.partition('=')
endpoint = '%s://%s:%s%s' % (CONF.ec2_scheme,
host,
CONF.ec2_port,
CONF.ec2_path)
regions.append({'regionName': name,
'regionEndpoint': endpoint})
else:
regions = [{'regionName': 'nova',
'regionEndpoint': '%s://%s:%s%s' % (CONF.ec2_scheme,
CONF.ec2_host,
CONF.ec2_port,
CONF.ec2_path)}]
return {'regionInfo': regions}
def describe_snapshots(self,
context,
snapshot_id=None,
owner=None,
restorable_by=None,
**kwargs):
if snapshot_id:
snapshots = []
for ec2_id in snapshot_id:
internal_id = ec2utils.ec2_snap_id_to_uuid(ec2_id)
snapshot = self.volume_api.get_snapshot(
context,
snapshot_id=internal_id)
snapshots.append(snapshot)
else:
snapshots = self.volume_api.get_all_snapshots(context)
formatted_snapshots = []
for s in snapshots:
formatted = self._format_snapshot(context, s)
if formatted:
formatted_snapshots.append(formatted)
return {'snapshotSet': formatted_snapshots}
def _format_snapshot(self, context, snapshot):
# NOTE(mikal): this is just a set of strings in cinder. If they
# implement an enum, then we should move this code to use it. The
# valid ec2 statuses are "pending", "completed", and "error".
status_map = {'new': 'pending',
'creating': 'pending',
'available': 'completed',
'active': 'completed',
'deleting': 'pending',
'deleted': None,
'error': 'error'}
mapped_status = status_map.get(snapshot['status'], snapshot['status'])
if not mapped_status:
return None
s = {}
s['snapshotId'] = ec2utils.id_to_ec2_snap_id(snapshot['id'])
s['volumeId'] = ec2utils.id_to_ec2_vol_id(snapshot['volume_id'])
s['status'] = mapped_status
s['startTime'] = snapshot['created_at']
s['progress'] = snapshot['progress']
s['ownerId'] = snapshot['project_id']
s['volumeSize'] = snapshot['volume_size']
s['description'] = snapshot['display_description']
return s
def create_snapshot(self, context, volume_id, **kwargs):
validate_ec2_id(volume_id)
LOG.audit(_("Create snapshot of volume %s"), volume_id,
context=context)
volume_id = ec2utils.ec2_vol_id_to_uuid(volume_id)
args = (context, volume_id, kwargs.get('name'),
kwargs.get('description'))
if kwargs.get('force', False):
snapshot = self.volume_api.create_snapshot_force(*args)
else:
snapshot = self.volume_api.create_snapshot(*args)
db.ec2_snapshot_create(context, snapshot['id'])
return self._format_snapshot(context, snapshot)
def delete_snapshot(self, context, snapshot_id, **kwargs):
snapshot_id = ec2utils.ec2_snap_id_to_uuid(snapshot_id)
self.volume_api.delete_snapshot(context, snapshot_id)
return True
def describe_key_pairs(self, context, key_name=None, **kwargs):
key_pairs = self.keypair_api.get_key_pairs(context, context.user_id)
if key_name is not None:
key_pairs = [x for x in key_pairs if x['name'] in key_name]
#If looking for non existent key pair
if key_name is not None and not key_pairs:
msg = _('Could not find key pair(s): %s') % ','.join(key_name)
raise exception.KeypairNotFound(msg,
code="InvalidKeyPair.Duplicate")
result = []
for key_pair in key_pairs:
# filter out the vpn keys
suffix = CONF.vpn_key_suffix
if context.is_admin or not key_pair['name'].endswith(suffix):
result.append({
'keyName': key_pair['name'],
'keyFingerprint': key_pair['fingerprint'],
})
return {'keySet': result}
def create_key_pair(self, context, key_name, **kwargs):
LOG.audit(_("Create key pair %s"), key_name, context=context)
try:
keypair = self.keypair_api.create_key_pair(context,
context.user_id,
key_name)
except exception.KeypairLimitExceeded:
msg = _("Quota exceeded, too many key pairs.")
raise exception.EC2APIError(msg, code='ResourceLimitExceeded')
return {'keyName': key_name,
'keyFingerprint': keypair['fingerprint'],
'keyMaterial': keypair['private_key']}
# TODO(vish): when context is no longer an object, pass it here
def import_key_pair(self, context, key_name, public_key_material,
**kwargs):
LOG.audit(_("Import key %s"), key_name, context=context)
public_key = base64.b64decode(public_key_material)
try:
keypair = self.keypair_api.import_key_pair(context,
context.user_id,
key_name,
public_key)
except exception.KeypairLimitExceeded:
msg = _("Quota exceeded, too many key pairs.")
raise exception.EC2APIError(msg)
except exception.InvalidKeypair:
msg = _("Keypair data is invalid")
raise exception.EC2APIError(msg)
return {'keyName': key_name,
'keyFingerprint': keypair['fingerprint']}
def delete_key_pair(self, context, key_name, **kwargs):
LOG.audit(_("Delete key pair %s"), key_name, context=context)
try:
self.keypair_api.delete_key_pair(context, context.user_id,
key_name)
except exception.NotFound:
# aws returns true even if the key doesn't exist
pass
return True
def describe_security_groups(self, context, group_name=None, group_id=None,
**kwargs):
search_opts = ec2utils.search_opts_from_filters(kwargs.get('filter'))
raw_groups = self.security_group_api.list(context,
group_name,
group_id,
context.project_id,
search_opts=search_opts)
groups = [self._format_security_group(context, g) for g in raw_groups]
return {'securityGroupInfo':
list(sorted(groups,
key=lambda k: (k['ownerId'], k['groupName'])))}
def _format_security_group(self, context, group):
g = {}
g['groupDescription'] = group['description']
g['groupName'] = group['name']
g['ownerId'] = group['project_id']
g['ipPermissions'] = []
for rule in group['rules']:
r = {}
r['groups'] = []
r['ipRanges'] = []
if rule['group_id']:
if rule.get('grantee_group'):
source_group = rule['grantee_group']
r['groups'] += [{'groupName': source_group['name'],
'userId': source_group['project_id']}]
else:
# rule is not always joined with grantee_group
# for example when using quantum driver.
source_group = self.security_group_api.get(
context, id=rule['group_id'])
r['groups'] += [{'groupName': source_group.get('name'),
'userId': source_group.get('project_id')}]
if rule['protocol']:
r['ipProtocol'] = rule['protocol'].lower()
r['fromPort'] = rule['from_port']
r['toPort'] = rule['to_port']
g['ipPermissions'] += [dict(r)]
else:
for protocol, min_port, max_port in (('icmp', -1, -1),
('tcp', 1, 65535),
('udp', 1, 65535)):
r['ipProtocol'] = protocol
r['fromPort'] = min_port
r['toPort'] = max_port
g['ipPermissions'] += [dict(r)]
else:
r['ipProtocol'] = rule['protocol']
r['fromPort'] = rule['from_port']
r['toPort'] = rule['to_port']
r['ipRanges'] += [{'cidrIp': rule['cidr']}]
g['ipPermissions'] += [r]
return g
def _rule_args_to_dict(self, context, kwargs):
rules = []
if 'groups' not in kwargs and 'ip_ranges' not in kwargs:
rule = self._rule_dict_last_step(context, **kwargs)
if rule:
rules.append(rule)
return rules
if 'ip_ranges' in kwargs:
rules = self._cidr_args_split(kwargs)
else:
rules = [kwargs]
finalset = []
for rule in rules:
if 'groups' in rule:
groups_values = self._groups_args_split(rule)
for groups_value in groups_values:
final = self._rule_dict_last_step(context, **groups_value)
finalset.append(final)
else:
final = self._rule_dict_last_step(context, **rule)
finalset.append(final)
return finalset
def _cidr_args_split(self, kwargs):
cidr_args_split = []
cidrs = kwargs['ip_ranges']
for key, cidr in cidrs.iteritems():
mykwargs = kwargs.copy()
del mykwargs['ip_ranges']
mykwargs['cidr_ip'] = cidr['cidr_ip']
cidr_args_split.append(mykwargs)
return cidr_args_split
def _groups_args_split(self, kwargs):
groups_args_split = []
groups = kwargs['groups']
for key, group in groups.iteritems():
mykwargs = kwargs.copy()
del mykwargs['groups']
if 'group_name' in group:
mykwargs['source_security_group_name'] = group['group_name']
if 'user_id' in group:
mykwargs['source_security_group_owner_id'] = group['user_id']
if 'group_id' in group:
mykwargs['source_security_group_id'] = group['group_id']
groups_args_split.append(mykwargs)
return groups_args_split
def _rule_dict_last_step(self, context, to_port=None, from_port=None,
ip_protocol=None, cidr_ip=None, user_id=None,
source_security_group_name=None,
source_security_group_owner_id=None):
if source_security_group_name:
source_project_id = self._get_source_project_id(context,
source_security_group_owner_id)
source_security_group = db.security_group_get_by_name(
context.elevated(),
source_project_id,
source_security_group_name)
notfound = exception.SecurityGroupNotFound
if not source_security_group:
raise notfound(security_group_id=source_security_group_name)
group_id = source_security_group['id']
return self.security_group_api.new_group_ingress_rule(
group_id, ip_protocol, from_port, to_port)
else:
cidr = self.security_group_api.parse_cidr(cidr_ip)
return self.security_group_api.new_cidr_ingress_rule(
cidr, ip_protocol, from_port, to_port)
def _validate_group_identifier(self, group_name, group_id):
if not group_name and not group_id:
err = _("Not enough parameters, need group_name or group_id")
raise exception.EC2APIError(err)
def _validate_rulevalues(self, rulesvalues):
if not rulesvalues:
err = _("%s Not enough parameters to build a valid rule")
raise exception.EC2APIError(err % rulesvalues)
def _validate_security_group_protocol(self, values):
validprotocols = ['tcp', 'udp', 'icmp', '6', '17', '1']
if 'ip_protocol' in values and \
values['ip_protocol'] not in validprotocols:
err = _('Invalid IP protocol %s.') % values['ip_protocol']
raise exception.EC2APIError(message=err, code="400")
def revoke_security_group_ingress(self, context, group_name=None,
group_id=None, **kwargs):
self._validate_group_identifier(group_name, group_id)
security_group = self.security_group_api.get(context, group_name,
group_id)
prevalues = kwargs.get('ip_permissions', [kwargs])
rule_ids = []
for values in prevalues:
rulesvalues = self._rule_args_to_dict(context, values)
self._validate_rulevalues(rulesvalues)
for values_for_rule in rulesvalues:
values_for_rule['parent_group_id'] = security_group['id']
rule_ids.append(self.security_group_api.rule_exists(
security_group, values_for_rule))
rule_ids = [id for id in rule_ids if id]
if rule_ids:
self.security_group_api.remove_rules(context, security_group,
rule_ids)
return True
raise exception.EC2APIError(_("No rule for the specified parameters."))
# TODO(soren): This has only been tested with Boto as the client.
# Unfortunately, it seems Boto is using an old API
# for these operations, so support for newer API versions
# is sketchy.
def authorize_security_group_ingress(self, context, group_name=None,
group_id=None, **kwargs):
self._validate_group_identifier(group_name, group_id)
security_group = self.security_group_api.get(context, group_name,
group_id)
prevalues = kwargs.get('ip_permissions', [kwargs])
postvalues = []
for values in prevalues:
self._validate_security_group_protocol(values)
rulesvalues = self._rule_args_to_dict(context, values)
self._validate_rulevalues(rulesvalues)
for values_for_rule in rulesvalues:
values_for_rule['parent_group_id'] = security_group['id']
if self.security_group_api.rule_exists(security_group,
values_for_rule):
err = _('%s - This rule already exists in group')
raise exception.EC2APIError(err % values_for_rule)
postvalues.append(values_for_rule)
if postvalues:
self.security_group_api.add_rules(context, security_group['id'],
security_group['name'], postvalues)
return True
raise exception.EC2APIError(_("No rule for the specified parameters."))
def _get_source_project_id(self, context, source_security_group_owner_id):
if source_security_group_owner_id:
# Parse user:project for source group.
source_parts = source_security_group_owner_id.split(':')
# If no project name specified, assume it's same as user name.
# Since we're looking up by project name, the user name is not
# used here. It's only read for EC2 API compatibility.
if len(source_parts) == 2:
source_project_id = source_parts[1]
else:
source_project_id = source_parts[0]
else:
source_project_id = context.project_id
return source_project_id
def create_security_group(self, context, group_name, group_description):
if isinstance(group_name, unicode):
group_name = group_name.encode('utf-8')
if CONF.ec2_strict_validation:
# EC2 specification gives constraints for name and description:
# Accepts alphanumeric characters, spaces, dashes, and underscores
allowed = '^[a-zA-Z0-9_\- ]+$'
self.security_group_api.validate_property(group_name, 'name',
allowed)
self.security_group_api.validate_property(group_description,
'description', allowed)
else:
# Amazon accepts more symbols.
# So, allow POSIX [:print:] characters.
allowed = r'^[\x20-\x7E]+$'
self.security_group_api.validate_property(group_name, 'name',
allowed)
group_ref = self.security_group_api.create_security_group(
context, group_name, group_description)
return {'securityGroupSet': [self._format_security_group(context,
group_ref)]}
def delete_security_group(self, context, group_name=None, group_id=None,
**kwargs):
if not group_name and not group_id:
err = _("Not enough parameters, need group_name or group_id")
raise exception.EC2APIError(err)
security_group = self.security_group_api.get(context, group_name,
group_id)
self.security_group_api.destroy(context, security_group)
return True
def get_password_data(self, context, instance_id, **kwargs):
# instance_id may be passed in as a list of instances
if isinstance(instance_id, list):
ec2_id = instance_id[0]
else:
ec2_id = instance_id
validate_ec2_id(ec2_id)
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_id)
instance = self.compute_api.get(context, instance_uuid)
output = password.extract_password(instance)
# NOTE(vish): this should be timestamp from the metadata fields
# but it isn't important enough to implement properly
now = timeutils.utcnow()
return {"InstanceId": ec2_id,
"Timestamp": now,
"passwordData": output}
def get_console_output(self, context, instance_id, **kwargs):
LOG.audit(_("Get console output for instance %s"), instance_id,
context=context)
# instance_id may be passed in as a list of instances
if isinstance(instance_id, list):
ec2_id = instance_id[0]
else:
ec2_id = instance_id
validate_ec2_id(ec2_id)
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_id)
instance = self.compute_api.get(context, instance_uuid)
output = self.compute_api.get_console_output(context, instance)
now = timeutils.utcnow()
return {"InstanceId": ec2_id,
"Timestamp": now,
"output": base64.b64encode(output)}
def describe_volumes(self, context, volume_id=None, **kwargs):
if volume_id:
volumes = []
for ec2_id in volume_id:
validate_ec2_id(ec2_id)
internal_id = ec2utils.ec2_vol_id_to_uuid(ec2_id)
volume = self.volume_api.get(context, internal_id)
volumes.append(volume)
else:
volumes = self.volume_api.get_all(context)
volumes = [self._format_volume(context, v) for v in volumes]
return {'volumeSet': volumes}
def _format_volume(self, context, volume):
valid_ec2_api_volume_status_map = {
'attaching': 'in-use',
'detaching': 'in-use'}
instance_ec2_id = None
if volume.get('instance_uuid', None):
instance_uuid = volume['instance_uuid']
instance = db.instance_get_by_uuid(context.elevated(),
instance_uuid)
instance_ec2_id = ec2utils.id_to_ec2_inst_id(instance_uuid)
v = {}
v['volumeId'] = ec2utils.id_to_ec2_vol_id(volume['id'])
v['status'] = valid_ec2_api_volume_status_map.get(volume['status'],
volume['status'])
v['size'] = volume['size']
v['availabilityZone'] = volume['availability_zone']
v['createTime'] = volume['created_at']
if volume['attach_status'] == 'attached':
v['attachmentSet'] = [{'attachTime': volume['attach_time'],
'deleteOnTermination': False,
'device': volume['mountpoint'],
'instanceId': instance_ec2_id,
'status': 'attached',
'volumeId': v['volumeId']}]
else:
v['attachmentSet'] = [{}]
if volume.get('snapshot_id') is not None:
v['snapshotId'] = ec2utils.id_to_ec2_snap_id(volume['snapshot_id'])
else:
v['snapshotId'] = None
return v
def create_volume(self, context, **kwargs):
snapshot_ec2id = kwargs.get('snapshot_id', None)
if snapshot_ec2id is not None:
snapshot_id = ec2utils.ec2_snap_id_to_uuid(kwargs['snapshot_id'])
snapshot = self.volume_api.get_snapshot(context, snapshot_id)
LOG.audit(_("Create volume from snapshot %s"), snapshot_ec2id,
context=context)
else:
snapshot = None
LOG.audit(_("Create volume of %s GB"),
kwargs.get('size'),
context=context)
create_kwargs = dict(snapshot=snapshot,
volume_type=kwargs.get('volume_type'),
metadata=kwargs.get('metadata'),
availability_zone=kwargs.get('availability_zone'))
volume = self.volume_api.create(context,
kwargs.get('size'),
kwargs.get('name'),
kwargs.get('description'),
**create_kwargs)
db.ec2_volume_create(context, volume['id'])
# TODO(vish): Instance should be None at db layer instead of
# trying to lazy load, but for now we turn it into
# a dict to avoid an error.
return self._format_volume(context, dict(volume))
def delete_volume(self, context, volume_id, **kwargs):
validate_ec2_id(volume_id)
volume_id = ec2utils.ec2_vol_id_to_uuid(volume_id)
try:
self.volume_api.delete(context, volume_id)
except exception.InvalidVolume:
raise exception.EC2APIError(_('Delete Failed'))
return True
def attach_volume(self, context,
volume_id,
instance_id,
device, **kwargs):
validate_ec2_id(instance_id)
validate_ec2_id(volume_id)
volume_id = ec2utils.ec2_vol_id_to_uuid(volume_id)
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, instance_id)
instance = self.compute_api.get(context, instance_uuid)
LOG.audit(_('Attach volume %(volume_id)s to instance %(instance_id)s '
'at %(device)s'),
{'volume_id': volume_id,
'instance_id': instance_id,
'device': device},
context=context)
try:
self.compute_api.attach_volume(context, instance,
volume_id, device)
except exception.InvalidVolume:
raise exception.EC2APIError(_('Attach Failed.'))
volume = self.volume_api.get(context, volume_id)
return {'attachTime': volume['attach_time'],
'device': volume['mountpoint'],
'instanceId': ec2utils.id_to_ec2_inst_id(instance_uuid),
'requestId': context.request_id,
'status': volume['attach_status'],
'volumeId': ec2utils.id_to_ec2_vol_id(volume_id)}
def _get_instance_from_volume(self, context, volume):
if volume['instance_uuid']:
try:
return db.instance_get_by_uuid(context,
volume['instance_uuid'])
except exception.InstanceNotFound:
pass
raise exception.VolumeUnattached(volume_id=volume['id'])
def detach_volume(self, context, volume_id, **kwargs):
validate_ec2_id(volume_id)
volume_id = ec2utils.ec2_vol_id_to_uuid(volume_id)
LOG.audit(_("Detach volume %s"), volume_id, context=context)
volume = self.volume_api.get(context, volume_id)
instance = self._get_instance_from_volume(context, volume)
try:
self.compute_api.detach_volume(context, instance, volume)
except exception.InvalidVolume:
raise exception.EC2APIError(_('Detach Volume Failed.'))
return {'attachTime': volume['attach_time'],
'device': volume['mountpoint'],
'instanceId': ec2utils.id_to_ec2_inst_id(
volume['instance_uuid']),
'requestId': context.request_id,
'status': volume['attach_status'],
'volumeId': ec2utils.id_to_ec2_vol_id(volume_id)}
def _format_kernel_id(self, context, instance_ref, result, key):
kernel_uuid = instance_ref['kernel_id']
if kernel_uuid is None or kernel_uuid == '':
return
result[key] = ec2utils.glance_id_to_ec2_id(context, kernel_uuid, 'aki')
def _format_ramdisk_id(self, context, instance_ref, result, key):
ramdisk_uuid = instance_ref['ramdisk_id']
if ramdisk_uuid is None or ramdisk_uuid == '':
return
result[key] = ec2utils.glance_id_to_ec2_id(context, ramdisk_uuid,
'ari')
def describe_instance_attribute(self, context, instance_id, attribute,
**kwargs):
def _unsupported_attribute(instance, result):
raise exception.EC2APIError(_('attribute not supported: %s') %
attribute)
def _format_attr_block_device_mapping(instance, result):
tmp = {}
self._format_instance_root_device_name(instance, tmp)
self._format_instance_bdm(context, instance['uuid'],
tmp['rootDeviceName'], result)
def _format_attr_disable_api_termination(instance, result):
result['disableApiTermination'] = instance['disable_terminate']
def _format_attr_group_set(instance, result):
CloudController._format_group_set(instance, result)
def _format_attr_instance_initiated_shutdown_behavior(instance,
result):
if instance['shutdown_terminate']:
result['instanceInitiatedShutdownBehavior'] = 'terminate'
else:
result['instanceInitiatedShutdownBehavior'] = 'stop'
def _format_attr_instance_type(instance, result):
self._format_instance_type(instance, result)
def _format_attr_kernel(instance, result):
self._format_kernel_id(context, instance, result, 'kernel')
def _format_attr_ramdisk(instance, result):
self._format_ramdisk_id(context, instance, result, 'ramdisk')
def _format_attr_root_device_name(instance, result):
self._format_instance_root_device_name(instance, result)
def _format_attr_source_dest_check(instance, result):
_unsupported_attribute(instance, result)
def _format_attr_user_data(instance, result):
result['userData'] = base64.b64decode(instance['user_data'])
attribute_formatter = {
'blockDeviceMapping': _format_attr_block_device_mapping,
'disableApiTermination': _format_attr_disable_api_termination,
'groupSet': _format_attr_group_set,
'instanceInitiatedShutdownBehavior':
_format_attr_instance_initiated_shutdown_behavior,
'instanceType': _format_attr_instance_type,
'kernel': _format_attr_kernel,
'ramdisk': _format_attr_ramdisk,
'rootDeviceName': _format_attr_root_device_name,
'sourceDestCheck': _format_attr_source_dest_check,
'userData': _format_attr_user_data,
}
fn = attribute_formatter.get(attribute)
if fn is None:
raise exception.EC2APIError(
_('attribute not supported: %s') % attribute)
validate_ec2_id(instance_id)
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, instance_id)
instance = self.compute_api.get(context, instance_uuid)
result = {'instance_id': instance_id}
fn(instance, result)
return result
def describe_instances(self, context, **kwargs):
# Optional DescribeInstances argument
instance_id = kwargs.get('instance_id', None)
filters = kwargs.get('filter', None)
instances = self._enforce_valid_instance_ids(context, instance_id)
return self._format_describe_instances(context,
instance_id=instance_id,
instance_cache=instances,
filter=filters)
def describe_instances_v6(self, context, **kwargs):
# Optional DescribeInstancesV6 argument
instance_id = kwargs.get('instance_id', None)
filters = kwargs.get('filter', None)
instances = self._enforce_valid_instance_ids(context, instance_id)
return self._format_describe_instances(context,
instance_id=instance_id,
instance_cache=instances,
filter=filters,
use_v6=True)
def _format_describe_instances(self, context, **kwargs):
return {'reservationSet': self._format_instances(context, **kwargs)}
def _format_run_instances(self, context, reservation_id):
i = self._format_instances(context, reservation_id=reservation_id)
assert len(i) == 1
return i[0]
def _format_terminate_instances(self, context, instance_id,
previous_states):
instances_set = []
for (ec2_id, previous_state) in zip(instance_id, previous_states):
i = {}
i['instanceId'] = ec2_id
i['previousState'] = _state_description(previous_state['vm_state'],
previous_state['shutdown_terminate'])
try:
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_id)
instance = self.compute_api.get(context, instance_uuid)
i['currentState'] = _state_description(instance['vm_state'],
instance['shutdown_terminate'])
except exception.NotFound:
i['currentState'] = _state_description(vm_states.DELETED,
True)
instances_set.append(i)
return {'instancesSet': instances_set}
def _format_instance_bdm(self, context, instance_uuid, root_device_name,
result):
"""Format InstanceBlockDeviceMappingResponseItemType."""
root_device_type = 'instance-store'
mapping = []
for bdm in block_device.legacy_mapping(
db.block_device_mapping_get_all_by_instance(context,
instance_uuid)):
volume_id = bdm['volume_id']
if (volume_id is None or bdm['no_device']):
continue
if (bdm['device_name'] == root_device_name and
(bdm['snapshot_id'] or bdm['volume_id'])):
assert not bdm['virtual_name']
root_device_type = 'ebs'
vol = self.volume_api.get(context, volume_id)
LOG.debug(_("vol = %s\n"), vol)
# TODO(yamahata): volume attach time
ebs = {'volumeId': ec2utils.id_to_ec2_vol_id(volume_id),
'deleteOnTermination': bdm['delete_on_termination'],
'attachTime': vol['attach_time'] or '',
'status': vol['attach_status'], }
res = {'deviceName': bdm['device_name'],
'ebs': ebs, }
mapping.append(res)
if mapping:
result['blockDeviceMapping'] = mapping
result['rootDeviceType'] = root_device_type
@staticmethod
def _format_instance_root_device_name(instance, result):
result['rootDeviceName'] = (instance.get('root_device_name') or
block_device.DEFAULT_ROOT_DEV_NAME)
@staticmethod
def _format_instance_type(instance, result):
instance_type = flavors.extract_flavor(instance)
result['instanceType'] = instance_type['name']
@staticmethod
def _format_group_set(instance, result):
security_group_names = []
if instance.get('security_groups'):
for security_group in instance['security_groups']:
security_group_names.append(security_group['name'])
result['groupSet'] = utils.convert_to_list_dict(
security_group_names, 'groupId')
def _format_instances(self, context, instance_id=None, use_v6=False,
instances_cache=None, **search_opts):
# TODO(termie): this method is poorly named as its name does not imply
# that it will be making a variety of database calls
# rather than simply formatting a bunch of instances that
# were handed to it
reservations = {}
if not instances_cache:
instances_cache = {}
# NOTE(vish): instance_id is an optional list of ids to filter by
if instance_id:
instances = []
for ec2_id in instance_id:
if ec2_id in instances_cache:
instances.append(instances_cache[ec2_id])
else:
try:
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context,
ec2_id)
instance = self.compute_api.get(context, instance_uuid)
except exception.NotFound:
continue
instances.append(instance)
else:
try:
# always filter out deleted instances
search_opts['deleted'] = False
instances = self.compute_api.get_all(context,
search_opts=search_opts,
sort_dir='asc')
except exception.NotFound:
instances = []
for instance in instances:
if not context.is_admin:
if pipelib.is_vpn_image(instance['image_ref']):
continue
i = {}
instance_uuid = instance['uuid']
ec2_id = ec2utils.id_to_ec2_inst_id(instance_uuid)
i['instanceId'] = ec2_id
image_uuid = instance['image_ref']
i['imageId'] = ec2utils.glance_id_to_ec2_id(context, image_uuid)
self._format_kernel_id(context, instance, i, 'kernelId')
self._format_ramdisk_id(context, instance, i, 'ramdiskId')
i['instanceState'] = _state_description(
instance['vm_state'], instance['shutdown_terminate'])
fixed_ip = None
floating_ip = None
ip_info = ec2utils.get_ip_info_for_instance(context, instance)
if ip_info['fixed_ips']:
fixed_ip = ip_info['fixed_ips'][0]
if ip_info['floating_ips']:
floating_ip = ip_info['floating_ips'][0]
if ip_info['fixed_ip6s']:
i['dnsNameV6'] = ip_info['fixed_ip6s'][0]
if CONF.ec2_private_dns_show_ip:
i['privateDnsName'] = fixed_ip
else:
i['privateDnsName'] = instance['hostname']
i['privateIpAddress'] = fixed_ip
i['publicDnsName'] = floating_ip
i['ipAddress'] = floating_ip or fixed_ip
i['dnsName'] = i['publicDnsName'] or i['privateDnsName']
i['keyName'] = instance['key_name']
i['tagSet'] = []
for k, v in self.compute_api.get_instance_metadata(
context, instance).iteritems():
i['tagSet'].append({'key': k, 'value': v})
if context.is_admin:
i['keyName'] = '%s (%s, %s)' % (i['keyName'],
instance['project_id'],
instance['host'])
i['productCodesSet'] = utils.convert_to_list_dict([],
'product_codes')
self._format_instance_type(instance, i)
i['launchTime'] = instance['created_at']
i['amiLaunchIndex'] = instance['launch_index']
self._format_instance_root_device_name(instance, i)
self._format_instance_bdm(context, instance['uuid'],
i['rootDeviceName'], i)
host = instance['host']
zone = ec2utils.get_availability_zone_by_host(host)
i['placement'] = {'availabilityZone': zone}
if instance['reservation_id'] not in reservations:
r = {}
r['reservationId'] = instance['reservation_id']
r['ownerId'] = instance['project_id']
self._format_group_set(instance, r)
r['instancesSet'] = []
reservations[instance['reservation_id']] = r
reservations[instance['reservation_id']]['instancesSet'].append(i)
return list(reservations.values())
def describe_addresses(self, context, public_ip=None, **kwargs):
if public_ip:
floatings = []
for address in public_ip:
floating = self.network_api.get_floating_ip_by_address(context,
address)
floatings.append(floating)
else:
floatings = self.network_api.get_floating_ips_by_project(context)
addresses = [self._format_address(context, f) for f in floatings]
return {'addressesSet': addresses}
def _format_address(self, context, floating_ip):
ec2_id = None
if floating_ip['fixed_ip_id']:
fixed_id = floating_ip['fixed_ip_id']
fixed = self.network_api.get_fixed_ip(context, fixed_id)
if fixed['instance_uuid'] is not None:
ec2_id = ec2utils.id_to_ec2_inst_id(fixed['instance_uuid'])
address = {'public_ip': floating_ip['address'],
'instance_id': ec2_id}
if context.is_admin:
details = "%s (%s)" % (address['instance_id'],
floating_ip['project_id'])
address['instance_id'] = details
return address
def allocate_address(self, context, **kwargs):
LOG.audit(_("Allocate address"), context=context)
try:
public_ip = self.network_api.allocate_floating_ip(context)
except exception.FloatingIpLimitExceeded:
raise exception.EC2APIError(_('No more floating IPs available'))
return {'publicIp': public_ip}
def release_address(self, context, public_ip, **kwargs):
LOG.audit(_('Release address %s'), public_ip, context=context)
try:
self.network_api.release_floating_ip(context, address=public_ip)
return {'return': "true"}
except exception.FloatingIpNotFound:
raise exception.EC2APIError(_('Unable to release IP Address.'))
def associate_address(self, context, instance_id, public_ip, **kwargs):
LOG.audit(_("Associate address %(public_ip)s to instance "
"%(instance_id)s"),
{'public_ip': public_ip, 'instance_id': instance_id},
context=context)
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, instance_id)
instance = self.compute_api.get(context, instance_uuid)
cached_ipinfo = ec2utils.get_ip_info_for_instance(context, instance)
fixed_ips = cached_ipinfo['fixed_ips'] + cached_ipinfo['fixed_ip6s']
if not fixed_ips:
msg = _('Unable to associate IP Address, no fixed_ips.')
raise exception.EC2APIError(msg)
# TODO(tr3buchet): this will associate the floating IP with the
# first fixed_ip an instance has. This should be
# changed to support specifying a particular fixed_ip if
# multiple exist but this may not apply to ec2..
if len(fixed_ips) > 1:
msg = _('multiple fixed_ips exist, using the first: %s')
LOG.warning(msg, fixed_ips[0])
try:
self.network_api.associate_floating_ip(context, instance,
floating_address=public_ip,
fixed_address=fixed_ips[0])
return {'return': 'true'}
except exception.FloatingIpAssociated:
msg = _('Floating ip is already associated.')
raise exception.EC2APIError(msg)
except exception.NoFloatingIpInterface:
msg = _('l3driver call to add floating ip failed.')
raise exception.EC2APIError(msg)
except Exception:
msg = _('Error, unable to associate floating ip.')
LOG.exception(msg)
raise exception.EC2APIError(msg)
def disassociate_address(self, context, public_ip, **kwargs):
instance_id = self.network_api.get_instance_id_by_floating_address(
context, public_ip)
instance = self.compute_api.get(context, instance_id)
LOG.audit(_("Disassociate address %s"), public_ip, context=context)
try:
self.network_api.disassociate_floating_ip(context, instance,
address=public_ip)
except exception.FloatingIpNotAssociated:
msg = _('Floating ip is not associated.')
raise exception.EC2APIError(msg)
except exception.CannotDisassociateAutoAssignedFloatingIP:
msg = _('Cannot disassociate auto assigned floating ip')
raise exception.EC2APIError(msg)
return {'return': "true"}
def run_instances(self, context, **kwargs):
min_count = int(kwargs.get('min_count', 1))
if kwargs.get('kernel_id'):
kernel = self._get_image(context, kwargs['kernel_id'])
kwargs['kernel_id'] = ec2utils.id_to_glance_id(context,
kernel['id'])
if kwargs.get('ramdisk_id'):
ramdisk = self._get_image(context, kwargs['ramdisk_id'])
kwargs['ramdisk_id'] = ec2utils.id_to_glance_id(context,
ramdisk['id'])
for bdm in kwargs.get('block_device_mapping', []):
_parse_block_device_mapping(bdm)
image = self._get_image(context, kwargs['image_id'])
image_uuid = ec2utils.id_to_glance_id(context, image['id'])
if image:
image_state = self._get_image_state(image)
else:
raise exception.ImageNotFoundEC2(image_id=kwargs['image_id'])
if image_state != 'available':
raise exception.EC2APIError(_('Image must be available'))
(instances, resv_id) = self.compute_api.create(context,
instance_type=flavors.get_flavor_by_name(
kwargs.get('instance_type', None)),
image_href=image_uuid,
max_count=int(kwargs.get('max_count', min_count)),
min_count=min_count,
kernel_id=kwargs.get('kernel_id'),
ramdisk_id=kwargs.get('ramdisk_id'),
key_name=kwargs.get('key_name'),
user_data=kwargs.get('user_data'),
security_group=kwargs.get('security_group'),
availability_zone=kwargs.get('placement', {}).get(
'availability_zone'),
block_device_mapping=kwargs.get('block_device_mapping', {}))
return self._format_run_instances(context, resv_id)
def _ec2_ids_to_instances(self, context, instance_id, objects=False):
"""Get all instances first, to prevent partial executions."""
instances = []
extra = ['system_metadata', 'metadata']
for ec2_id in instance_id:
validate_ec2_id(ec2_id)
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_id)
if objects:
instance = instance_obj.Instance.get_by_uuid(
context, instance_uuid, expected_attrs=extra)
else:
instance = self.compute_api.get(context, instance_uuid)
instances.append(instance)
return instances
def terminate_instances(self, context, instance_id, **kwargs):
"""Terminate each instance in instance_id, which is a list of ec2 ids.
instance_id is a kwarg so its name cannot be modified.
"""
previous_states = self._ec2_ids_to_instances(context, instance_id)
LOG.debug(_("Going to start terminating instances"))
for instance in previous_states:
self.compute_api.delete(context, instance)
return self._format_terminate_instances(context,
instance_id,
previous_states)
def reboot_instances(self, context, instance_id, **kwargs):
"""instance_id is a list of instance ids."""
instances = self._ec2_ids_to_instances(context, instance_id)
LOG.audit(_("Reboot instance %r"), instance_id, context=context)
for instance in instances:
self.compute_api.reboot(context, instance, 'HARD')
return True
def stop_instances(self, context, instance_id, **kwargs):
"""Stop each instances in instance_id.
Here instance_id is a list of instance ids
"""
instances = self._ec2_ids_to_instances(context, instance_id, True)
LOG.debug(_("Going to stop instances"))
for instance in instances:
self.compute_api.stop(context, instance)
return True
def start_instances(self, context, instance_id, **kwargs):
"""Start each instances in instance_id.
Here instance_id is a list of instance ids
"""
instances = self._ec2_ids_to_instances(context, instance_id, True)
LOG.debug(_("Going to start instances"))
for instance in instances:
self.compute_api.start(context, instance)
return True
def _get_image(self, context, ec2_id):
try:
internal_id = ec2utils.ec2_id_to_id(ec2_id)
image = self.image_service.show(context, internal_id)
except (exception.InvalidEc2Id, exception.ImageNotFound):
filters = {'name': ec2_id}
images = self.image_service.detail(context, filters=filters)
try:
return images[0]
except IndexError:
raise exception.ImageNotFound(image_id=ec2_id)
image_type = ec2_id.split('-')[0]
if ec2utils.image_type(image.get('container_format')) != image_type:
raise exception.ImageNotFound(image_id=ec2_id)
return image
def _format_image(self, image):
"""Convert from format defined by GlanceImageService to S3 format."""
i = {}
image_type = ec2utils.image_type(image.get('container_format'))
ec2_id = ec2utils.image_ec2_id(image.get('id'), image_type)
name = image.get('name')
i['imageId'] = ec2_id
kernel_id = image['properties'].get('kernel_id')
if kernel_id:
i['kernelId'] = ec2utils.image_ec2_id(kernel_id, 'aki')
ramdisk_id = image['properties'].get('ramdisk_id')
if ramdisk_id:
i['ramdiskId'] = ec2utils.image_ec2_id(ramdisk_id, 'ari')
i['imageOwnerId'] = image.get('owner')
img_loc = image['properties'].get('image_location')
if img_loc:
i['imageLocation'] = img_loc
else:
i['imageLocation'] = "%s (%s)" % (img_loc, name)
i['name'] = name
if not name and img_loc:
# This should only occur for images registered with ec2 api
# prior to that api populating the glance name
i['name'] = img_loc
i['imageState'] = self._get_image_state(image)
i['description'] = image.get('description')
display_mapping = {'aki': 'kernel',
'ari': 'ramdisk',
'ami': 'machine'}
i['imageType'] = display_mapping.get(image_type)
i['isPublic'] = not not image.get('is_public')
i['architecture'] = image['properties'].get('architecture')
properties = image['properties']
root_device_name = block_device.properties_root_device_name(properties)
root_device_type = 'instance-store'
for bdm in properties.get('block_device_mapping', []):
if (block_device.strip_dev(bdm.get('device_name')) ==
block_device.strip_dev(root_device_name) and
('snapshot_id' in bdm or 'volume_id' in bdm) and
not bdm.get('no_device')):
root_device_type = 'ebs'
i['rootDeviceName'] = (root_device_name or
block_device.DEFAULT_ROOT_DEV_NAME)
i['rootDeviceType'] = root_device_type
_format_mappings(properties, i)
return i
def describe_images(self, context, image_id=None, **kwargs):
# NOTE: image_id is a list!
if image_id:
images = []
for ec2_id in image_id:
try:
image = self._get_image(context, ec2_id)
except exception.NotFound:
raise exception.ImageNotFound(image_id=ec2_id)
images.append(image)
else:
images = self.image_service.detail(context)
images = [self._format_image(i) for i in images]
return {'imagesSet': images}
def deregister_image(self, context, image_id, **kwargs):
LOG.audit(_("De-registering image %s"), image_id, context=context)
image = self._get_image(context, image_id)
internal_id = image['id']
self.image_service.delete(context, internal_id)
return True
def _register_image(self, context, metadata):
image = self.image_service.create(context, metadata)
image_type = ec2utils.image_type(image.get('container_format'))
image_id = ec2utils.image_ec2_id(image['id'], image_type)
return image_id
def register_image(self, context, image_location=None, **kwargs):
if image_location is None and kwargs.get('name'):
image_location = kwargs['name']
if image_location is None:
raise exception.EC2APIError(_('imageLocation is required'))
metadata = {'properties': {'image_location': image_location}}
if kwargs.get('name'):
metadata['name'] = kwargs['name']
else:
metadata['name'] = image_location
if 'root_device_name' in kwargs:
metadata['properties']['root_device_name'] = kwargs.get(
'root_device_name')
mappings = [_parse_block_device_mapping(bdm) for bdm in
kwargs.get('block_device_mapping', [])]
if mappings:
metadata['properties']['block_device_mapping'] = mappings
image_id = self._register_image(context, metadata)
LOG.audit(_('Registered image %(image_location)s with id '
'%(image_id)s'),
{'image_location': image_location, 'image_id': image_id},
context=context)
return {'imageId': image_id}
def describe_image_attribute(self, context, image_id, attribute, **kwargs):
def _block_device_mapping_attribute(image, result):
_format_mappings(image['properties'], result)
def _launch_permission_attribute(image, result):
result['launchPermission'] = []
if image['is_public']:
result['launchPermission'].append({'group': 'all'})
def _root_device_name_attribute(image, result):
_prop_root_dev_name = block_device.properties_root_device_name
result['rootDeviceName'] = _prop_root_dev_name(image['properties'])
if result['rootDeviceName'] is None:
result['rootDeviceName'] = block_device.DEFAULT_ROOT_DEV_NAME
def _kernel_attribute(image, result):
kernel_id = image['properties'].get('kernel_id')
if kernel_id:
result['kernel'] = {
'value': ec2utils.image_ec2_id(kernel_id, 'aki')
}
def _ramdisk_attribute(image, result):
ramdisk_id = image['properties'].get('ramdisk_id')
if ramdisk_id:
result['ramdisk'] = {
'value': ec2utils.image_ec2_id(ramdisk_id, 'ari')
}
supported_attributes = {
'blockDeviceMapping': _block_device_mapping_attribute,
'launchPermission': _launch_permission_attribute,
'rootDeviceName': _root_device_name_attribute,
'kernel': _kernel_attribute,
'ramdisk': _ramdisk_attribute,
}
fn = supported_attributes.get(attribute)
if fn is None:
raise exception.EC2APIError(_('attribute not supported: %s')
% attribute)
try:
image = self._get_image(context, image_id)
except exception.NotFound:
raise exception.ImageNotFound(image_id=image_id)
result = {'imageId': image_id}
fn(image, result)
return result
def modify_image_attribute(self, context, image_id, attribute,
operation_type, **kwargs):
# TODO(devcamcar): Support users and groups other than 'all'.
if attribute != 'launchPermission':
raise exception.EC2APIError(_('attribute not supported: %s')
% attribute)
if 'user_group' not in kwargs:
raise exception.EC2APIError(_('user or group not specified'))
        if len(kwargs['user_group']) != 1 or kwargs['user_group'][0] != 'all':
raise exception.EC2APIError(_('only group "all" is supported'))
if operation_type not in ['add', 'remove']:
msg = _('operation_type must be add or remove')
raise exception.EC2APIError(msg)
LOG.audit(_("Updating image %s publicity"), image_id, context=context)
try:
image = self._get_image(context, image_id)
except exception.NotFound:
raise exception.ImageNotFound(image_id=image_id)
internal_id = image['id']
del(image['id'])
image['is_public'] = (operation_type == 'add')
try:
return self.image_service.update(context, internal_id, image)
except exception.ImageNotAuthorized:
msg = _('Not allowed to modify attributes for image %s')
raise exception.EC2APIError(msg % image_id)
def update_image(self, context, image_id, **kwargs):
internal_id = ec2utils.ec2_id_to_id(image_id)
result = self.image_service.update(context, internal_id, dict(kwargs))
return result
# TODO(yamahata): race condition
# At the moment there is no way to prevent others from
# manipulating instances/volumes/snapshots.
# As other code doesn't take it into consideration, here we don't
    # care about it for now. Ostrich algorithm
def create_image(self, context, instance_id, **kwargs):
# NOTE(yamahata): name/description are ignored by register_image(),
# do so here
no_reboot = kwargs.get('no_reboot', False)
name = kwargs.get('name')
validate_ec2_id(instance_id)
ec2_instance_id = instance_id
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_instance_id)
instance = self.compute_api.get(context, instance_uuid)
bdms = self.compute_api.get_instance_bdms(context, instance)
# CreateImage only supported for the analogue of EBS-backed instances
if not self.compute_api.is_volume_backed_instance(context, instance,
bdms):
msg = _("Invalid value '%(ec2_instance_id)s' for instanceId. "
"Instance does not have a volume attached at root "
"(%(root)s)") % {'root': instance['root_device_name'],
'ec2_instance_id': ec2_instance_id}
raise exception.InvalidParameterValue(err=msg)
# stop the instance if necessary
restart_instance = False
if not no_reboot:
vm_state = instance['vm_state']
# if the instance is in subtle state, refuse to proceed.
if vm_state not in (vm_states.ACTIVE, vm_states.STOPPED):
raise exception.InstanceNotRunning(instance_id=ec2_instance_id)
if vm_state == vm_states.ACTIVE:
restart_instance = True
inst_obj = db_to_inst_obj(context, instance)
self.compute_api.stop(context, inst_obj)
                # wait for the instance to be really stopped
start_time = time.time()
while vm_state != vm_states.STOPPED:
time.sleep(1)
instance = self.compute_api.get(context, instance_uuid)
vm_state = instance['vm_state']
# NOTE(yamahata): timeout and error. 1 hour for now for safety.
# Is it too short/long?
# Or is there any better way?
timeout = 1 * 60 * 60
if time.time() > start_time + timeout:
raise exception.EC2APIError(
                            _('Couldn\'t stop instance within %d sec') % timeout)
glance_uuid = instance['image_ref']
ec2_image_id = ec2utils.glance_id_to_ec2_id(context, glance_uuid)
src_image = self._get_image(context, ec2_image_id)
image_meta = dict(src_image)
def _unmap_id_property(properties, name):
if properties[name]:
properties[name] = ec2utils.id_to_glance_id(context,
properties[name])
# ensure the ID properties are unmapped back to the glance UUID
_unmap_id_property(image_meta['properties'], 'kernel_id')
_unmap_id_property(image_meta['properties'], 'ramdisk_id')
# meaningful image name
name_map = dict(instance=instance['uuid'], now=timeutils.isotime())
name = name or _('image of %(instance)s at %(now)s') % name_map
new_image = self.compute_api.snapshot_volume_backed(context,
instance,
image_meta,
name)
ec2_id = ec2utils.glance_id_to_ec2_id(context, new_image['id'])
if restart_instance:
inst_obj = db_to_inst_obj(context, instance)
self.compute_api.start(context, inst_obj)
return {'imageId': ec2_id}
def create_tags(self, context, **kwargs):
"""Add tags to a resource
Returns True on success, error on failure.
:param context: context under which the method is called
"""
resources = kwargs.get('resource_id', None)
tags = kwargs.get('tag', None)
if resources is None or tags is None:
raise exception.EC2APIError(_('resource_id and tag are required'))
if not isinstance(resources, (tuple, list, set)):
raise exception.EC2APIError(_('Expecting a list of resources'))
for r in resources:
if ec2utils.resource_type_from_id(context, r) != 'instance':
raise exception.EC2APIError(_('Only instances implemented'))
if not isinstance(tags, (tuple, list, set)):
raise exception.EC2APIError(_('Expecting a list of tagSets'))
metadata = {}
for tag in tags:
if not isinstance(tag, dict):
                raise exception.EC2APIError(
                    _('Expecting tagSet to be key/value pairs'))
key = tag.get('key', None)
val = tag.get('value', None)
if key is None or val is None:
                raise exception.EC2APIError(
                    _('Expecting both key and value to be set'))
metadata[key] = val
for ec2_id in resources:
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_id)
instance = self.compute_api.get(context, instance_uuid)
self.compute_api.update_instance_metadata(context,
instance, metadata)
return True
def delete_tags(self, context, **kwargs):
"""Delete tags
Returns True on success, error on failure.
:param context: context under which the method is called
"""
resources = kwargs.get('resource_id', None)
tags = kwargs.get('tag', None)
if resources is None or tags is None:
raise exception.EC2APIError(_('resource_id and tag are required'))
if not isinstance(resources, (tuple, list, set)):
raise exception.EC2APIError(_('Expecting a list of resources'))
for r in resources:
if ec2utils.resource_type_from_id(context, r) != 'instance':
raise exception.EC2APIError(_('Only instances implemented'))
if not isinstance(tags, (tuple, list, set)):
raise exception.EC2APIError(_('Expecting a list of tagSets'))
for ec2_id in resources:
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_id)
instance = self.compute_api.get(context, instance_uuid)
for tag in tags:
if not isinstance(tag, dict):
                    raise exception.EC2APIError(
                        _('Expecting tagSet to be key/value pairs'))
key = tag.get('key', None)
if key is None:
raise exception.EC2APIError(_('Expecting key to be set'))
self.compute_api.delete_instance_metadata(context,
instance, key)
return True
def describe_tags(self, context, **kwargs):
"""List tags
Returns a dict with a single key 'tagSet' on success, error on failure.
:param context: context under which the method is called
"""
filters = kwargs.get('filter', None)
search_filts = []
if filters:
for filter_block in filters:
key_name = filter_block.get('name', None)
val = filter_block.get('value', None)
if val:
if isinstance(val, dict):
val = val.values()
if not isinstance(val, (tuple, list, set)):
val = (val,)
if key_name:
search_block = {}
if key_name == 'resource_id':
search_block['resource_id'] = []
for res_id in val:
search_block['resource_id'].append(
ec2utils.ec2_inst_id_to_uuid(context, res_id))
elif key_name in ['key', 'value']:
search_block[key_name] = val
elif key_name == 'resource_type':
for res_type in val:
if res_type != 'instance':
                                raise exception.EC2APIError(
                                    _('Only instances implemented'))
search_block[key_name] = 'instance'
if len(search_block.keys()) > 0:
search_filts.append(search_block)
ts = []
for tag in self.compute_api.get_all_instance_metadata(context,
search_filts):
ts.append({
'resource_id': ec2utils.id_to_ec2_inst_id(tag['instance_id']),
'resource_type': 'instance',
'key': tag['key'],
'value': tag['value']
})
return {"tagSet": ts}
class EC2SecurityGroupExceptions(object):
@staticmethod
def raise_invalid_property(msg):
raise exception.InvalidParameterValue(err=msg)
@staticmethod
def raise_group_already_exists(msg):
raise exception.EC2APIError(message=msg)
@staticmethod
def raise_invalid_group(msg):
raise exception.InvalidGroup(reason=msg)
@staticmethod
def raise_invalid_cidr(cidr, decoding_exception=None):
if decoding_exception:
raise decoding_exception
else:
raise exception.EC2APIError(_("Invalid CIDR"))
@staticmethod
def raise_over_quota(msg):
raise exception.EC2APIError(message=msg)
@staticmethod
def raise_not_found(msg):
pass
class CloudSecurityGroupNovaAPI(EC2SecurityGroupExceptions,
compute_api.SecurityGroupAPI):
pass
class CloudSecurityGroupQuantumAPI(EC2SecurityGroupExceptions,
quantum_driver.SecurityGroupAPI):
pass
def get_cloud_security_group_api():
if cfg.CONF.security_group_api.lower() == 'nova':
return CloudSecurityGroupNovaAPI()
elif cfg.CONF.security_group_api.lower() == 'quantum':
return CloudSecurityGroupQuantumAPI()
else:
raise NotImplementedError()
|
DirectXMan12/nova-hacking
|
nova/api/ec2/cloud.py
|
Python
|
apache-2.0
| 80,859
|
from ...utils.helpers import CacheDict
from ...exceptions import SmtlibError
from .expression import *
from functools import lru_cache
import copy
import logging
import operator
logger = logging.getLogger(__name__)
UNSIGN_MASK = (1 << 256) - 1
class Visitor:
""" Class/Type Visitor
Inherit your class visitor from this one and get called on a different
visiting function for each type of expression. It will call the first
implemented method for the __mro__ class order.
For example for a BitVecAdd it will try
visit_BitVecAdd() if not defined then it will try with
visit_BitVecOperation() if not defined then it will try with
visit_BitVec() if not defined then it will try with
visit_Operation() if not defined then it will try with
visit_Expression()
Other class named visitors are:
visit_Constant()
visit_Variable()
visit_Operation()
visit_BitVec()
visit_Bool()
visit_Array()
"""
def __init__(self, cache=None, **kwargs):
super().__init__()
self._stack = []
self._cache = {} if cache is None else cache
def push(self, value):
assert value is not None
self._stack.append(value)
def pop(self):
if len(self._stack) == 0:
return None
result = self._stack.pop()
return result
@property
def result(self):
assert len(self._stack) == 1
return self._stack[-1]
def _method(self, expression, *args):
for cls in expression.__class__.__mro__:
sort = cls.__name__
methodname = "visit_%s" % sort
if hasattr(self, methodname):
value = getattr(self, methodname)(expression, *args)
if value is not None:
return value
return self._rebuild(expression, args)
def visit(self, node_arg, use_fixed_point=False):
"""
The entry point of the visitor.
        The exploration algorithm is a DFS post-order traversal.
        The implementation uses two stacks instead of recursion.
        The final result is stored in self.result.
        :param node_arg: Node to explore
        :type node_arg: Expression
        :param use_fixed_point: if True, run _method until a fixed point is reached
:type use_fixed_point: Bool
"""
if isinstance(node_arg, ArrayProxy):
node = node_arg.array
else:
node = node_arg
cache = self._cache
visited = set()
stack = []
stack.append(node)
while stack:
node = stack.pop()
if node in cache:
self.push(cache[node])
elif isinstance(node, Operation):
if node in visited:
operands = [self.pop() for _ in range(len(node.operands))]
value = self._method(node, *operands)
visited.remove(node)
self.push(value)
cache[node] = value
else:
visited.add(node)
stack.append(node)
stack.extend(node.operands)
else:
self.push(self._method(node))
if use_fixed_point:
old_value = None
new_value = self.pop()
while old_value is not new_value:
self.visit(new_value)
old_value = new_value
new_value = self.pop()
if isinstance(node_arg, ArrayProxy):
new_value = ArrayProxy(new_value)
new_value._default = node_arg._default
new_value._written = set(node_arg.written)
new_value._concrete_cache = dict(node_arg._concrete_cache)
self.push(new_value)
@staticmethod
def _rebuild(expression, operands):
if isinstance(expression, Operation):
if any(x is not y for x, y in zip(expression.operands, operands)):
aux = copy.copy(expression)
aux._operands = operands
return aux
return expression
class Translator(Visitor):
""" Simple visitor to translate an expression into something else
"""
def _method(self, expression_arg, *args):
# Special case. Need to get the unsleeved version of the array
expression = expression_arg
assert expression.__class__.__mro__[-1] is object
for cls in expression.__class__.__mro__:
sort = cls.__name__
methodname = f"visit_{sort:s}"
if hasattr(self, methodname):
value = getattr(self, methodname)(expression, *args)
if value is not None:
return value
raise SmtlibError(f"No translation for this {expression}")
class GetDeclarations(Visitor):
""" Simple visitor to collect all variables in an expression or set of
expressions
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.variables = set()
def visit_Variable(self, expression):
self.variables.add(expression)
@property
def result(self):
return self.variables
class GetDepth(Translator):
""" Simple visitor to collect all variables in an expression or set of
expressions
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def visit_Expression(self, expression):
return 1
def visit_Operation(self, expression, *operands):
return 1 + max(operands)
def get_depth(exp):
visitor = GetDepth()
visitor.visit(exp)
return visitor.result
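# Hedged usage sketch for get_depth(); a lone constant or variable has depth 1
# and every enclosing Operation adds one level. This assumes the
# BitVecConstant(size, value) signature used elsewhere in this module and that
# operator overloading builds a BitVecAdd without folding it:
#
#   c = BitVecConstant(256, 1)
#   get_depth(c)        # -> 1
#   get_depth(c + c)    # -> 2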
class PrettyPrinter(Visitor):
def __init__(self, depth=None, **kwargs):
super().__init__(**kwargs)
self.output = ""
self.indent = 0
self.depth = depth
def _print(self, s, e=None):
self.output += " " * self.indent + str(s) # + '(%016x)'%hash(e)
self.output += "\n"
def visit(self, expression):
"""
Overload Visitor.visit because:
- We need a pre-order traversal
        - We use recursion as it makes it easier to keep track of the indentation
"""
self._method(expression)
def _method(self, expression, *args):
"""
        Overload Visitor._method because we want to stop iterating over the
        visit_ functions as soon as a valid one is found
"""
assert expression.__class__.__mro__[-1] is object
for cls in expression.__class__.__mro__:
sort = cls.__name__
methodname = "visit_%s" % sort
method = getattr(self, methodname, None)
if method is not None:
method(expression, *args)
return
return
def visit_Operation(self, expression, *operands):
self._print(expression.__class__.__name__, expression)
self.indent += 2
if self.depth is None or self.indent < self.depth * 2:
for o in expression.operands:
self.visit(o)
else:
self._print("...")
self.indent -= 2
return ""
def visit_BitVecExtract(self, expression):
self._print(
expression.__class__.__name__ + "{%d:%d}" % (expression.begining, expression.end),
expression,
)
self.indent += 2
if self.depth is None or self.indent < self.depth * 2:
for o in expression.operands:
self.visit(o)
else:
self._print("...")
self.indent -= 2
return ""
def visit_Constant(self, expression):
self._print(expression.value)
return ""
def visit_Variable(self, expression):
self._print(expression.name)
return ""
@property
def result(self):
return self.output
def pretty_print(expression, **kwargs):
if not isinstance(expression, Expression):
return str(expression)
pp = PrettyPrinter(**kwargs)
pp.visit(expression)
return pp.result
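# Hedged usage sketch: pretty_print() returns one node per line, indented two
# spaces per tree level, e.g. for an addition of a variable and a constant the
# output looks roughly like
#
#   BitVecAdd
#     some_var
#     1
#
# The depth= keyword limits expansion; anything deeper prints as "...".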
class ConstantFolderSimplifier(Visitor):
def __init__(self, **kw):
super().__init__(**kw)
operations = {
BitVecAdd: operator.__add__,
BitVecSub: operator.__sub__,
BitVecMul: operator.__mul__,
        BitVecDiv: operator.__floordiv__,  # integer division; truediv would yield a float
BitVecShiftLeft: operator.__lshift__,
BitVecShiftRight: operator.__rshift__,
BitVecAnd: operator.__and__,
BitVecOr: operator.__or__,
BitVecXor: operator.__xor__,
        BitVecNot: operator.__invert__,  # bitwise NOT
        BitVecNeg: operator.__neg__,  # arithmetic negation
LessThan: operator.__lt__,
LessOrEqual: operator.__le__,
BoolEqual: operator.__eq__,
GreaterThan: operator.__gt__,
GreaterOrEqual: operator.__ge__,
BoolAnd: operator.__and__,
BoolOr: operator.__or__,
BoolNot: operator.__not__,
BitVecUnsignedDiv: lambda x, y: (x & UNSIGN_MASK) // (y & UNSIGN_MASK),
UnsignedLessThan: lambda x, y: (x & UNSIGN_MASK) < (y & UNSIGN_MASK),
UnsignedLessOrEqual: lambda x, y: (x & UNSIGN_MASK) <= (y & UNSIGN_MASK),
UnsignedGreaterThan: lambda x, y: (x & UNSIGN_MASK) > (y & UNSIGN_MASK),
UnsignedGreaterOrEqual: lambda x, y: (x & UNSIGN_MASK) >= (y & UNSIGN_MASK),
}
def visit_BitVecConcat(self, expression, *operands):
if all(isinstance(o, Constant) for o in operands):
result = 0
for o in operands:
result <<= o.size
result |= o.value
return BitVecConstant(expression.size, result, taint=expression.taint)
def visit_BitVecZeroExtend(self, expression, *operands):
if all(isinstance(o, Constant) for o in operands):
return BitVecConstant(expression.size, operands[0].value, taint=expression.taint)
def visit_BitVecSignExtend(self, expression, *operands):
if expression.extend == 0:
return operands[0]
def visit_BitVecExtract(self, expression, *operands):
if all(isinstance(o, Constant) for o in expression.operands):
value = expression.operands[0].value
begining = expression.begining
end = expression.end
value = value >> begining
mask = 2 ** (end - begining + 1) - 1
value = value & mask
return BitVecConstant(expression.size, value, taint=expression.taint)
def visit_BoolAnd(self, expression, a, b):
if isinstance(a, Constant) and a.value == True:
return b
if isinstance(b, Constant) and b.value == True:
return a
def visit_Operation(self, expression, *operands):
""" constant folding, if all operands of an expression are a Constant do the math """
operation = self.operations.get(type(expression), None)
if operation is not None and all(isinstance(o, Constant) for o in operands):
value = operation(*(x.value for x in operands))
if isinstance(expression, BitVec):
return BitVecConstant(expression.size, value, taint=expression.taint)
else:
                assert isinstance(expression, Bool)
return BoolConstant(value, taint=expression.taint)
else:
if any(operands[i] is not expression.operands[i] for i in range(len(operands))):
expression = self._rebuild(expression, operands)
return expression
constant_folder_simplifier_cache = CacheDict(max_size=150000, flush_perc=25)
@lru_cache(maxsize=128, typed=True)
def constant_folder(expression):
global constant_folder_simplifier_cache
simp = ConstantFolderSimplifier(cache=constant_folder_simplifier_cache)
simp.visit(expression, use_fixed_point=True)
return simp.result
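# Hedged usage sketch (values made up, assuming no folding happens when the
# expression is built):
#
#   a = BitVecConstant(256, 2)
#   b = BitVecConstant(256, 3)
#   constant_folder(a + b)    # -> BitVecConstant with value 5
#
# Expressions that still contain free variables come back unfolded, at most
# rebuilt with their folded sub-expressions.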
class ArithmeticSimplifier(Visitor):
def __init__(self, parent=None, **kw):
super().__init__(**kw)
@staticmethod
def _same_constant(a, b):
return isinstance(a, Constant) and isinstance(b, Constant) and a.value == b.value or a is b
@staticmethod
def _changed(expression, operands):
if isinstance(expression, Constant) and len(operands) > 0:
return True
arity = len(operands)
return any(operands[i] is not expression.operands[i] for i in range(arity))
def visit_Operation(self, expression, *operands):
""" constant folding, if all operands of an expression are a Constant do the math """
if all(isinstance(o, Constant) for o in operands):
expression = constant_folder(expression)
if self._changed(expression, operands):
expression = self._rebuild(expression, operands)
return expression
def visit_BitVecZeroExtend(self, expression, *operands):
if self._changed(expression, operands):
return BitVecZeroExtend(expression.size, *operands, taint=expression.taint)
else:
return expression
def visit_BoolAnd(self, expression, *operands):
if isinstance(expression.operands[0], Constant) and expression.operands[0].value:
return expression.operands[1]
if isinstance(expression.operands[1], Constant) and expression.operands[1].value:
return expression.operands[0]
# AND ( EQ (EXTRACT(0,8, a), EXTRACT(0,8, b)), EQ (EXTRACT(8,16, a), EXTRACT(8,16 b)) ->
# EQ(EXTRACT(0,16, a), EXTRACT(0,16, b))
if isinstance(expression.operands[0], BoolEqual) and isinstance(
expression.operands[1], BoolEqual
):
# Eq operands
operand_0 = expression.operands[0]
operand_1 = expression.operands[1]
# Extract operands
operand_0_0 = operand_0.operands[0]
operand_0_1 = operand_0.operands[1]
operand_1_0 = operand_1.operands[0]
operand_1_1 = operand_1.operands[1]
if (
isinstance(operand_0_0, BitVecExtract)
and isinstance(operand_0_1, BitVecExtract)
and isinstance(operand_1_0, BitVecExtract)
and isinstance(operand_1_1, BitVecExtract)
):
if (
operand_0_0.value is operand_1_0.value
and operand_0_1.value is operand_1_1.value
and (operand_0_0.begining, operand_0_0.end)
== (operand_0_1.begining, operand_0_1.end)
and (operand_1_0.begining, operand_1_0.end)
== (operand_1_1.begining, operand_1_1.end)
):
if ((operand_0_0.end + 1) == operand_1_0.begining) or (
operand_0_0.begining == (operand_1_0.end + 1)
):
value0 = operand_0_0.value
value1 = operand_0_1.value
beg = min(operand_0_0.begining, operand_1_0.begining)
end = max(operand_0_0.end, operand_1_0.end)
return BitVecExtract(value0, beg, end - beg + 1) == BitVecExtract(
value1, beg, end - beg + 1
)
def visit_BoolNot(self, expression, *operands):
if isinstance(expression.operands[0], BoolNot):
return expression.operands[0].operands[0]
def visit_BoolEqual(self, expression, *operands):
""" (EQ, ITE(cond, constant1, constant2), constant1) -> cond
(EQ, ITE(cond, constant1, constant2), constant2) -> NOT cond
            (EQ, (extract a, b, c), (extract a, b, c)) -> True
"""
if isinstance(expression.operands[0], BitVecITE) and isinstance(
expression.operands[1], Constant
):
if isinstance(expression.operands[0].operands[1], Constant) and isinstance(
expression.operands[0].operands[2], Constant
):
value1, value2, value3 = (
expression.operands[1].value,
expression.operands[0].operands[1].value,
expression.operands[0].operands[2].value,
)
if value1 == value2 and value1 != value3:
return expression.operands[0].operands[
0
] # FIXME: this may break taint propagation
elif value1 == value3 and value1 != value2:
return BoolNot(expression.operands[0].operands[0], taint=expression.taint)
if operands[0] is operands[1]:
return BoolConstant(True, taint=expression.taint)
if isinstance(operands[0], BitVecExtract) and isinstance(operands[1], BitVecExtract):
if (
operands[0].value is operands[1].value
and operands[0].end == operands[1].end
and operands[0].begining == operands[1].begining
):
return BoolConstant(True, taint=expression.taint)
def visit_BoolOr(self, expression, a, b):
if isinstance(a, Constant):
if a.value == False:
return b
if a.value == True:
return a
if isinstance(b, Constant):
if b.value == False:
return a
if b.value == True:
return b
if a is b:
return a
def visit_BitVecITE(self, expression, *operands):
if isinstance(operands[0], Constant):
if operands[0].value:
result = operands[1]
else:
result = operands[2]
new_taint = result._taint | operands[0].taint
if result._taint != new_taint:
result = copy.copy(result)
result._taint = new_taint
return result
if self._changed(expression, operands):
return BitVecITE(expression.size, *operands, taint=expression.taint)
def visit_BitVecConcat(self, expression, *operands):
""" concat( extract(k1, 0, a), extract(sizeof(a)-k1, k1, a)) ==> a
concat( extract(k1, beg, a), extract(end, k1, a)) ==> extract(beg, end, a)
            concat( x, extract(k1, beg, a), extract(end, k1, a), z) ==> concat( x, extract(beg, end, a), z)
"""
if len(operands) == 1:
return operands[0]
changed = False
last_o = None
new_operands = []
for o in operands:
if isinstance(o, BitVecExtract):
if last_o is None:
last_o = o
else:
if last_o.value is o.value and last_o.begining == o.end + 1:
last_o = BitVecExtract(
o.value, o.begining, last_o.end - o.begining + 1, taint=expression.taint
)
changed = True
else:
new_operands.append(last_o)
last_o = o
else:
if last_o is not None:
new_operands.append(last_o)
last_o = None
new_operands.append(o)
if last_o is not None:
new_operands.append(last_o)
if changed:
return BitVecConcat(expression.size, *new_operands)
op = expression.operands[0]
value = None
end = None
begining = None
for o in operands:
# If found a non BitVecExtract, do not apply
if not isinstance(o, BitVecExtract):
value = None
break
# Set the value for the first item
if value is None:
value = o.value
begining = o.begining
end = o.end
else:
# If concat of extracts of different values do not apply
if value is not o.value:
value = None
break
                # If concat of non-contiguous extracts, do not apply
if begining != o.end + 1:
value = None
break
# update begining variable
begining = o.begining
if value is not None:
if end + 1 != value.size or begining != 0:
return BitVecExtract(value, begining, end - begining + 1, taint=expression.taint)
return value
def visit_BitVecExtract(self, expression, *operands):
""" extract(sizeof(a), 0)(a) ==> a
extract(16, 0)( concat(a,b,c,d) ) => concat(c, d)
            extract(m,M)(and/or/xor a b) => and/or/xor( extract(m,M) a, extract(m,M) b )
"""
op = expression.operands[0]
begining = expression.begining
end = expression.end
size = end - begining + 1
# extract(sizeof(a), 0)(a) ==> a
if begining == 0 and end + 1 == op.size:
return op
elif isinstance(op, BitVecExtract):
return BitVecExtract(op.value, op.begining + begining, size, taint=expression.taint)
elif isinstance(op, BitVecConcat):
new_operands = []
for item in reversed(op.operands):
if size == 0:
assert expression.size == sum([x.size for x in new_operands])
return BitVecConcat(
expression.size, *reversed(new_operands), taint=expression.taint
)
if begining >= item.size:
# skip the item
begining -= item.size
else:
if begining == 0 and size == item.size:
new_operands.append(item)
size = 0
else:
if size <= item.size - begining:
new_operands.append(BitVecExtract(item, begining, size))
size = 0
else:
new_operands.append(BitVecExtract(item, begining, item.size - begining))
size -= item.size - begining
begining = 0
if isinstance(op, (BitVecAnd, BitVecOr, BitVecXor)):
bitoperand_a, bitoperand_b = op.operands
return op.__class__(
BitVecExtract(bitoperand_a, begining, expression.size),
BitVecExtract(bitoperand_b, begining, expression.size),
taint=expression.taint,
)
def visit_BitVecAdd(self, expression, *operands):
""" a + 0 ==> a
0 + a ==> a
"""
left = expression.operands[0]
right = expression.operands[1]
if isinstance(right, BitVecConstant):
if right.value == 0:
return left
if isinstance(left, BitVecConstant):
if left.value == 0:
return right
def visit_BitVecSub(self, expression, *operands):
""" a - 0 ==> 0
(a + b) - b ==> a
(b + a) - b ==> a
"""
left = expression.operands[0]
right = expression.operands[1]
if isinstance(left, BitVecAdd):
if self._same_constant(left.operands[0], right):
return left.operands[1]
elif self._same_constant(left.operands[1], right):
return left.operands[0]
elif isinstance(left, BitVecSub) and isinstance(right, Constant):
subleft = left.operands[0]
subright = left.operands[1]
if isinstance(subright, Constant):
return BitVecSub(
subleft,
BitVecConstant(
subleft.size,
subright.value + right.value,
taint=subright.taint | right.taint,
),
)
def visit_BitVecOr(self, expression, *operands):
""" a | 0 => a
0 | a => a
            0xffffffff | a => 0xffffffff
            a | 0xffffffff => 0xffffffff
"""
left = expression.operands[0]
right = expression.operands[1]
if isinstance(right, BitVecConstant):
if right.value == 0:
return left
elif right.value == left.mask:
return right
elif isinstance(left, BitVecOr):
left_left = left.operands[0]
left_right = left.operands[1]
if isinstance(right, Constant):
return BitVecOr(left_left, (left_right | right), taint=expression.taint)
elif isinstance(left, BitVecConstant):
return BitVecOr(right, left, taint=expression.taint)
def visit_BitVecAnd(self, expression, *operands):
""" ct & x => x & ct move constants to the right
a & 0 => 0 remove zero
a & 0xffffffff => a remove full mask
(b & ct2) & ct => b & (ct&ct2) associative property
(a & (b | c) => a&b | a&c distribute over |
"""
left = expression.operands[0]
right = expression.operands[1]
if isinstance(right, BitVecConstant):
if right.value == 0:
return right
elif right.value == right.mask:
return left
elif isinstance(left, BitVecAnd):
left_left = left.operands[0]
left_right = left.operands[1]
if isinstance(right, Constant):
return BitVecAnd(left_left, left_right & right, taint=expression.taint)
elif isinstance(left, BitVecOr):
left_left = left.operands[0]
left_right = left.operands[1]
return BitVecOr(right & left_left, right & left_right, taint=expression.taint)
elif isinstance(left, BitVecConstant):
return BitVecAnd(right, left, taint=expression.taint)
def visit_BitVecShiftLeft(self, expression, *operands):
""" a << 0 => a remove zero
a << ct => 0 if ct > sizeof(a) remove big constant shift
"""
left = expression.operands[0]
right = expression.operands[1]
if isinstance(right, BitVecConstant):
if right.value == 0:
return left
            elif right.value >= right.size:
                # shifting by the full width or more clears every bit
                return BitVecConstant(expression.size, 0, taint=expression.taint)
def visit_ArraySelect(self, expression, *operands):
""" ArraySelect (ArrayStore((ArrayStore(x0,v0) ...),xn, vn), x0)
-> v0
"""
arr, index = operands
if isinstance(arr, ArrayVariable):
return
if isinstance(index, BitVecConstant):
ival = index.value
# props are slow and using them in tight loops should be avoided, esp when they offer no additional validation
# arr._operands[1] = arr.index, arr._operands[0] = arr.array
while (
isinstance(arr, ArrayStore)
and isinstance(arr._operands[1], BitVecConstant)
and arr._operands[1]._value != ival
):
arr = arr._operands[0] # arr.array
if (
isinstance(index, BitVecConstant)
and isinstance(arr, ArrayStore)
and isinstance(arr.index, BitVecConstant)
and arr.index.value == index.value
):
return arr.value
else:
if arr is not expression.array:
return arr.select(index)
def visit_Expression(self, expression, *operands):
assert len(operands) == 0
assert not isinstance(expression, Operation)
return expression
arithmetic_simplifier_cache = CacheDict(max_size=150000, flush_perc=25)
@lru_cache(maxsize=128, typed=True)
def arithmetic_simplify(expression):
global arithmetic_simplifier_cache
simp = ArithmeticSimplifier(cache=arithmetic_simplifier_cache)
simp.visit(expression, use_fixed_point=True)
return simp.result
def to_constant(expression):
"""
    Iff the expression can be simplified to a Constant, return the actual concrete value.
    Tainted expressions are rejected with a ValueError instead of being silently concretized
"""
value = simplify(expression)
if isinstance(value, Expression) and value.taint:
raise ValueError("Can not simplify tainted values to constant")
if isinstance(value, Constant):
return value.value
elif isinstance(value, Array):
if expression.index_max:
ba = bytearray()
for i in range(expression.index_max):
value_i = simplify(value[i])
if not isinstance(value_i, Constant):
break
ba.append(value_i.value)
else:
return bytes(ba)
return expression
return value
@lru_cache(maxsize=128, typed=True)
def simplify(expression):
expression = arithmetic_simplify(expression)
return expression
class TranslatorSmtlib(Translator):
""" Simple visitor to translate an expression to its smtlib representation
"""
unique = 0
def __init__(self, use_bindings=False, *args, **kw):
assert "bindings" not in kw
super().__init__(*args, **kw)
self.use_bindings = use_bindings
self._bindings_cache = {}
self._bindings = []
def _add_binding(self, expression, smtlib):
if not self.use_bindings or len(smtlib) <= 10:
return smtlib
if smtlib in self._bindings_cache:
return self._bindings_cache[smtlib]
TranslatorSmtlib.unique += 1
name = "a_%d" % TranslatorSmtlib.unique
self._bindings.append((name, expression, smtlib))
        # key by the smtlib text so the membership check above can hit
        self._bindings_cache[smtlib] = name
return name
@property
def bindings(self):
return self._bindings
translation_table = {
BoolNot: "not",
BoolEqual: "=",
BoolAnd: "and",
BoolOr: "or",
BoolXor: "xor",
BoolITE: "ite",
BitVecAdd: "bvadd",
BitVecSub: "bvsub",
BitVecMul: "bvmul",
BitVecDiv: "bvsdiv",
BitVecUnsignedDiv: "bvudiv",
BitVecMod: "bvsmod",
BitVecRem: "bvsrem",
BitVecUnsignedRem: "bvurem",
BitVecShiftLeft: "bvshl",
BitVecShiftRight: "bvlshr",
BitVecArithmeticShiftLeft: "bvashl",
BitVecArithmeticShiftRight: "bvashr",
BitVecAnd: "bvand",
BitVecOr: "bvor",
BitVecXor: "bvxor",
BitVecNot: "bvnot",
BitVecNeg: "bvneg",
LessThan: "bvslt",
LessOrEqual: "bvsle",
GreaterThan: "bvsgt",
GreaterOrEqual: "bvsge",
UnsignedLessThan: "bvult",
UnsignedLessOrEqual: "bvule",
UnsignedGreaterThan: "bvugt",
UnsignedGreaterOrEqual: "bvuge",
BitVecSignExtend: "(_ sign_extend %d)",
BitVecZeroExtend: "(_ zero_extend %d)",
BitVecExtract: "(_ extract %d %d)",
BitVecConcat: "concat",
BitVecITE: "ite",
ArrayStore: "store",
ArraySelect: "select",
}
def visit_BitVecConstant(self, expression):
assert isinstance(expression, BitVecConstant)
if expression.size == 1:
return "#" + bin(expression.value & expression.mask)[1:]
else:
return "#x%0*x" % (int(expression.size / 4), expression.value & expression.mask)
def visit_BoolConstant(self, expression):
return expression.value and "true" or "false"
def visit_Variable(self, expression):
return expression.name
def visit_ArraySelect(self, expression, *operands):
array_smt, index_smt = operands
if isinstance(expression.array, ArrayStore):
array_smt = self._add_binding(expression.array, array_smt)
return "(select %s %s)" % (array_smt, index_smt)
def visit_Operation(self, expression, *operands):
operation = self.translation_table[type(expression)]
if isinstance(expression, (BitVecSignExtend, BitVecZeroExtend)):
operation = operation % expression.extend
elif isinstance(expression, BitVecExtract):
operation = operation % (expression.end, expression.begining)
operands = [self._add_binding(*x) for x in zip(expression.operands, operands)]
return "(%s %s)" % (operation, " ".join(operands))
@property
def results(self):
        raise SmtlibError("TranslatorSmtlib produces a single output; use 'result' instead of 'results'")
@property
def result(self):
output = super().result
if self.use_bindings:
for name, expr, smtlib in reversed(self._bindings):
output = "( let ((%s %s)) %s )" % (name, smtlib, output)
return output
def translate_to_smtlib(expression, **kwargs):
translator = TranslatorSmtlib(**kwargs)
translator.visit(expression)
return translator.result
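# Hedged usage sketch; the exact hex width depends on the bit-vector size, and
# the BitVecVariable(size, name) signature is assumed here:
#
#   x = BitVecVariable(256, "x")
#   translate_to_smtlib(x + BitVecConstant(256, 1))
#   # -> something like '(bvadd x #x00...01)'
#
# With use_bindings=True, repeated large sub-expressions are emitted once and
# referenced through let-bound names of the form a_<n>.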
class Replace(Visitor):
""" Simple visitor to replaces expressions """
def __init__(self, bindings=None, **kwargs):
super().__init__(**kwargs)
if bindings is None:
raise ValueError("bindings needed in replace")
self._replace_bindings = bindings
def visit_Variable(self, expression):
if expression in self._replace_bindings:
return self._replace_bindings[expression]
return expression
def replace(expression, bindings):
if not bindings:
return expression
visitor = Replace(bindings)
visitor.visit(expression, use_fixed_point=True)
result_expression = visitor.result
return result_expression
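# Hedged usage sketch (BitVecVariable(size, name) signature assumed): bindings
# maps Variable objects to the expressions that should replace them, and the
# substitution is run to a fixed point:
#
#   x = BitVecVariable(256, "x")
#   y = BitVecVariable(256, "y")
#   replace(x + BitVecConstant(256, 1), {x: y})   # -> y + 1, as an Expression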
class ArraySelectSimplifier(Visitor):
class ExpressionNotSimple(RuntimeError):
pass
def __init__(self, target_index, **kwargs):
super().__init__(**kwargs)
self._target_index = target_index
self.stores = []
def visit_ArrayStore(self, exp, target, where, what):
if not isinstance(what, BitVecConstant):
raise self.ExpressionNotSimple
if where.value == self._target_index:
self.stores.append(what.value)
def simplify_array_select(array_exp):
assert isinstance(array_exp, ArraySelect)
simplifier = ArraySelectSimplifier(array_exp.index.value)
simplifier.visit(array_exp)
return simplifier.stores
def get_variables(expression):
visitor = GetDeclarations()
visitor.visit(expression)
return visitor.result
|
montyly/manticore
|
manticore/core/smtlib/visitors.py
|
Python
|
apache-2.0
| 34,588
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import time
from oslo_log import log as logging
from oslo_utils import timeutils
from trove.common import cfg
from trove.common import context as trove_context
from trove.common.i18n import _
from trove.common import instance
from trove.conductor import api as conductor_api
from trove.guestagent.common import guestagent_utils
from trove.guestagent.common import operating_system
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class BaseDbStatus(object):
"""
Answers the question "what is the status of the DB application on
this box?" The answer can be that the application is not installed, or
the state of the application is determined by calling a series of
commands.
    This class also handles saving and loading the status of the DB
    application in the database.
    The status is updated whenever the update() method is called, except
    if the state is changed to building or restart mode using the
    "begin_install" and "begin_restart" methods.
    The building mode persists in the database while restarting mode does
    not (so if the guest process crashes unexpectedly, update() will set the
    status to show a failure).
    These modes are exited, and update() resumes its normal behaviour, when
    end_install() or end_restart() is called, at which point the status
    again reflects the actual status of the DB app.
    This is a base class; subclasses must implement the real logic for
    determining the current status of the DB in _get_actual_db_status().
"""
_instance = None
GUESTAGENT_DIR = '~'
PREPARE_START_FILENAME = '.guestagent.prepare.start'
PREPARE_END_FILENAME = '.guestagent.prepare.end'
def __init__(self):
if self._instance is not None:
raise RuntimeError(_("Cannot instantiate twice."))
self.status = None
self.restart_mode = False
self.__prepare_completed = None
@property
def prepare_completed(self):
if self.__prepare_completed is None:
# Force the file check
self.__refresh_prepare_completed()
return self.__prepare_completed
def __refresh_prepare_completed(self):
        # Set the value of __prepare_completed based on the existence of
# the file. This is required as the state is cached so this method
# must be called any time the existence of the file changes.
self.__prepare_completed = os.path.isfile(
guestagent_utils.build_file_path(
self.GUESTAGENT_DIR, self.PREPARE_END_FILENAME))
def begin_install(self):
"""First call of the DB prepare."""
prepare_start_file = guestagent_utils.build_file_path(
self.GUESTAGENT_DIR, self.PREPARE_START_FILENAME)
operating_system.write_file(prepare_start_file, '')
self.__refresh_prepare_completed()
self.set_status(instance.ServiceStatuses.BUILDING, True)
def begin_restart(self):
"""Called before restarting DB server."""
self.restart_mode = True
def set_ready(self):
prepare_end_file = guestagent_utils.build_file_path(
self.GUESTAGENT_DIR, self.PREPARE_END_FILENAME)
operating_system.write_file(prepare_end_file, '')
self.__refresh_prepare_completed()
def end_install(self, error_occurred=False, post_processing=False):
"""Called after prepare has ended."""
# Set the "we're done" flag if there's no error and
# no post_processing is necessary
if not (error_occurred or post_processing):
self.set_ready()
final_status = None
if error_occurred:
final_status = instance.ServiceStatuses.FAILED
elif post_processing:
final_status = instance.ServiceStatuses.INSTANCE_READY
if final_status:
LOG.info(_("Set final status to %s."), final_status)
self.set_status(final_status, force=True)
else:
self._end_install_or_restart(True)
def end_restart(self):
self.restart_mode = False
LOG.info(_("Ending restart."))
self._end_install_or_restart(False)
def _end_install_or_restart(self, force):
"""Called after DB is installed or restarted.
Updates the database with the actual DB server status.
"""
real_status = self._get_actual_db_status()
LOG.info(_("Current database status is '%s'."), real_status)
self.set_status(real_status, force=force)
def _get_actual_db_status(self):
raise NotImplementedError()
@property
def is_installed(self):
"""
True if DB app should be installed and attempts to ascertain
its status won't result in nonsense.
"""
return self.prepare_completed
@property
def _is_restarting(self):
return self.restart_mode
@property
def is_running(self):
"""True if DB server is running."""
return (self.status is not None and
self.status == instance.ServiceStatuses.RUNNING)
def set_status(self, status, force=False):
"""Use conductor to update the DB app status."""
if force or self.is_installed:
LOG.debug("Casting set_status message to conductor "
"(status is '%s').", status.description)
context = trove_context.TroveContext()
heartbeat = {'service_status': status.description}
conductor_api.API(context).heartbeat(
CONF.guest_id, heartbeat,
sent=timeutils.utcnow_ts(microsecond=True))
LOG.debug("Successfully cast set_status.")
self.status = status
else:
LOG.debug("Prepare has not completed yet, skipping heartbeat.")
def update(self):
"""Find and report status of DB on this machine.
The database is updated and the status is also returned.
"""
if self.is_installed and not self._is_restarting:
LOG.debug("Determining status of DB server.")
status = self._get_actual_db_status()
self.set_status(status)
else:
LOG.info(_("DB server is not installed or is in restart mode, so "
"for now we'll skip determining the status of DB on "
"this instance."))
def restart_db_service(self, service_candidates, timeout):
"""Restart the database.
Do not change the service auto-start setting.
Disable the Trove instance heartbeat updates during the restart.
1. Stop the database service.
2. Wait for the database to shutdown.
3. Start the database service.
4. Wait for the database to start running.
:param service_candidates: List of possible system service names.
:type service_candidates: list
:param timeout: Wait timeout in seconds.
:type timeout: integer
:raises: :class:`RuntimeError` on failure.
"""
try:
self.begin_restart()
self.stop_db_service(service_candidates, timeout,
disable_on_boot=False, update_db=False)
self.start_db_service(service_candidates, timeout,
enable_on_boot=False, update_db=False)
except Exception as e:
LOG.exception(e)
raise RuntimeError(_("Database restart failed."))
finally:
self.end_restart()
def start_db_service(self, service_candidates, timeout,
enable_on_boot=True, update_db=False):
"""Start the database service and wait for the database to become
available.
The service auto-start will be updated only if the service command
succeeds.
:param service_candidates: List of possible system service names.
:type service_candidates: list
:param timeout: Wait timeout in seconds.
:type timeout: integer
:param enable_on_boot: Enable service auto-start.
The auto-start setting will be updated
only if the service command succeeds.
:type enable_on_boot: boolean
:param update_db: Suppress the Trove instance heartbeat.
:type update_db: boolean
:raises: :class:`RuntimeError` on failure.
"""
LOG.info(_("Starting database service."))
operating_system.start_service(service_candidates, timeout=timeout)
self.wait_for_database_service_start(timeout, update_db=update_db)
if enable_on_boot:
LOG.info(_("Enable service auto-start on boot."))
operating_system.enable_service_on_boot(service_candidates)
def wait_for_database_service_start(self, timeout, update_db=False):
"""Wait for the database to become available.
:param timeout: Wait timeout in seconds.
:type timeout: integer
:param update_db: Suppress the Trove instance heartbeat.
:type update_db: boolean
:raises: :class:`RuntimeError` on failure.
"""
LOG.debug("Waiting for database to start up.")
if not self._wait_for_database_service_status(
instance.ServiceStatuses.RUNNING, timeout, update_db):
raise RuntimeError(_("Database failed to start."))
LOG.info(_("Database has started successfully."))
def stop_db_service(self, service_candidates, timeout,
disable_on_boot=False, update_db=False):
"""Stop the database service and wait for the database to shutdown.
:param service_candidates: List of possible system service names.
:type service_candidates: list
:param timeout: Wait timeout in seconds.
:type timeout: integer
:param disable_on_boot: Disable service auto-start.
The auto-start setting will be updated
only if the service command succeeds.
:type disable_on_boot: boolean
:param update_db: Suppress the Trove instance heartbeat.
:type update_db: boolean
:raises: :class:`RuntimeError` on failure.
"""
LOG.info(_("Stopping database service."))
operating_system.stop_service(service_candidates, timeout=timeout)
LOG.debug("Waiting for database to shutdown.")
if not self._wait_for_database_service_status(
instance.ServiceStatuses.SHUTDOWN, timeout, update_db):
raise RuntimeError(_("Database failed to stop."))
LOG.info(_("Database has stopped successfully."))
if disable_on_boot:
LOG.info(_("Disable service auto-start on boot."))
operating_system.disable_service_on_boot(service_candidates)
def _wait_for_database_service_status(self, status, timeout, update_db):
"""Wait for the given database status.
:param status: The status to wait for.
:type status: BaseDbStatus
:param timeout: Wait timeout in seconds.
:type timeout: integer
:param update_db: Suppress the Trove instance heartbeat.
:type update_db: boolean
:returns: True on success, False otherwise.
"""
if not self.wait_for_real_status_to_change_to(
status, timeout, update_db):
LOG.info(_("Service status did not change to %(status)s "
"within the given timeout: %(timeout)ds"),
{'status': status, 'timeout': timeout})
LOG.debug("Attempting to cleanup stalled services.")
try:
self.cleanup_stalled_db_services()
except Exception:
LOG.debug("Cleanup failed.", exc_info=True)
return False
return True
def wait_for_real_status_to_change_to(self, status, max_time,
update_db=False):
"""Waits the given time for the real status to change to the one
specified.
The internal status is always updated. The public instance
state stored in the Trove database is updated only if "update_db" is
True.
"""
end_time = time.time() + max_time
# since python does not support a real do-while loop, we have
# to emulate one. Hence these shenanigans. We force at least
# one pass into the loop and therefore it is safe that
# actual_status is initialized in the loop while it is used
# outside.
loop = True
while loop:
self.status = self._get_actual_db_status()
if self.status == status:
if update_db:
self.set_status(self.status)
return True
# should we remain in this loop? this is the thing
# that emulates the do-while construct.
loop = (time.time() < end_time)
# no point waiting if our time is up and we're
# just going to error out anyway.
if loop:
LOG.debug("Waiting for DB status to change from "
"%(actual_status)s to %(status)s.",
{"actual_status": self.status, "status": status})
time.sleep(CONF.state_change_poll_time)
LOG.error(_("Timeout while waiting for database status to change."
"Expected state %(status)s, "
"current state is %(actual_status)s"),
{"status": status, "actual_status": self.status})
return False
def cleanup_stalled_db_services(self):
"""An optional datastore-specific code to cleanup stalled
database services and other resources after a status change timeout.
"""
LOG.debug("No cleanup action specified for this datastore.")
def report_root(self, context, user):
"""Use conductor to update the root-enable status."""
LOG.debug("Casting report_root message to conductor.")
conductor_api.API(context).report_root(CONF.guest_id, user)
LOG.debug("Successfully cast report_root.")
|
zhangg/trove
|
trove/guestagent/datastore/service.py
|
Python
|
apache-2.0
| 15,157
|
"""Example of specifying an autoregressive action distribution.
In an action space with multiple components (e.g., Tuple(a1, a2)), you might
want a2 to be sampled based on the sampled value of a1, i.e.,
a2_sampled ~ P(a2 | a1_sampled, obs). Normally, a1 and a2 would be sampled
independently.
To do this, you need both a custom model that implements the autoregressive
pattern, and a custom action distribution class that leverages that model.
This example shows both.
"""
import argparse
import ray
from ray import tune
from ray.rllib.examples.env.correlated_actions_env import CorrelatedActionsEnv
from ray.rllib.examples.models.autoregressive_action_model import \
AutoregressiveActionModel, TorchAutoregressiveActionModel
from ray.rllib.examples.models.autoregressive_action_dist import \
BinaryAutoregressiveDistribution, TorchBinaryAutoregressiveDistribution
from ray.rllib.models import ModelCatalog
from ray.rllib.utils.test_utils import check_learning_achieved
parser = argparse.ArgumentParser()
parser.add_argument("--run", type=str, default="PPO") # try PG, PPO, IMPALA
parser.add_argument("--torch", action="store_true")
parser.add_argument("--num-cpus", type=int, default=0)
parser.add_argument("--as-test", action="store_true")
parser.add_argument("--stop-iters", type=int, default=200)
parser.add_argument("--stop-timesteps", type=int, default=100000)
parser.add_argument("--stop-reward", type=float, default=200)
if __name__ == "__main__":
args = parser.parse_args()
ray.init(num_cpus=args.num_cpus or None)
ModelCatalog.register_custom_model(
"autoregressive_model", TorchAutoregressiveActionModel
if args.torch else AutoregressiveActionModel)
ModelCatalog.register_custom_action_dist(
"binary_autoreg_dist", TorchBinaryAutoregressiveDistribution
if args.torch else BinaryAutoregressiveDistribution)
config = {
"env": CorrelatedActionsEnv,
"gamma": 0.5,
"num_gpus": 0,
"model": {
"custom_model": "autoregressive_model",
"custom_action_dist": "binary_autoreg_dist",
},
"framework": "torch" if args.torch else "tf",
}
stop = {
"training_iteration": args.stop_iters,
"timesteps_total": args.stop_timesteps,
"episode_reward_mean": args.stop_reward,
}
results = tune.run(args.run, stop=stop, config=config)
if args.as_test:
check_learning_achieved(results, args.stop_reward)
ray.shutdown()
|
robertnishihara/ray
|
rllib/examples/autoregressive_action_dist.py
|
Python
|
apache-2.0
| 2,502
|
#!/usr/bin/python
# Copyright (c) 2015 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- coding: utf-8 -*-
#
# recommended pylint: pylint kadduser.py -d maybe-no-member,line-too-long --indent-string " "
# recommended formatting: autopep8 --indent-size 2 -i --ignore E501 kadduser.py
DOCUMENTATION = '''This module will add a user principal to Kerberos if it does not exist yet. It will not change the password if the user already exists in Kerberos.'''
EXAMPLES = '''
- name: add user
kadduser: name='root' password='kerberos_password'
'''
from ansible.module_utils.basic import *
from subprocess import PIPE, Popen
# arguments that the module gets in various actions
MODULE_ARGUMENTS = {
'name': {'type': 'str', 'required': True},
'password': {'type': 'str'},
'params': {'type': 'str'}
}
def execute(cmd, scnd_command=None):
cmd = 'kadmin.local -q "{0}" '.format(cmd)
if scnd_command != None:
cmd += ' | {0}'.format(scnd_command)
proc = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate()
proc.wait()
return out, err
def main():
module = AnsibleModule(argument_spec=MODULE_ARGUMENTS)
  # The script only sets the password once, at creation time. If you want to change it, delete the user before running the script again.
name_a = module.params.get('name', None)
password_a = module.params.get('password', None)
params_a = module.params.get('params', '')
std_o, err_o = execute('list_principals', ' grep "{0}@"'.format(name_a))
if err_o != '' and err_o != None:
module.fail_json(msg='Kerberos error {0}'.format(err_o))
changed = False
  # check whether the principal already exists
if std_o == '' or std_o == None:
    cmd_a = 'addprinc '
if password_a != None and password_a != '':
cmd_a += '-pw {1} '
elif '-nokey' not in params_a:
cmd_a += '-randkey '
cmd_a += '{2} {0}'
std_o, err_o = execute(cmd_a.format(name_a, password_a, params_a))
if err_o != '' and err_o != None and err_o[0] != 'W':
module.fail_json(msg='Kerberos error {0}'.format(err_o))
changed = True
module.exit_json(changed=changed, msg='Everything is done')
main()
|
trustedanalytics/platform-ansible
|
library/kadduser.py
|
Python
|
apache-2.0
| 2,667
|
#!/usr/bin/env python
import os
import unittest
import logging.config
from testcore import tests_from_modules
from test_django import run_django_tests
# anybody who references django needs this before loading
os.environ['DJANGO_SETTINGS_MODULE'] = 'django_tester.settings'
# add any non-django modules to be tested here
non_django_test_modules = (
'test_binfile',
'test_existdb',
'test_fedora',
'test_xmlmap',
'test_xpath',
)
def non_django_tests():
return tests_from_modules(non_django_test_modules)
if __name__ == '__main__':
test_dir = os.path.dirname(os.path.abspath(__file__))
LOGGING_CONF = os.path.join(test_dir, 'logging.conf')
if os.path.exists(LOGGING_CONF):
logging.config.fileConfig(LOGGING_CONF)
run_django_tests(extras=non_django_tests())
|
emory-libraries/eulcore-history
|
test/test_all.py
|
Python
|
apache-2.0
| 812
|
"""
@Author Jaret Deprin
@Usage
import jarets_mysql_wrapper
# All parameters are required even if user/pass are empty
c = jarets_mysql_wrapper.Mysql(hostname="localhost", database="test", user="", password="")
# Stage queries: pass stage=True to the constructor
c = jarets_mysql_wrapper.Mysql(hostname="localhost", database="test", user="", password="", stage=True)
Staged queries / values are printed but not executed.
# Select statement examples
results = c.select(table, columns, where(optional))
results = c.select("jdtest", "*")
results = c.select("jdtest", "id", "name")
results = c.select("jdtest", "id", "name", where="id=2")
results = c.select("jdtest", "id", "name", where="state='running' AND bool=True")
# Insert statement examples
# Both keyword arguments and un-named arguments are supported
# Use caution with un-named arguments as they could insert values into the
# wrong fields if you write the values in the wrong order.
new_row_primarykey = c.insert(table, **column_names=values)
new_row_primarykey = c.insert("jdtest", state="stopped", ip="10.1.1.5", name="host5", bool=True)
new_row_primarykey = c.insert("jdtest", 6,"host6","running",False,"10.1.1.6")
# Update statement examples
c.update(table, where, **column_names=values)
c.update("jdtest", where="id=6", ip="10.1.1.6", bool=False)
# Delete statement examples
c.delete(table, where)
c.delete("jdtest", where="id=6")
"""
import MySQLdb
import MySQLdb.cursors
class Mysql(object):
__instance = None
__session = None
__connection = None
def __init__(self, *args, **kwargs):
self.__host = kwargs.get('hostname')
self.__user = kwargs.get('user')
self.__password = kwargs.get('password')
self.__database = kwargs.get('database')
self.__stage = kwargs.get('stage', False)
self._mysql_conf_char = kwargs.get("encoding", "latin1")
def __open(self):
try:
conn = MySQLdb.connect(host=self.__host,
user=self.__user,
passwd=self.__password,
db=self.__database,
cursorclass=MySQLdb.cursors.DictCursor,
charset=self._mysql_conf_char)
self.__connection = conn
self.__session = conn.cursor()
#self.__connection.autocommit = True
except MySQLdb.Error as e:
print("MySQL Connection Error [%d]: %s" % (e.args[0], e.args[1]))
def __close(self):
try:
self.__session.close()
self.__connection.close()
except MySQLdb.Error as e:
print("MySQL Error Closing [%d]: %s" % (e.args[0], e.args[1]))
def insert(self, table, *args, **kwargs):
values = None
query = "INSERT INTO %s" % table
if kwargs:
keys = kwargs.keys()
values = kwargs.values()
query += "(" + ",".join(["%s"]*len(keys)) % tuple(keys) + ") VALUES(" + ",".join(["%s"]*len(values)) + ")"
elif args:
values = args
query += " VALUES(" + ",".join(["%s"]*len(values)) + ")"
if self.__stage is True:
print(query % tuple(values))
return True
self.__open()
try:
self.__session.execute(query, values)
except MySQLdb.Error as e:
print("MySQL Error Closing [%d]: %s" % (e.args[0], e.args[1]))
print(query % tuple(values))
self.__connection.rollback()
last_row = self.__session.lastrowid
self.__connection.commit()
self.__close()
return last_row
def select(self, table, *args, **kwargs):
result = None
keys = args
query = "SELECT " + ",".join(keys) + " FROM " + table
if kwargs.get('where') is not None:
query += " WHERE %s" % kwargs['where']
self.__open()
self.__session.execute(query)
result = self.__session.fetchall()
self.__connection.commit()
self.__close()
return result
def update(self, table, where, **kwargs):
values = kwargs.values()
update_list = ["" + key + "=%s" for key in kwargs.keys()]
query = "UPDATE " + table + " SET " + ",".join(update_list) + " WHERE " + where
if self.__stage is True:
print(query % tuple(values))
return True
self.__open()
try:
self.__session.execute(query, values)
except MySQLdb.Error as e:
print("MySQL Error Closing [%d]: %s" % (e.args[0], e.args[1]))
print(query % tuple(values))
self.__connection.rollback()
self.__connection.commit()
self.__close()
def delete(self, table, where):
query = "DELETE FROM %s WHERE %s" % (table, where)
if self.__stage is True:
print(query)
return True
self.__open()
try:
self.__session.execute(query)
except MySQLdb.Error as e:
print("MySQL Error Closing [%d]: %s" % (e.args[0], e.args[1]))
print(query)
self.__connection.rollback()
self.__connection.commit()
self.__close()
def call_store_procedure(self, name, *args):
result_sp = None
self.__open()
self.__session.callproc(name, args)
self.__connection.commit()
result_sp = self.__session.fetchall()
self.__close()
return result_sp
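if __name__ == '__main__':
    # Editor's illustrative demo (not part of the original module): with
    # stage=True the write helpers only print the SQL they would run, so this
    # block needs no reachable MySQL server. Table and column names are made
    # up for the example.
    demo = Mysql(hostname='localhost', database='test', user='', password='',
                 stage=True)
    demo.insert('jdtest', name='host9', state='running', ip='10.1.1.9')
    demo.update('jdtest', where="name='host9'", state='stopped')
    demo.delete('jdtest', where="name='host9'")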
|
jdeprin/python_tools
|
General/jarets_mysql_wrapper.py
|
Python
|
apache-2.0
| 4,654
|
from __future__ import absolute_import
import logging
import numbers
from threading import Lock
import kafka.common
from kafka.common import (
OffsetRequest, OffsetCommitRequest, OffsetFetchRequest,
UnknownTopicOrPartitionError
)
from kafka.util import ReentrantTimer
log = logging.getLogger("kafka")
AUTO_COMMIT_MSG_COUNT = 100
AUTO_COMMIT_INTERVAL = 5000
FETCH_DEFAULT_BLOCK_TIMEOUT = 1
FETCH_MAX_WAIT_TIME = 100
FETCH_MIN_BYTES = 4096
FETCH_BUFFER_SIZE_BYTES = 4096
MAX_FETCH_BUFFER_SIZE_BYTES = FETCH_BUFFER_SIZE_BYTES * 8
ITER_TIMEOUT_SECONDS = 60
NO_MESSAGES_WAIT_TIME_SECONDS = 0.1
class Consumer(object):
"""
    Base class to be used by other consumers. Not to be used directly.
This base class provides logic for
* initialization and fetching metadata of partitions
* Auto-commit logic
* APIs for fetching pending message count
"""
def __init__(self, client, group, topic, partitions=None, auto_commit=True,
auto_commit_every_n=AUTO_COMMIT_MSG_COUNT,
auto_commit_every_t=AUTO_COMMIT_INTERVAL):
self.client = client
self.topic = topic
self.group = group
self.client.load_metadata_for_topics(topic)
self.offsets = {}
if not partitions:
partitions = self.client.get_partition_ids_for_topic(topic)
else:
assert all(isinstance(x, numbers.Integral) for x in partitions)
# Variables for handling offset commits
self.commit_lock = Lock()
self.commit_timer = None
self.count_since_commit = 0
self.auto_commit = auto_commit
self.auto_commit_every_n = auto_commit_every_n
self.auto_commit_every_t = auto_commit_every_t
# Set up the auto-commit timer
if auto_commit is True and auto_commit_every_t is not None:
self.commit_timer = ReentrantTimer(auto_commit_every_t,
self.commit)
self.commit_timer.start()
if auto_commit:
self.fetch_last_known_offsets(partitions)
else:
for partition in partitions:
self.offsets[partition] = 0
def fetch_last_known_offsets(self, partitions=None):
if not partitions:
partitions = self.client.get_partition_ids_for_topic(self.topic)
def get_or_init_offset(resp):
try:
kafka.common.check_error(resp)
return resp.offset
except UnknownTopicOrPartitionError:
return 0
for partition in partitions:
req = OffsetFetchRequest(self.topic, partition)
(resp,) = self.client.send_offset_fetch_request(self.group, [req],
fail_on_error=False)
self.offsets[partition] = get_or_init_offset(resp)
self.fetch_offsets = self.offsets.copy()
def commit(self, partitions=None):
"""
Commit offsets for this consumer
partitions: list of partitions to commit, default is to commit
all of them
"""
        # short circuit if nothing happened. This check is kept outside
        # the lock to avoid unnecessarily acquiring it just to check the state
if self.count_since_commit == 0:
return
with self.commit_lock:
# Do this check again, just in case the state has changed
# during the lock acquiring timeout
if self.count_since_commit == 0:
return
reqs = []
if not partitions: # commit all partitions
partitions = self.offsets.keys()
for partition in partitions:
offset = self.offsets[partition]
log.debug("Commit offset %d in SimpleConsumer: "
"group=%s, topic=%s, partition=%s" %
(offset, self.group, self.topic, partition))
reqs.append(OffsetCommitRequest(self.topic, partition,
offset, None))
resps = self.client.send_offset_commit_request(self.group, reqs)
for resp in resps:
kafka.common.check_error(resp)
self.count_since_commit = 0
def _auto_commit(self):
"""
Check if we have to commit based on number of messages and commit
"""
# Check if we are supposed to do an auto-commit
if not self.auto_commit or self.auto_commit_every_n is None:
return
if self.count_since_commit >= self.auto_commit_every_n:
self.commit()
def stop(self):
if self.commit_timer is not None:
self.commit_timer.stop()
self.commit()
def pending(self, partitions=None):
"""
Gets the pending message count
partitions: list of partitions to check for, default is to check all
"""
if not partitions:
partitions = self.offsets.keys()
total = 0
reqs = []
for partition in partitions:
reqs.append(OffsetRequest(self.topic, partition, -1, 1))
resps = self.client.send_offset_request(reqs)
for resp in resps:
partition = resp.partition
pending = resp.offsets[0]
offset = self.offsets[partition]
total += pending - offset
return total
|
CrowdStrike/kafka-python
|
kafka/consumer/base.py
|
Python
|
apache-2.0
| 5,435
|
#!/usr/bin/env python
"""
Copyright 2010-2018 University Of Southern California
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This config class will encapsulate the configuration parameters
needed to run a simulation. Programs can derive specific
configuration sets from this base class to support their own programs.
"""
from __future__ import division, print_function
# Import Python modules
import os
import sys
# Import Broadband modules
import velocity_models
class WccSiteampCfg(object):
"""
Define the configuration parameters for the Jbrun program
"""
def __init__(self, vmodel_name, method):
# self.SITEAMP_MODEL3D = "cb2014"
self.SITEAMP_MODEL = "bssa2014"
self.FILTLIST = "filtmatchlist1"
self.GEN_ROCK_VS = 865
self.VREF_MAX = 1100
self.FMIN = 0.05
self.FMIDBOT = 0.1
self.FLOWCAP = 0.0
self.FMAX = 50.0
self.FHIGHTOP = 20.0
self.COMPS = ["000", "090", "ver"]
vmodel_obj = velocity_models.get_velocity_model_by_name(vmodel_name)
if vmodel_obj is None:
raise IndexError("Cannot find velocity model: %s" %
(vmodel_name))
if method.lower() == "ucsb":
vmodel_params = vmodel_obj.get_codebase_params('ucsb')
elif method.lower() == "exsim":
vmodel_params = vmodel_obj.get_codebase_params('exsim')
elif method.lower() == "sdsu":
vmodel_params = vmodel_obj.get_codebase_params('sdsu')
else:
# For now...
vmodel_params = vmodel_obj.get_codebase_params('gp')
# Read reference velocities for LF and HF components, use defaults
# values if not found so that the code will still work without GP GFs
if 'LF_VREF' in vmodel_params:
self.LF_VREF = int(vmodel_params['LF_VREF'])
else:
self.LF_VREF = self.GEN_ROCK_VS
if 'HF_VREF' in vmodel_params:
self.HF_VREF = int(vmodel_params['HF_VREF'])
else:
self.HF_VREF = self.GEN_ROCK_VS
if __name__ == "__main__":
print("Test Config Class: %s" % os.path.basename(sys.argv[0]))
|
SCECcode/BBP
|
bbp/comps/wcc_siteamp_cfg.py
|
Python
|
apache-2.0
| 2,652
|
#!/usr/bin/env python
#_*_encoding:utf-8_*_
# encoding:utf-8
import os,sys
import httplib2,hashlib
import commands
http = httplib2.Http()
def l_main(cmd):
if cmd == 'l_version' :return l_version()
if cmd == 'l_update' :return l_update()
if cmd == 'l_pwd' :return l_pwd()
#if cmd == 'l_restart' :return l_restart()
return False
def l_restart():
restart = '/bin/pwd && sh 1.sh'
ifresta = '/bin/ps aux | /bin/grep restart.sh | /bin/grep -v grep | /usr/bin/wc -l'
ifcomma = os.popen(ifresta).readlines()[0].split()[0]
if int(ifcomma) == 0:
#os.execve('/usr/bin/python /root/starl/Agent/Server/s.py', '1', '2')
#os.popen(restart).readlines()
#print commands.getstatusoutput('/root/starl/Agent/Server/restart.sh.x')
return 'Restart...'
else:
return 'Restart Ing...'
def l_pwd():
#print os.getppid()
#print os.getpid()
return sys.path[0]
def l_version():
return '0.1'
def file_md5(filename):
    f = open(filename, 'rb')
return hashlib.md5(f.read()).hexdigest()
def file_write(filename, up_path):
url = 'http://agent.mangle.starl.com.cn/'+filename
f = open (up_path, 'w')
f.write(get_http(url))
return 1
def get_http(url):
response,content = http.request(url,'GET')
return content
def l_update():
update_url = 'http://agent.mangle.starl.com.cn/update.txt'
content = get_http(update_url)
update_dict = eval(content)
update_done = {}
if update_dict.get('version') == l_version():
return 'latest'
else:
del update_dict['version']
for up_name, up_md5 in update_dict.items():
up_path = sys.path[0] + '/' + up_name
local_file_md5 = file_md5(up_path)
serve_file_md5 = up_md5
if local_file_md5 != serve_file_md5:
if file_write(up_name, up_path):
update_done[up_name] = 'update ok...'
else:
update_done[up_name] = 'update no...'
if update_done:
return update_done
else:
return 'All latest...'
|
selboo/starl-mangle
|
Agent/Server/l_command.py
|
Python
|
apache-2.0
| 1,864
|
import requests
import json
import re
'''
This script will not work until $TOKEN_FILE_PATH
is replaced with an actual path.
'''
token_file = "$TOKEN_FILE_PATH"
file_id = "2f97081c-7e84-4a93-91a8-fee860769f8e"
data_endpt = "https://api.gdc.cancer.gov/data/{}".format(file_id)
with open(token_file, "r") as token:
token_string = str(token.read().strip())
response = requests.get(data_endpt,
headers = {
"Content-Type": "application/json",
"X-Auth-Token": token_string
})
response_head_cd = response.headers["Content-Disposition"]
file_name = re.findall("filename=(.+)", response_head_cd)[0]
with open(file_name, "wb") as output_file:
output_file.write(response.content)
|
NCIP/gdc-docs
|
docs/API/Users_Guide/scripts/Download_Files_Token.py
|
Python
|
apache-2.0
| 794
|
def checkPassword(attempts, password):
def check():
while True:
attemp = yield
if attemp == password:
yield True
checker = check()
for i, attempt in enumerate(attempts):
next(checker)
if checker.send(attempt):
return i + 1
return -1
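# Editor's illustrative usage (not part of the original solution): the function
# returns the 1-based index of the first matching attempt, or -1 if none match.
if __name__ == '__main__':
    assert checkPassword(["a", "b", "secret", "c"], "secret") == 3
    assert checkPassword(["x", "y"], "secret") == -1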
|
emirot/codefights
|
intro/checkPassword.py
|
Python
|
apache-2.0
| 327
|
# pylint: disable=invalid-name, line-too-long, unused-variable, too-many-locals
"""Region in python"""
import numpy as np
def entry_index(batch, w, h, outputs, classes, coords, location, entry):
n = int(location/(w*h))
loc = location%(w*h)
return batch*outputs + n*w*h*(coords+classes+1) + entry*w*h + loc
def region_python(a_np, N, classes, coords, background, softmax):
"""Region operator
Parameters
----------
a_np : numpy.ndarray
4-D with shape [batch, in_channel, in_height, in_width]
N : int
Darknet layer parameter n
classes : int
Darknet layer parameter classes
coords : int
Darknet layer parameter coords
background : int
Darknet layer parameter background
softmax : int
Darknet layer parameter softmax
Returns
-------
b_np : np.ndarray
4-D with shape [batch, out_channel, out_height, out_width]
"""
batch, in_channel, in_height, in_width = a_np.shape
a_np_temp = np.reshape(a_np, batch*in_channel*in_height*in_width)
outputs = batch*in_channel*in_height*in_width
b_np = np.zeros(batch*in_channel*in_height*in_width)
for i in range(batch*in_channel*in_height*in_width):
b_np[i] = a_np_temp[i]
for b in range(batch):
for n in range(N):
index = entry_index(b, in_width, in_height, outputs, classes, coords, n*in_width*in_height, 0)
b_np[index: index+2*in_width*in_height] = 1/(1+np.exp(-1*b_np[index: index+2*in_width*in_height]))
index = entry_index(b, in_width, in_height, outputs, classes, coords, n*in_width*in_height, coords)
if not background:
b_np[index: index+in_width*in_height] = 1/(1+np.exp(-1*b_np[index: index+in_width*in_height]))
b_np = np.reshape(b_np, (batch, in_channel, in_height, in_width))
def local_softmax(data_in):
data_c, data_h, data_w = data_in.shape
largest = np.max(data_in, axis=1)
data_out = np.zeros((data_c, data_h, data_w))
for i in range(data_h):
for j in range(data_w):
data_out[:, i, j] = np.exp(data_in[:, i, j] - largest[i, j])
return data_out/data_out.sum(axis=0)
if softmax:
index = coords + int(not background)
for b in range(batch):
for i in range(N):
b_np_index = int(i*(in_channel/N) + index)
b_np[b, b_np_index: b_np_index + classes+background, :, :] = local_softmax(b_np[b, b_np_index:b_np_index + classes+background, :, :])
return b_np
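# Editor's illustrative usage (not part of the original module): run the
# reference implementation on a small random tensor. The layer parameters
# below are arbitrary but consistent: in_channel = N * (coords + classes + 1).
if __name__ == "__main__":
    a_np = np.random.uniform(size=(1, 18, 4, 4)).astype("float32")
    b_np = region_python(a_np, 1, 13, 4, 0, 1)  # N=1, classes=13, coords=4
    print(b_np.shape)  # (1, 18, 4, 4)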
|
mlperf/training_results_v0.6
|
Fujitsu/benchmarks/resnet/implementations/mxnet/3rdparty/tvm/topi/python/topi/testing/region_python.py
|
Python
|
apache-2.0
| 2,571
|
# coding: utf-8
#
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Base on code in migrate/changeset/databases/sqlite.py which is under
# the following license:
#
# The MIT License
#
# Copyright (c) 2009 Evan Rosson, Jan Dittberner, Domen Kožar
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import distutils.version as dist_version
import os
import re
import migrate
from migrate.changeset import ansisql
from migrate.changeset.databases import sqlite
from migrate.versioning import util as migrate_util
import sqlalchemy
from sqlalchemy.schema import UniqueConstraint
from heat.openstack.common.db import exception
from heat.openstack.common.db.sqlalchemy import session as db_session
from heat.openstack.common.gettextutils import _ # noqa
@migrate_util.decorator
def patched_with_engine(f, *a, **kw):
url = a[0]
engine = migrate_util.construct_engine(url, **kw)
try:
kw['engine'] = engine
return f(*a, **kw)
finally:
if isinstance(engine, migrate_util.Engine) and engine is not url:
migrate_util.log.debug('Disposing SQLAlchemy engine %s', engine)
engine.dispose()
# TODO(jkoelker) When migrate 0.7.3 is released and nova depends
# on that version or higher, this can be removed
MIN_PKG_VERSION = dist_version.StrictVersion('0.7.3')
if (not hasattr(migrate, '__version__') or
dist_version.StrictVersion(migrate.__version__) < MIN_PKG_VERSION):
migrate_util.with_engine = patched_with_engine
# NOTE(jkoelker) Delay importing migrate until we are patched
from migrate import exceptions as versioning_exceptions
from migrate.versioning import api as versioning_api
from migrate.versioning.repository import Repository
_REPOSITORY = None
get_engine = db_session.get_engine
def _get_unique_constraints(self, table):
"""Retrieve information about existing unique constraints of the table
This feature is needed for _recreate_table() to work properly.
Unfortunately, it's not available in sqlalchemy 0.7.x/0.8.x.
"""
data = table.metadata.bind.execute(
"""SELECT sql
FROM sqlite_master
WHERE
type='table' AND
name=:table_name""",
table_name=table.name
).fetchone()[0]
UNIQUE_PATTERN = "CONSTRAINT (\w+) UNIQUE \(([^\)]+)\)"
return [
UniqueConstraint(
*[getattr(table.columns, c.strip(' "')) for c in cols.split(",")],
name=name
)
for name, cols in re.findall(UNIQUE_PATTERN, data)
]
def _recreate_table(self, table, column=None, delta=None, omit_uniques=None):
"""Recreate the table properly
Unlike the corresponding original method of sqlalchemy-migrate this one
doesn't drop existing unique constraints when creating a new one.
"""
table_name = self.preparer.format_table(table)
# we remove all indexes so as not to have
# problems during copy and re-create
for index in table.indexes:
index.drop()
# reflect existing unique constraints
for uc in self._get_unique_constraints(table):
table.append_constraint(uc)
# omit given unique constraints when creating a new table if required
table.constraints = set([
cons for cons in table.constraints
if omit_uniques is None or cons.name not in omit_uniques
])
self.append('ALTER TABLE %s RENAME TO migration_tmp' % table_name)
self.execute()
insertion_string = self._modify_table(table, column, delta)
table.create(bind=self.connection)
self.append(insertion_string % {'table_name': table_name})
self.execute()
self.append('DROP TABLE migration_tmp')
self.execute()
def _visit_migrate_unique_constraint(self, *p, **k):
"""Drop the given unique constraint
The corresponding original method of sqlalchemy-migrate just
raises NotImplemented error
"""
self.recreate_table(p[0].table, omit_uniques=[p[0].name])
def patch_migrate():
"""A workaround for SQLite's inability to alter things
SQLite abilities to alter tables are very limited (please read
http://www.sqlite.org/lang_altertable.html for more details).
E. g. one can't drop a column or a constraint in SQLite. The
workaround for this is to recreate the original table omitting
the corresponding constraint (or column).
sqlalchemy-migrate library has recreate_table() method that
implements this workaround, but it does it wrong:
- information about unique constraints of a table
is not retrieved. So if you have a table with one
unique constraint and a migration adding another one
you will end up with a table that has only the
latter unique constraint, and the former will be lost
- dropping of unique constraints is not supported at all
The proper way to fix this is to provide a pull-request to
sqlalchemy-migrate, but the project seems to be dead. So we
can go on with monkey-patching of the lib at least for now.
"""
# this patch is needed to ensure that recreate_table() doesn't drop
# existing unique constraints of the table when creating a new one
helper_cls = sqlite.SQLiteHelper
helper_cls.recreate_table = _recreate_table
helper_cls._get_unique_constraints = _get_unique_constraints
# this patch is needed to be able to drop existing unique constraints
constraint_cls = sqlite.SQLiteConstraintDropper
constraint_cls.visit_migrate_unique_constraint = \
_visit_migrate_unique_constraint
constraint_cls.__bases__ = (ansisql.ANSIColumnDropper,
sqlite.SQLiteConstraintGenerator)
def db_sync(abs_path, version=None, init_version=0):
"""Upgrade or downgrade a database.
Function runs the upgrade() or downgrade() functions in change scripts.
:param abs_path: Absolute path to migrate repository.
:param version: Database will upgrade/downgrade until this version.
If None - database will update to the latest
available version.
:param init_version: Initial database version
"""
if version is not None:
try:
version = int(version)
except ValueError:
raise exception.DbMigrationError(
message=_("version should be an integer"))
current_version = db_version(abs_path, init_version)
repository = _find_migrate_repo(abs_path)
if version is None or version > current_version:
return versioning_api.upgrade(get_engine(), repository, version)
else:
return versioning_api.downgrade(get_engine(), repository,
version)
def db_version(abs_path, init_version):
"""Show the current version of the repository.
:param abs_path: Absolute path to migrate repository
    :param init_version: Initial database version
"""
repository = _find_migrate_repo(abs_path)
try:
return versioning_api.db_version(get_engine(), repository)
except versioning_exceptions.DatabaseNotControlledError:
meta = sqlalchemy.MetaData()
engine = get_engine()
meta.reflect(bind=engine)
tables = meta.tables
if len(tables) == 0:
db_version_control(abs_path, init_version)
return versioning_api.db_version(get_engine(), repository)
else:
# Some pre-Essex DB's may not be version controlled.
# Require them to upgrade using Essex first.
raise exception.DbMigrationError(
message=_("Upgrade DB using Essex release first."))
def db_version_control(abs_path, version=None):
"""Mark a database as under this repository's version control.
Once a database is under version control, schema changes should
only be done via change scripts in this repository.
:param abs_path: Absolute path to migrate repository
:param version: Initial database version
"""
repository = _find_migrate_repo(abs_path)
versioning_api.version_control(get_engine(), repository, version)
return version
def _find_migrate_repo(abs_path):
"""Get the project's change script repository
:param abs_path: Absolute path to migrate repository
"""
global _REPOSITORY
if not os.path.exists(abs_path):
raise exception.DbMigrationError("Path %s not found" % abs_path)
if _REPOSITORY is None:
_REPOSITORY = Repository(abs_path)
return _REPOSITORY
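def _example_sqlite_migration(repo_path):
    # Editor's illustrative sketch (not part of the original module): apply the
    # SQLite monkey-patch described in patch_migrate() once, before running any
    # migrations, then sync the schema to the latest available version.
    # `repo_path` is a hypothetical absolute path to a migrate repository.
    patch_migrate()
    return db_sync(repo_path)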
|
savi-dev/heat
|
heat/openstack/common/db/sqlalchemy/migration.py
|
Python
|
apache-2.0
| 10,063
|
from django.db import models
from django.conf import settings
if not getattr(settings, 'DACH_CONFIG').get('storage', None):
__all__ = ['DachObject']
class DachManager(models.Manager):
def get_or_none(self, *args, **kwargs):
try:
return self.get(*args, **kwargs)
except self.model.DoesNotExist:
return None
class DachObject(models.Model):
objects = DachManager()
id = models.CharField(
primary_key=True,
max_length=1024
)
value = models.TextField(
null=False
)
|
ffaraone/dach
|
dach/models.py
|
Python
|
apache-2.0
| 620
|
import numpy as np
import lightgbm as lgb
from ..component_converter import ComponentConverterBase
from federatedml.protobuf.generated.boosting_tree_model_meta_pb2 import BoostingTreeModelMeta
from federatedml.protobuf.generated.boosting_tree_model_param_pb2 import BoostingTreeModelParam, \
DecisionTreeModelParam, NodeParam
from federatedml.util import consts
from federatedml.util import LOGGER
"""
We only keep the variables necessary to ensure that LightGBM can run its predict function on the converted model
"""
FAKE_FEATURE_INFO_STR = '[0:1] '
END_OF_TREE = 'end of trees'
END_OF_PARA = 'end of parameters'
SPLIT = '\n\n'
HEADER_TEMPLATE = """tree
version=v3
num_class={}
num_tree_per_iteration={}
label_index={}
max_feature_idx={}
objective={}
feature_names={}
feature_infos={}
"""
TREE_TEMPLATE = """Tree={}
num_leaves={}
num_cat={}
split_feature={}
threshold={}
decision_type={}
left_child={}
right_child={}
leaf_value={}
internal_value={}
shrinkage={}
"""
PARA_TEMPLATE = """parameters:
[boosting: gbdt]
[objective: {}]
[num_iterations: {}]
[learning_rate: {}]
[max_depth: {}]
[max_bin: {}]
[use_missing: {}]
[zero_as_missing: {}]
[num_class: {}]
[lambda_l1: {}]
[lambda_l2: {}]
[min_data_in_leaf: {}]
[min_gain_to_split: {}]
"""
LGB_OBJECTIVE = {
consts.BINARY: "binary sigmoid:1",
consts.REGRESSION: "regression",
consts.MULTY: 'multiclass num_class:{}'
}
PARA_OBJECTIVE = {
consts.BINARY: "binary",
consts.REGRESSION: "regression",
consts.MULTY: 'multiclass'
}
def get_decision_type(node: NodeParam, use_missing, zero_as_missing):
# 00 0 0
# Nan,0 or None default left or right? cat feature or not?
default_type = 0 # 0000 None, default right, not cat feat
if not use_missing:
return default_type
if node.missing_dir == -1:
default_type = default_type | 2 # 0010
if zero_as_missing:
default_type = default_type | 4 # 0100 0
else:
default_type = default_type | 8 # 1000 np.Nan
return default_type
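def _example_decision_types():
    # Editor's illustrative check (not part of the original module): how the
    # bit flags above combine. A SimpleNamespace stands in for NodeParam here;
    # only the `missing_dir` attribute is read by get_decision_type().
    from types import SimpleNamespace
    node_left = SimpleNamespace(missing_dir=-1)
    assert get_decision_type(node_left, use_missing=False, zero_as_missing=False) == 0
    assert get_decision_type(node_left, use_missing=True, zero_as_missing=False) == 10  # 0b1010
    assert get_decision_type(node_left, use_missing=True, zero_as_missing=True) == 6  # 0b0110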
def get_lgb_objective(task_type, num_classes, ret_dict, need_multi_format=True):
if task_type == consts.CLASSIFICATION:
if num_classes == 1:
objective = ret_dict[consts.BINARY]
else:
objective = ret_dict[consts.MULTY].format(num_classes) if need_multi_format else ret_dict[consts.MULTY]
else:
objective = ret_dict[consts.REGRESSION]
return objective
def list_to_str(l_):
return str(l_).replace('[', '').replace(']', '').replace(',', '')
def parse_header(param: BoostingTreeModelParam, meta: BoostingTreeModelMeta):
# generated header of lgb str model file
# binary/regression num class is 1 in lgb
num_classes = len(param.classes_) if len(param.classes_) > 2 else 1
objective = get_lgb_objective(meta.task_type, num_classes, LGB_OBJECTIVE, need_multi_format=True)
num_tree_per_iteration = param.tree_dim
label_index = 0 # by default
max_feature_idx = len(param.feature_name_fid_mapping) - 1
feature_names = ''
for name in [param.feature_name_fid_mapping[i] for i in range(max_feature_idx+1)]:
if ' ' in name: # space is not allowed
name = name.replace(' ', '-')
feature_names += name+' '
feature_names = feature_names[:-1]
feature_info = FAKE_FEATURE_INFO_STR * (max_feature_idx+1) # need to make fake feature info
feature_info = feature_info[:-1]
result_str = HEADER_TEMPLATE.format(num_classes, num_tree_per_iteration, label_index, max_feature_idx,
objective, feature_names, feature_info)
return result_str
def internal_count_computer(cur_id, tree_node, leaf_count, internal_count):
if cur_id in leaf_count:
return leaf_count[cur_id]
left_count = internal_count_computer(tree_node[cur_id].left_nodeid, tree_node, leaf_count, internal_count)
right_count = internal_count_computer(tree_node[cur_id].right_nodeid, tree_node, leaf_count, internal_count)
internal_count[cur_id] = left_count + right_count
return internal_count[cur_id]
def compute_internal_count(tree_param: DecisionTreeModelParam):
root = tree_param.tree_[0]
internal_count = {}
leaf_count = tree_param.leaf_count
root_count = internal_count_computer(root.id, tree_param.tree_, leaf_count, internal_count)
if root.id not in internal_count:
        internal_count[root.id] = root_count
return internal_count
def update_leaf_count(param):
# in homo sbt, sometimes a leaf covers no sample, so need to add 1 to leaf count
tmp = {}
for i in param.leaf_count:
tmp[i] = param.leaf_count[i]
for i in tmp:
if tmp[i] == 0:
param.leaf_count[i] += 1
def parse_a_tree(param: DecisionTreeModelParam, tree_idx: int, use_missing=False, zero_as_missing=False, learning_rate=0.1, init_score=None):
split_feature = []
split_threshold = []
decision_type = []
internal_weight = []
leaf_weight = []
left, right = [], []
leaf_idx = -1
lgb_node_idx = 0
sbt_lgb_node_map = {}
is_leaf = []
leaf_count = []
internal_count, internal_count_dict = [], {}
has_count_info = len(param.leaf_count) != 0
# compute internal count
if has_count_info:
update_leaf_count(param)
internal_count_dict = compute_internal_count(param) # get internal count from leaf count
# mark leaf nodes and get sbt-lgb node mapping
for node in param.tree_:
is_leaf.append(node.is_leaf)
if not node.is_leaf:
sbt_lgb_node_map[node.id] = lgb_node_idx
lgb_node_idx += 1
for cur_idx, node in enumerate(param.tree_):
if not node.is_leaf:
split_feature.append(node.fid)
# if is hetero model need to decode split point and missing dir
if param.split_maskdict and param.missing_dir_maskdict is not None:
node.bid = param.split_maskdict[node.id]
node.missing_dir = param.missing_dir_maskdict[node.id]
# extract split point and weight
split_threshold.append(node.bid)
internal_weight.append(node.weight)
# add internal count
if has_count_info:
internal_count.append(internal_count_dict[node.id])
if is_leaf[node.left_nodeid]: # generate lgb leaf idx
left.append(leaf_idx)
if has_count_info:
leaf_count.append(param.leaf_count[node.left_nodeid])
leaf_idx -= 1
else:
left.append(sbt_lgb_node_map[node.left_nodeid])
if is_leaf[node.right_nodeid]: # generate lgb leaf idx
right.append(leaf_idx)
if has_count_info:
leaf_count.append(param.leaf_count[node.right_nodeid])
leaf_idx -= 1
else:
right.append(sbt_lgb_node_map[node.right_nodeid])
# get lgb decision type
decision_type.append(get_decision_type(node, use_missing, zero_as_missing))
else:
# regression model need to add init score
if init_score is not None:
score = node.weight * learning_rate + init_score
else:
# leaf value is node.weight * learning_rate in lgb
score = node.weight * learning_rate
leaf_weight.append(score)
leaves_num = len(leaf_weight)
num_cat = 0
# to string
result_str = TREE_TEMPLATE.format(tree_idx, leaves_num, num_cat, list_to_str(split_feature),
list_to_str(split_threshold), list_to_str(decision_type),
list_to_str(left), list_to_str(right), list_to_str(leaf_weight),
list_to_str(internal_weight), learning_rate)
if len(internal_count) != 0:
result_str += 'internal_count={}\n'.format(list_to_str(internal_count))
if len(leaf_count) != 0:
result_str += 'leaf_count={}\n'.format(list_to_str(leaf_count))
return result_str
def parse_feature_importance(param):
feat_importance_str = "feature_importances:\n"
mapping = param.feature_name_fid_mapping
for impt in param.feature_importances:
impt_val = impt.importance
try:
if impt.main == 'split':
impt_val = int(impt_val)
except:
LOGGER.warning("old version protobuf contains no filed 'main'")
feat_importance_str += '{}={}\n'.format(mapping[impt.fid], impt_val)
return feat_importance_str
def parse_parameter(param, meta):
"""
we only keep parameters offered by SBT
"""
tree_meta = meta.tree_meta
num_classes = 1 if meta.task_type == consts.CLASSIFICATION and param.num_classes < 3 else param.num_classes
objective = get_lgb_objective(meta.task_type, num_classes, PARA_OBJECTIVE, need_multi_format=False)
rs = PARA_TEMPLATE.format(objective, meta.num_trees, meta.learning_rate, tree_meta.max_depth,
meta.quantile_meta.bin_num, meta.tree_meta.use_missing + 0,
meta.tree_meta.zero_as_missing + 0,
num_classes, tree_meta.criterion_meta.criterion_param[0],
tree_meta.criterion_meta.criterion_param[1],
tree_meta.min_leaf_node,
tree_meta.min_impurity_split
)
return rs
def sbt_to_lgb(model_param: BoostingTreeModelParam,
model_meta: BoostingTreeModelMeta,
load_feature_importance=True):
"""
Transform sbt model to lgb model
"""
result = ''
# parse header
header_str = parse_header(model_param, model_meta)
use_missing = model_meta.tree_meta.use_missing
zero_as_missing = model_meta.tree_meta.zero_as_missing
learning_rate = model_meta.learning_rate
tree_str_list = []
# parse tree
for idx, param in enumerate(model_param.trees_):
if idx == 0 and model_meta.task_type == consts.REGRESSION: # regression task has init score
init_score = model_param.init_score[0]
else:
init_score = 0
tree_str_list.append(parse_a_tree(param, idx, use_missing, zero_as_missing, learning_rate, init_score))
# add header and tree str to result
result += header_str + '\n'
for s in tree_str_list:
result += s
result += SPLIT
result += END_OF_TREE
# handle feature importance
if load_feature_importance:
feat_importance_str = parse_feature_importance(model_param)
result += SPLIT+feat_importance_str
# parameters
para_str = parse_parameter(model_param, model_meta)
result += '\n'+para_str+'\n'+END_OF_PARA+'\n'
result += '\npandas_categorical:[]\n'
return result
def save_lgb(model: lgb.Booster, path):
model_str = model.model_to_string()
f = open(path, 'w')
f.write(model_str)
f.close()
def load_lgb(path):
f = open(path, 'r')
model_str = f.read()
f.close()
lgb_model = lgb.Booster(model_str=model_str)
return lgb_model
class HomoSBTComponentConverter(ComponentConverterBase):
@staticmethod
def get_target_modules():
return ['HomoSecureboost']
def convert(self, model_dict):
param_obj = model_dict["HomoSecureBoostingTreeGuestParam"]
meta_obj = model_dict["HomoSecureBoostingTreeGuestMeta"]
lgb_model_str = sbt_to_lgb(param_obj, meta_obj)
lgb_model = lgb.Booster(model_str=lgb_model_str)
return lgb_model
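def _example_convert_and_save(param_obj, meta_obj, path):
    # Editor's illustrative sketch (not part of the original module): convert a
    # homo-SBT model to the LightGBM text format and persist/reload it with the
    # helpers above. `param_obj` and `meta_obj` are the two protobuf messages
    # named in sbt_to_lgb()'s signature; `path` is any writable file path.
    lgb_model_str = sbt_to_lgb(param_obj, meta_obj)
    booster = lgb.Booster(model_str=lgb_model_str)
    save_lgb(booster, path)
    return load_lgb(path)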
|
FederatedAI/FATE
|
python/federatedml/protobuf/homo_model_convert/lightgbm/gbdt.py
|
Python
|
apache-2.0
| 11,746
|
# This is the second 'http' example, demoing the interplay of 'drivers'
from snabbdom import span, input, div
'''
!!! If not explicitly imported, the (slider) 'input' will be overridden by the (dialog box) 'input' from the Python runtime.
This should somehow be avoidable without introducing lots of aliases, e.g. by a kind of dummy import.
??? Why was this different before ES6 modules were introduced?
'''
d = dict
def log(f):
'''
debug tool, you can insert .map(log) anywhere
I used it to compare first buggy version with the js version, which I
configured to the "jsapp" DOM element.
Both worked concurrently.
'''
console.log(f)
return f
def LabeledSlider(sources):
'A cycle js component'
dom_source = sources.DOM
propsS = sources.props
new_valueS = dom_source \
.select('.slider') \
.events('input') \
.map(lambda ev: ev.target.value)
stateS = propsS \
.map(lambda props: new_valueS \
.map(lambda val: {
'label': props.label,
'unit' : props.unit,
'min' : props.min,
'max' : props.max,
'value': val}) \
.startWith(props)
) \
.flatten() \
.remember() # https://github.com/staltz/xstream/wiki/Migrating-from-RxJS
# all streams are hot, the start with would be forgotten w/o this:
vdomS = stateS \
.map(lambda state: \
div('.labeled-slider', [
span('.label',
state.label + ' ' + state.value + state.unit),
input('.slider', {'attrs': {
'type': 'range', 'min': state.min,
'max': state.max, 'value': state.value}}),
]))
sinks = d(DOM=vdomS, value=stateS.map(lambda state: state.value))
return sinks
def main(sources):
xs = xstream['default']
propsS = xs.of( d(label='Radius', unit='', min=20, value=50, max=80) )
labeled_slider = LabeledSlider({'DOM': sources.DOM, 'props': propsS})
child_vdomS = labeled_slider.DOM
child_valueS = labeled_slider.value
def render(v):
value, child_vdom = v
return div([
child_vdom,
div({'style': {
'backgroundColor': 'green',
'width': str(value) + 'px',
'height': str(value) + 'px',
'borderRadius': str(value * 0.5) + 'px'
}})])
vdomS = xs.combine(child_valueS, child_vdomS).map(log).map(render)
return {'DOM': vdomS}
Cycle.run(main, {
'DOM': makeDOMDriver('#app')
});
|
QQuick/Transcrypt
|
transcrypt/demos/cyclejs_demo/component_demos/labeled_slider/labeled_slider.py
|
Python
|
apache-2.0
| 2,859
|
#!/usr/bin/env python
# Copyright 2015, Kevin Carter.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import setuptools
import sys
import cloudlib
PACKAGES = [
'cloudlib'
]
with open('requirements.txt') as f:
required = f.read().splitlines()
if sys.version_info < (2, 6, 0):
sys.stderr.write('This App Presently requires Python 2.6.0 or greater\n')
raise SystemExit(
'\nUpgrade python because you version of it is VERY deprecated\n'
)
elif sys.version_info < (2, 7, 0):
if 'argparse' not in required:
required.append('argparse')
with open('README', 'rb') as r_file:
LDINFO = r_file.read()
LDINFO = LDINFO.decode('utf-8')
setuptools.setup(
name=cloudlib.__appname__,
version=cloudlib.__version__,
author=cloudlib.__author__,
author_email=cloudlib.__email__,
description=cloudlib.__description__,
long_description=LDINFO,
license='Apache License Version 2.0',
packages=PACKAGES,
url=cloudlib.__url__,
install_requires=required,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Information Technology',
'Intended Audience :: System Administrators',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Topic :: Utilities',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
|
cloudnull/cloudlib
|
setup.py
|
Python
|
apache-2.0
| 2,113
|
# bibframe
from versa import I
BFZ = I('http://bibfra.me/vocab/')
BFLC = I('http://bibframe.org/vocab/')
#A way to register services to specialize bibframe.py processing
#Maps URL to callable
g_services = {}
def register_service(coro, iri=None):
iri = iri or coro.iri
g_services[iri] = coro
return
|
uogbuji/pybibframe
|
lib/__init__.py
|
Python
|
apache-2.0
| 315
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.codegen.thrift.target_types import (
ThriftSourcesGeneratorTarget,
ThriftSourceTarget,
)
from pants.engine.target import BoolField
class ScroogeFinagleBoolField(BoolField):
alias = "finagle"
default = False
help = "If True, then also generate Finagle classes for services when using Scrooge as the Thrift generator."
def rules():
return (
ThriftSourceTarget.register_plugin_field(ScroogeFinagleBoolField),
ThriftSourcesGeneratorTarget.register_plugin_field(ScroogeFinagleBoolField),
)
|
pantsbuild/pants
|
src/python/pants/backend/codegen/thrift/scrooge/additional_fields.py
|
Python
|
apache-2.0
| 683
|
"""The code in this module is mostly copy/pasted out of the distutils2 source
code, as recommended by Tarek Ziade. As such, it may be subject to some change
as distutils2 development continues, and will have to be kept up to date.
I didn't want to use it directly from distutils2 itself, since I do not want it
to be an installation dependency for our packages yet--it is still too unstable
(the latest version on PyPI doesn't even install).
"""
# These first two imports are not used, but are needed to get around an
# irritating Python bug that can crop up when using ./setup.py test.
# See: http://www.eby-sarna.com/pipermail/peak/2010-May/003355.html
try:
import multiprocessing
except ImportError:
pass
import logging
import os
import re
import sys
import traceback
from collections import defaultdict
import distutils.ccompiler
from distutils import log
from distutils.errors import (DistutilsOptionError, DistutilsModuleError,
DistutilsFileError)
from setuptools.command.egg_info import manifest_maker
from setuptools.dist import Distribution
from setuptools.extension import Extension
from .extern.six import moves as m
RawConfigParser = m.configparser.RawConfigParser
# A simplified RE for this; just checks that the line ends with version
# predicates in ()
_VERSION_SPEC_RE = re.compile(r'\s*(.*?)\s*\((.*)\)\s*$')
# Mappings from setup() keyword arguments to setup.cfg options;
# The values are (section, option) tuples, or simply (section,) tuples if
# the option has the same name as the setup() argument
D1_D2_SETUP_ARGS = {
"name": ("metadata",),
"version": ("metadata",),
"author": ("metadata",),
"author_email": ("metadata",),
"maintainer": ("metadata",),
"maintainer_email": ("metadata",),
"url": ("metadata", "home_page"),
"description": ("metadata", "summary"),
"keywords": ("metadata",),
"long_description": ("metadata", "description"),
"download-url": ("metadata",),
"classifiers": ("metadata", "classifier"),
"platforms": ("metadata", "platform"), # **
"license": ("metadata",),
# Use setuptools install_requires, not
# broken distutils requires
"install_requires": ("metadata", "requires_dist"),
"setup_requires": ("metadata", "setup_requires_dist"),
"provides": ("metadata", "provides_dist"), # **
"obsoletes": ("metadata", "obsoletes_dist"), # **
"package_dir": ("files", 'packages_root'),
"packages": ("files",),
"package_data": ("files",),
"data_files": ("files",),
"scripts": ("files",),
"py_modules": ("files", "modules"), # **
"cmdclass": ("global", "commands"),
# Not supported in distutils2, but provided for
# backwards compatibility with setuptools
"use_2to3": ("backwards_compat", "use_2to3"),
"zip_safe": ("backwards_compat", "zip_safe"),
"tests_require": ("backwards_compat", "tests_require"),
"dependency_links": ("backwards_compat",),
"include_package_data": ("backwards_compat",),
}
# setup() arguments that can have multiple values in setup.cfg
MULTI_FIELDS = ("classifiers",
"platforms",
"install_requires",
"provides",
"obsoletes",
"packages",
"package_data",
"data_files",
"scripts",
"py_modules",
"dependency_links",
"setup_requires",
"tests_require",
"cmdclass")
# setup() arguments that contain boolean values
BOOL_FIELDS = ("use_2to3", "zip_safe", "include_package_data")
CSV_FIELDS = ("keywords",)
log.set_verbosity(log.INFO)
def resolve_name(name):
"""Resolve a name like ``module.object`` to an object and return it.
Raise ImportError if the module or name is not found.
"""
parts = name.split('.')
cursor = len(parts) - 1
module_name = parts[:cursor]
attr_name = parts[-1]
while cursor > 0:
try:
ret = __import__('.'.join(module_name), fromlist=[attr_name])
break
except ImportError:
if cursor == 0:
raise
cursor -= 1
module_name = parts[:cursor]
attr_name = parts[cursor]
ret = ''
for part in parts[cursor:]:
try:
ret = getattr(ret, part)
except AttributeError:
raise ImportError(name)
return ret
def cfg_to_args(path='setup.cfg'):
""" Distutils2 to distutils1 compatibility util.
This method uses an existing setup.cfg to generate a dictionary of
    keywords that can be used by distutils.core.setup(**kwargs).
    :param path:
The setup.cfg path.
:raises DistutilsFileError:
When the setup.cfg file is not found.
"""
# The method source code really starts here.
parser = RawConfigParser()
if not os.path.exists(path):
raise DistutilsFileError("file '%s' does not exist" %
os.path.abspath(path))
parser.read(path)
config = {}
for section in parser.sections():
config[section] = dict(parser.items(section))
# Run setup_hooks, if configured
setup_hooks = has_get_option(config, 'global', 'setup_hooks')
package_dir = has_get_option(config, 'files', 'packages_root')
# Add the source package directory to sys.path in case it contains
# additional hooks, and to make sure it's on the path before any existing
# installations of the package
if package_dir:
package_dir = os.path.abspath(package_dir)
sys.path.insert(0, package_dir)
try:
if setup_hooks:
setup_hooks = split_multiline(setup_hooks)
for hook in setup_hooks:
hook_fn = resolve_name(hook)
                try:
                    hook_fn(config)
                except SystemExit:
                    log.error('setup hook %s terminated the installation' % hook)
except:
e = sys.exc_info()[1]
log.error('setup hook %s raised exception: %s\n' %
(hook, e))
log.error(traceback.format_exc())
sys.exit(1)
kwargs = setup_cfg_to_setup_kwargs(config)
register_custom_compilers(config)
ext_modules = get_extension_modules(config)
if ext_modules:
kwargs['ext_modules'] = ext_modules
entry_points = get_entry_points(config)
if entry_points:
kwargs['entry_points'] = entry_points
wrap_commands(kwargs)
# Handle the [files]/extra_files option
extra_files = has_get_option(config, 'files', 'extra_files')
if extra_files:
extra_files = split_multiline(extra_files)
# Let's do a sanity check
for filename in extra_files:
if not os.path.exists(filename):
raise DistutilsFileError(
'%s from the extra_files option in setup.cfg does not '
'exist' % filename)
# Unfortunately the only really sensible way to do this is to
# monkey-patch the manifest_maker class
@monkeypatch_method(manifest_maker)
def add_defaults(self, extra_files=extra_files, log=log):
log.info('[d2to1] running patched manifest_maker command '
'with extra_files support')
add_defaults._orig(self)
self.filelist.extend(extra_files)
finally:
# Perform cleanup if any paths were added to sys.path
if package_dir:
sys.path.pop(0)
return kwargs
def setup_cfg_to_setup_kwargs(config):
"""Processes the setup.cfg options and converts them to arguments accepted
by setuptools' setup() function.
"""
kwargs = {}
for arg in D1_D2_SETUP_ARGS:
if len(D1_D2_SETUP_ARGS[arg]) == 2:
            # The distutils field name is different from distutils2's.
section, option = D1_D2_SETUP_ARGS[arg]
elif len(D1_D2_SETUP_ARGS[arg]) == 1:
            # The distutils field name is the same as distutils2's.
section = D1_D2_SETUP_ARGS[arg][0]
option = arg
in_cfg_value = has_get_option(config, section, option)
if not in_cfg_value:
# There is no such option in the setup.cfg
if arg == "long_description":
in_cfg_value = has_get_option(config, section,
"description_file")
if in_cfg_value:
in_cfg_value = split_multiline(in_cfg_value)
value = ''
for filename in in_cfg_value:
description_file = open(filename)
try:
value += description_file.read().strip() + '\n\n'
finally:
description_file.close()
in_cfg_value = value
else:
continue
if arg in CSV_FIELDS:
in_cfg_value = split_csv(in_cfg_value)
if arg in MULTI_FIELDS:
in_cfg_value = split_multiline(in_cfg_value)
elif arg in BOOL_FIELDS:
            # Accept several common spellings of boolean values
if in_cfg_value.lower() in ('true', 't', '1', 'yes', 'y'):
in_cfg_value = True
else:
in_cfg_value = False
if in_cfg_value:
if arg in ('install_requires', 'tests_require'):
# Replaces PEP345-style version specs with the sort expected by
# setuptools
in_cfg_value = [_VERSION_SPEC_RE.sub(r'\1\2', pred)
for pred in in_cfg_value]
elif arg == 'package_dir':
in_cfg_value = {'': in_cfg_value}
elif arg in ('package_data', 'data_files'):
data_files = {}
firstline = True
prev = None
for line in in_cfg_value:
if '=' in line:
key, value = line.split('=', 1)
key, value = (key.strip(), value.strip())
if key in data_files:
# Multiple duplicates of the same package name;
# this is for backwards compatibility of the old
# format prior to d2to1 0.2.6.
prev = data_files[key]
prev.extend(value.split())
else:
prev = data_files[key.strip()] = value.split()
elif firstline:
raise DistutilsOptionError(
'malformed package_data first line %r (misses '
'"=")' % line)
else:
prev.extend(line.strip().split())
firstline = False
if arg == 'data_files':
# the data_files value is a pointlessly different structure
# from the package_data value
data_files = data_files.items()
in_cfg_value = data_files
elif arg == 'cmdclass':
cmdclass = {}
dist = Distribution()
for cls in in_cfg_value:
cls = resolve_name(cls)
cmd = cls(dist)
cmdclass[cmd.get_command_name()] = cls
in_cfg_value = cmdclass
kwargs[arg] = in_cfg_value
return kwargs
def register_custom_compilers(config):
"""Handle custom compilers; this has no real equivalent in distutils, where
additional compilers could only be added programmatically, so we have to
hack it in somehow.
"""
compilers = has_get_option(config, 'global', 'compilers')
if compilers:
compilers = split_multiline(compilers)
for compiler in compilers:
compiler = resolve_name(compiler)
# In distutils2 compilers these class attributes exist; for
# distutils1 we just have to make something up
if hasattr(compiler, 'name'):
name = compiler.name
else:
name = compiler.__name__
if hasattr(compiler, 'description'):
desc = compiler.description
else:
desc = 'custom compiler %s' % name
module_name = compiler.__module__
            # Note: this *will* override built-in compilers with the same name
# TODO: Maybe display a warning about this?
cc = distutils.ccompiler.compiler_class
cc[name] = (module_name, compiler.__name__, desc)
# HACK!!!! Distutils assumes all compiler modules are in the
# distutils package
sys.modules['distutils.' + module_name] = sys.modules[module_name]
def get_extension_modules(config):
"""Handle extension modules"""
EXTENSION_FIELDS = ("sources",
"include_dirs",
"define_macros",
"undef_macros",
"library_dirs",
"libraries",
"runtime_library_dirs",
"extra_objects",
"extra_compile_args",
"extra_link_args",
"export_symbols",
"swig_opts",
"depends")
ext_modules = []
for section in config:
if ':' in section:
labels = section.split(':', 1)
else:
# Backwards compatibility for old syntax; don't use this though
labels = section.split('=', 1)
labels = [l.strip() for l in labels]
if (len(labels) == 2) and (labels[0] == 'extension'):
ext_args = {}
for field in EXTENSION_FIELDS:
value = has_get_option(config, section, field)
# All extension module options besides name can have multiple
# values
if not value:
continue
value = split_multiline(value)
if field == 'define_macros':
macros = []
for macro in value:
macro = macro.split('=', 1)
if len(macro) == 1:
macro = (macro[0].strip(), None)
else:
macro = (macro[0].strip(), macro[1].strip())
macros.append(macro)
value = macros
ext_args[field] = value
if ext_args:
if 'name' not in ext_args:
ext_args['name'] = labels[1]
ext_modules.append(Extension(ext_args.pop('name'),
**ext_args))
return ext_modules
def get_entry_points(config):
"""Process the [entry_points] section of setup.cfg to handle setuptools
entry points. This is, of course, not a standard feature of
distutils2/packaging, but as there is not currently a standard alternative
in packaging, we provide support for them.
"""
    if 'entry_points' not in config:
return {}
return dict((option, split_multiline(value))
for option, value in config['entry_points'].items())
def wrap_commands(kwargs):
dist = Distribution()
# This should suffice to get the same config values and command classes
# that the actual Distribution will see (not counting cmdclass, which is
# handled below)
dist.parse_config_files()
for cmd, _ in dist.get_command_list():
hooks = {}
for opt, val in dist.get_option_dict(cmd).items():
val = val[1]
if opt.startswith('pre_hook.') or opt.startswith('post_hook.'):
hook_type, alias = opt.split('.', 1)
hook_dict = hooks.setdefault(hook_type, {})
hook_dict[alias] = val
if not hooks:
continue
if 'cmdclass' in kwargs and cmd in kwargs['cmdclass']:
cmdclass = kwargs['cmdclass'][cmd]
else:
cmdclass = dist.get_command_class(cmd)
new_cmdclass = wrap_command(cmd, cmdclass, hooks)
kwargs.setdefault('cmdclass', {})[cmd] = new_cmdclass
def wrap_command(cmd, cmdclass, hooks):
def run(self, cmdclass=cmdclass):
self.run_command_hooks('pre_hook')
cmdclass.run(self)
self.run_command_hooks('post_hook')
return type(cmd, (cmdclass, object),
{'run': run, 'run_command_hooks': run_command_hooks,
'pre_hook': hooks.get('pre_hook'),
'post_hook': hooks.get('post_hook')})
def run_command_hooks(cmd_obj, hook_kind):
"""Run hooks registered for that command and phase.
*cmd_obj* is a finalized command object; *hook_kind* is either
'pre_hook' or 'post_hook'.
"""
if hook_kind not in ('pre_hook', 'post_hook'):
raise ValueError('invalid hook kind: %r' % hook_kind)
hooks = getattr(cmd_obj, hook_kind, None)
if hooks is None:
return
for hook in hooks.values():
if isinstance(hook, str):
try:
hook_obj = resolve_name(hook)
except ImportError:
err = sys.exc_info()[1] # For py3k
raise DistutilsModuleError('cannot find hook %s: %s' %
                                           (hook, err))
else:
hook_obj = hook
if not hasattr(hook_obj, '__call__'):
raise DistutilsOptionError('hook %r is not callable' % hook)
log.info('running %s %s for command %s',
hook_kind, hook, cmd_obj.get_command_name())
        try:
hook_obj(cmd_obj)
except:
e = sys.exc_info()[1]
log.error('hook %s raised exception: %s\n' % (hook, e))
log.error(traceback.format_exc())
sys.exit(1)
def has_get_option(config, section, option):
if section in config and option in config[section]:
return config[section][option]
elif section in config and option.replace('_', '-') in config[section]:
return config[section][option.replace('_', '-')]
else:
return False
def split_multiline(value):
"""Special behaviour when we have a multi line options"""
value = [element for element in
(line.strip() for line in value.split('\n'))
if element]
return value
def split_csv(value):
"""Special behaviour when we have a comma separated options"""
value = [element for element in
(chunk.strip() for chunk in value.split(','))
if element]
return value
def monkeypatch_method(cls):
"""A function decorator to monkey-patch a method of the same name on the
given class.
"""
def wrapper(func):
orig = getattr(cls, func.__name__, None)
        if orig and not hasattr(orig, '_orig'):  # only save if not already patched
setattr(func, '_orig', orig)
setattr(cls, func.__name__, func)
return func
return wrapper
# The following classes are used to hack Distribution.command_options a bit
class DefaultGetDict(defaultdict):
"""Like defaultdict, but the get() method also sets and returns the default
value.
"""
def get(self, key, default=None):
if default is None:
default = self.default_factory()
return super(DefaultGetDict, self).setdefault(key, default)
class IgnoreDict(dict):
"""A dictionary that ignores any insertions in which the key is a string
matching any string in `ignore`. The ignore list can also contain wildcard
patterns using '*'.
"""
def __init__(self, ignore):
self.__ignore = re.compile(r'(%s)' % ('|'.join(
[pat.replace('*', '.*')
for pat in ignore])))
def __setitem__(self, key, val):
if self.__ignore.match(key):
return
super(IgnoreDict, self).__setitem__(key, val)
|
ioram7/keystone-federado-pgid2013
|
build/d2to1/d2to1/util.py
|
Python
|
apache-2.0
| 20,437
|
class ReleaseExistsError(Exception):
pass
class InvalidPath(Exception):
pass
class DocumentNotFound(Exception):
pass
|
yshalenyk/ocds.storage
|
ocds/storage/errors.py
|
Python
|
apache-2.0
| 133
|
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for Ceilometer notify daemon."""
import mock
from oslo_config import cfg
from oslo_config import fixture as fixture_config
import oslo_messaging
from oslo_utils import fileutils
from oslotest import mockpatch
import six
import yaml
from ceilometer.event import endpoint as event_endpoint
from ceilometer import pipeline
from ceilometer import publisher
from ceilometer.publisher import test
from ceilometer.tests import base as tests_base
TEST_NOTICE_CTXT = {
u'auth_token': u'3d8b13de1b7d499587dfc69b77dc09c2',
u'is_admin': True,
u'project_id': u'7c150a59fe714e6f9263774af9688f0e',
u'quota_class': None,
u'read_deleted': u'no',
u'remote_address': u'10.0.2.15',
u'request_id': u'req-d68b36e0-9233-467f-9afb-d81435d64d66',
u'roles': [u'admin'],
u'timestamp': u'2012-05-08T20:23:41.425105',
u'user_id': u'1e3ce043029547f1a61c1996d1a531a2',
}
TEST_NOTICE_METADATA = {
u'message_id': u'dae6f69c-00e0-41c0-b371-41ec3b7f4451',
u'timestamp': u'2012-05-08 20:23:48.028195',
}
TEST_NOTICE_PAYLOAD = {
u'created_at': u'2012-05-08 20:23:41',
u'deleted_at': u'',
u'disk_gb': 0,
u'display_name': u'testme',
u'fixed_ips': [{u'address': u'10.0.0.2',
u'floating_ips': [],
u'meta': {},
u'type': u'fixed',
u'version': 4}],
u'image_ref_url': u'http://10.0.2.15:9292/images/UUID',
u'instance_id': u'9f9d01b9-4a58-4271-9e27-398b21ab20d1',
u'instance_type': u'm1.tiny',
u'instance_type_id': 2,
u'launched_at': u'2012-05-08 20:23:47.985999',
u'memory_mb': 512,
u'state': u'active',
u'state_description': u'',
u'tenant_id': u'7c150a59fe714e6f9263774af9688f0e',
u'user_id': u'1e3ce043029547f1a61c1996d1a531a2',
u'reservation_id': u'1e3ce043029547f1a61c1996d1a531a3',
u'vcpus': 1,
u'root_gb': 0,
u'ephemeral_gb': 0,
u'host': u'compute-host-name',
u'availability_zone': u'1e3ce043029547f1a61c1996d1a531a4',
u'os_type': u'linux?',
u'architecture': u'x86',
u'image_ref': u'UUID',
u'kernel_id': u'1e3ce043029547f1a61c1996d1a531a5',
u'ramdisk_id': u'1e3ce043029547f1a61c1996d1a531a6',
}
cfg.CONF.import_opt('store_events', 'ceilometer.notification',
group='notification')
class TestEventEndpoint(tests_base.BaseTestCase):
def get_publisher(self, url, namespace=''):
fake_drivers = {'test://': test.TestPublisher,
'except://': test.TestPublisher}
return fake_drivers[url](url)
def _setup_pipeline(self, publishers):
ev_pipeline = yaml.dump({
'sources': [{
'name': 'test_event',
'events': ['test.test'],
'sinks': ['test_sink']
}],
'sinks': [{
'name': 'test_sink',
'publishers': publishers
}]
})
if six.PY3:
ev_pipeline = ev_pipeline.encode('utf-8')
ev_pipeline_cfg_file = fileutils.write_to_tempfile(
content=ev_pipeline, prefix="event_pipeline", suffix="yaml")
self.CONF.set_override('event_pipeline_cfg_file',
ev_pipeline_cfg_file)
ev_pipeline_mgr = pipeline.setup_event_pipeline()
return ev_pipeline_mgr
def _setup_endpoint(self, publishers):
ev_pipeline_mgr = self._setup_pipeline(publishers)
self.endpoint = event_endpoint.EventsNotificationEndpoint(
ev_pipeline_mgr)
self.endpoint.event_converter = mock.MagicMock()
self.endpoint.event_converter.to_event.return_value = mock.MagicMock(
event_type='test.test')
def setUp(self):
super(TestEventEndpoint, self).setUp()
self.CONF = self.useFixture(fixture_config.Config()).conf
self.CONF([])
self.CONF.set_override("connection", "log://", group='database')
self.CONF.set_override("store_events", True, group="notification")
self.setup_messaging(self.CONF)
self.useFixture(mockpatch.PatchObject(publisher, 'get_publisher',
side_effect=self.get_publisher))
self.fake_publisher = mock.Mock()
self.useFixture(mockpatch.Patch(
'ceilometer.publisher.test.TestPublisher',
return_value=self.fake_publisher))
def test_message_to_event(self):
self._setup_endpoint(['test://'])
self.endpoint.info(TEST_NOTICE_CTXT, 'compute.vagrant-precise',
'compute.instance.create.end',
TEST_NOTICE_PAYLOAD, TEST_NOTICE_METADATA)
def test_message_to_event_bad_event(self):
self._setup_endpoint(['test://'])
self.fake_publisher.publish_events.side_effect = Exception
self.CONF.set_override("ack_on_event_error", False,
group="notification")
message = {'event_type': "foo", 'message_id': "abc"}
with mock.patch("ceilometer.pipeline.LOG") as mock_logger:
ret = self.endpoint.process_notification(message)
self.assertEqual(oslo_messaging.NotificationResult.REQUEUE, ret)
exception_mock = mock_logger.exception
self.assertIn('Exit after error from publisher',
exception_mock.call_args_list[0][0][0])
def test_message_to_event_bad_event_multi_publish(self):
self._setup_endpoint(['test://', 'except://'])
self.fake_publisher.publish_events.side_effect = Exception
self.CONF.set_override("ack_on_event_error", False,
group="notification")
message = {'event_type': "foo", 'message_id': "abc"}
with mock.patch("ceilometer.pipeline.LOG") as mock_logger:
ret = self.endpoint.process_notification(message)
self.assertEqual(oslo_messaging.NotificationResult.HANDLED, ret)
exception_mock = mock_logger.exception
self.assertIn('Continue after error from publisher',
exception_mock.call_args_list[0][0][0])
|
mathslinux/ceilometer
|
ceilometer/tests/unit/event/test_endpoint.py
|
Python
|
apache-2.0
| 6,738
|
from setuptools import setup
import codecs
with codecs.open('README.md', encoding='utf-8') as readme_file:
long_description = readme_file.read()
setup(
name="hep_ml",
version='0.6.0',
description="Machine Learning for High Energy Physics",
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/arogozhnikov/hep_ml',
# Author details
author='Alex Rogozhnikov',
# Choose your license
license='Apache 2.0',
packages=['hep_ml', 'hep_ml.experiments'],
classifiers=[
# Indicate who your project is intended for
'Intended Audience :: Science/Research',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: Apache Software License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
],
# What does your project relate to?
keywords='machine learning, supervised learning, '
'uncorrelated methods of machine learning, high energy physics, particle physics',
# List run-time dependencies here. These will be installed by pip when your project is installed.
install_requires=[
'numpy >= 1.9',
'scipy >= 0.15.0',
'pandas >= 0.14.0',
'scikit-learn >= 0.19',
'theano >= 1.0.2',
'six',
],
)
|
iamfullofspam/hep_ml
|
setup.py
|
Python
|
apache-2.0
| 1,649
|
import itertools
from streamparse import storm
class WordSpout(storm.Spout):
def initialize(self, stormconf, context):
self.words = itertools.cycle(['dog', 'cat',
'zebra', 'elephant'])
def nextTuple(self):
word = next(self.words)
storm.emit([word])
WordSpout().run()
|
thedrow/streamparse
|
examples/leinproj/_resources/resources/words.py
|
Python
|
apache-2.0
| 342
|
# Copyright (c) 2013 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Middleware that ensures request ID.
It ensures to assign request ID for each API request and set it to
request environment. The request ID is also added to API response.
"""
from charging.openstack.common import context
from charging.openstack.common.middleware import base
ENV_REQUEST_ID = 'openstack.request_id'
HTTP_RESP_HEADER_REQUEST_ID = 'x-openstack-request-id'
class RequestIdMiddleware(base.Middleware):
def process_request(self, req):
self.req_id = context.generate_request_id()
req.environ[ENV_REQUEST_ID] = self.req_id
def process_response(self, response):
response.headers.add(HTTP_RESP_HEADER_REQUEST_ID, self.req_id)
return response
|
zhhf/charging
|
charging/openstack/common/middleware/request_id.py
|
Python
|
apache-2.0
| 1,334
|
import time
from server import app
from server.db import diary as db_diary
from server.logic import user as logic_user
from server.util import safetyutils
from server.db import event_log as db_log
logger = app.logger
def sync_data(user_id, sync_token, sync_items, need_pull):
"""Upsert client data, sync_items, into server db; fetch changed data from server and return to client.
1. Decrypt sync_token and fetch last_sync_time;
2. Push: Traverse sync_items, execute upsert/delete action for specified table. Record sync_count for client usage;
3. If need_pull: Get changed data since last_sync_time, comparing with time_modified(upsert) & time_removed(delete);
4. Pull: return changed data to user.
sync_items = [
{
'Diary': {
'create': {
'uuid': "2F69DEB5-B631-40DD-A65E-AFE9A0882275",
'time': 1477139399,
'title': 'this is a new diary',
'content': 'today is a good day',
},
},
},
{
'Diary': {
'update': {
'uuid': "b8f4428a-98e1-11e6-8155-a45e60dcd7ed",
'time': 1477139400,
'title': 'I update this title',
'content': 'I\'m updated content',
}
}
},
{
'Diary': {
'delete': {
'uuid': "b9000ff0-98e1-11e6-91f7-a45e60dcd7ed",
'time': 1477139401,
}
}
}
]
Return:
{
'synced_count': 2,
'sync_token': 'jym0JTE-svI8iDOPp-6e_UMe6dYOVVNSVes8pzZCXDd_I4xn3CYT-oyGVjaCgKgtHO' (based on new last_sync_time),
'Diary': {
'delete': [
{
'uuid': "04B977C7-6F7F-4D36-BFDA-FE98C5241DB0",
                    'title': "I'm created by other client",
                    'content': "I'm created by other client",
                    'time': 1477139340,
                },
                {
'uuid': "04B977C7-6F7F-4D36-BFDA-FE98C5241ABC",
                    'title': "I'm created by other client",
                    'content': "I'm created by other client",
'time': 1477139340,
}
],
'upsert': [
{
'uuid': "04B977C7-6F7F-4D36-BFDC-FE98C5241DB0",
'time': 1477139399,
                    'title': "I'm created by other client",
                    'content': "I'm created by other client",
},
{
'uuid': "04B977C7-6F7F-4D36-BFDC-FE98C5241DB0",
'time': 1477139399,
                    'title': "I'm created by other client",
                    'content': "I'm created by other client",
}
]
}
}
"""
if user_id is None or logic_user.get_user_by_id(user_id) is None:
return {}
last_sync_info = safetyutils.decrypt_sync_token(sync_token)
last_sync_time = last_sync_info.get("last_sync_time", 0)
synced_count = _push(user_id, sync_items)
result = {
'synced_count': synced_count
}
if need_pull:
pull_data = _pull(user_id, last_sync_time)
result.update(pull_data)
return result
def _push(user_id, sync_items):
"""Check each sync_items, execute create/update/delete action in database.
"""
synced_count = 0
try:
for item in sync_items:
for table, action_data in item.iteritems():
print "table", table, "; action & data", action_data
if table == db_diary.DB_NAME:
_push_diary_data_by_action(user_id, action_data.keys()[0], action_data.values()[0])
synced_count += 1
    except Exception as e:
        logger.exception('sync_data push failed: %s', e)
return synced_count
def _pull(user_id, last_sync_time):
"""1. Extract All Diary new changes since last_sync_time, comparing with time_modified
2. Generate new sync_token and return to user.
Return:
{
'sync_token': 'jym0JTE-svI8iDOPp-6e_UMe6dYOVVNSVes8pzZCXDd_I4xn3CYT-oyGVjaCgKgtHO' (based on new last_sync_time),
'Diary': {
'delete': [
{
'uuid': "04B977C7-6F7F-4D36-BFDA-FE98C5241DB0",
'time': 1477139340,
                },
{
'uuid': "04B977C7-6F7F-4D36-BFDA-FE98C5241ABC",
'time': 1477139340,
}
],
'upsert': [
{
'uuid': "04B977C7-6F7F-4D36-BFDC-FE98C5241DB0",
'time': 1477139399,
                    'title': "I'm created by other client",
                    'content': "I'm created by other client",
},
{
'uuid': "04B977C7-6F7F-4D36-BFDC-FE98C5241DB0",
'time': 1477139399,
                    'title': "I'm created by other client",
                    'content': "I'm created by other client",
}
]
}
}
"""
result = {}
changed_diary_list = db_diary.get_diary_list_since_last_sync(user_id, last_sync_time)
delete = []
upsert = []
for diary in changed_diary_list:
if diary.get('time_removed') == 0:
upsert.append(diary)
else:
delete.append(diary)
if len(delete):
result['delete'] = delete
if len(upsert):
result['upsert'] = upsert
new_sync_token = safetyutils.encrypt_sync_token(int(time.time()))
result['sync_token'] = new_sync_token
return result
def _push_diary_data_by_action(user_id, action, data):
"""
Update server diary database based on client changes.
action: 'create'
data: {
'uuid': "2F69DEB5-B631-40DD-A65E-AFE9A0882275",
'time': 1477139399,
'title': 'this is a new diary',
'content': 'today is a good day',
}
action:'delete'
data: {
'uuid': "04B977C7-6F7F-4D36-BFDA-FE98C5241DB0"
'time': 1477139399,
}
"""
logger.info('_push_diary_data_by_action: user_id:%s; action:%s; data:%s', user_id, action, data)
if action == 'create' or action == 'update':
db_diary.upsert_diary(user_id, data.get('uuid'), data)
elif action == 'delete':
db_diary.delete_diary(user_id, data.get('uuid'), data.get('time'))
else:
return
def sync_event_log(user_id, log_items):
"""
sync log from client
log_items = [
{
event_name : 'home_page_impression'
page_source : None
time_created : 14000000
},
{
event_name : 'home_page_impression'
page_source : None
time_created : 14000000
},
{
event_name : 'home_page_impression'
page_source : None
time_created : 14000000
},
{
event_name : 'home_page_impression'
page_source : None
time_created : 14000000
},
]
"""
synced_count = 0
if user_id is None or log_items is None:
return {
'synced_count': 0
}
for item in log_items:
if 'page_source' not in item:
item['page_source'] = ''
db_log.add_event_log(user_id, item)
synced_count += 1
return {
'synced_count': synced_count
}
|
wingjay/jianshi
|
server/server/logic/sync.py
|
Python
|
apache-2.0
| 7,553
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
class NoFilter(object):
def __init__(self):
pass
def __call__(self, x, update=True):
return np.asarray(x)
# http://www.johndcook.com/blog/standard_deviation/
class RunningStat(object):
def __init__(self, shape=None):
self._n = 0
self._M = np.zeros(shape)
self._S = np.zeros(shape)
def push(self, x):
x = np.asarray(x)
# Unvectorized update of the running statistics.
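        # This is Welford's online algorithm (see the link above):
        #   M_n = M_{n-1} + (x - M_{n-1}) / n
        #   S_n = S_{n-1} + (x - M_{n-1}) * (x - M_n)
        # where the S update below is the equivalent form delta**2 * (n-1) / n.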
assert x.shape == self._M.shape, ("x.shape = {}, self.shape = {}"
.format(x.shape, self._M.shape))
n1 = self._n
self._n += 1
if self._n == 1:
self._M[...] = x
else:
delta = x - self._M
self._M[...] += delta / self._n
self._S[...] += delta * delta * n1 / self._n
def update(self, other):
n1 = self._n
n2 = other._n
n = n1 + n2
delta = self._M - other._M
delta2 = delta * delta
M = (n1 * self._M + n2 * other._M) / n
S = self._S + other._S + delta2 * n1 * n2 / n
self._n = n
self._M = M
self._S = S
@property
def n(self):
return self._n
@property
def mean(self):
return self._M
@property
def var(self):
return self._S / (self._n - 1) if self._n > 1 else np.square(self._M)
@property
def std(self):
return np.sqrt(self.var)
@property
def shape(self):
return self._M.shape
class MeanStdFilter(object):
def __init__(self, shape, demean=True, destd=True, clip=10.0):
self.demean = demean
self.destd = destd
self.clip = clip
self.rs = RunningStat(shape)
def __call__(self, x, update=True):
x = np.asarray(x)
if update:
if len(x.shape) == len(self.rs.shape) + 1:
# The vectorized case.
for i in range(x.shape[0]):
self.rs.push(x[i])
else:
# The unvectorized case.
self.rs.push(x)
if self.demean:
x = x - self.rs.mean
if self.destd:
x = x / (self.rs.std + 1e-8)
if self.clip:
x = np.clip(x, -self.clip, self.clip)
return x
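# Illustrative usage sketch (not part of the original module): normalize a
# stream of observations while updating the running statistics in place, e.g.
#     obs_filter = MeanStdFilter(shape=(4,))
#     for _ in range(100):
#         normalized = obs_filter(np.random.randn(4))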
def test_running_stat():
for shp in ((), (3,), (3, 4)):
li = []
rs = RunningStat(shp)
for _ in range(5):
val = np.random.randn(*shp)
rs.push(val)
li.append(val)
m = np.mean(li, axis=0)
assert np.allclose(rs.mean, m)
v = np.square(m) if (len(li) == 1) else np.var(li, ddof=1, axis=0)
assert np.allclose(rs.var, v)
def test_combining_stat():
for shape in [(), (3,), (3, 4)]:
li = []
rs1 = RunningStat(shape)
rs2 = RunningStat(shape)
rs = RunningStat(shape)
for _ in range(5):
val = np.random.randn(*shape)
rs1.push(val)
rs.push(val)
li.append(val)
for _ in range(9):
rs2.push(val)
rs.push(val)
li.append(val)
rs1.update(rs2)
assert np.allclose(rs.mean, rs1.mean)
assert np.allclose(rs.std, rs1.std)
test_running_stat()
test_combining_stat()
|
alanamarzoev/ray
|
python/ray/rllib/ppo/filter.py
|
Python
|
apache-2.0
| 3,442
|
#-*- coding:utf-8 -*-
from __future__ import absolute_import, division, print_function, with_statement
import sys
import signal
import random
import time
import logging
import requests
import multiprocessing
import os
from turbo import app
from turbo.conf import app_config
from turbo import register
from turbo.session import RedisStore
app_config.app_name = 'app_test'
app_config.web_application_setting = {
'xsrf_cookies': False,
'cookie_secret': 'asdf/asdfiw872*&^2/'
}
from turbo.test.util import unittest
class HomeHandler(app.BaseHandler):
session_initializer = {
'time': time.time(),
'uid': None,
}
def get(self):
assert self.session.uid is None
assert self.session.session_id is not None
self.write('get')
def post(self):
self.session.uid = '7787'
self.write('post')
def put(self):
assert self.session.uid == '7787'
self.write('put')
class RedisStoreHandler(app.BaseHandler):
session_initializer = {
'time': time.time(),
'uid': None,
}
session_store = RedisStore(timeout=3600)
def get(self):
assert self.session.uid is None
assert self.session.session_id is not None
self.write('get')
def post(self):
self.session.uid = '7787'
self.write('post')
def put(self):
assert self.session.uid == '7787'
self.write('put')
def run_server():
register.register_url('/', HomeHandler)
register.register_url('/redis', RedisStoreHandler)
app.start()
class SessionTest(unittest.TestCase):
def setUp(self):
server = multiprocessing.Process(target=run_server)
server.start()
self.home_url = 'http://localhost:8888'
self.redis_url = 'http://localhost:8888/redis'
self.pid = server.pid
time.sleep(1)
def tearDown(self):
os.kill(self.pid, signal.SIGKILL)
def test_session(self):
resp = requests.get(self.home_url, headers={'refer':'http://127.0.0.1:8888'})
self.assertEqual(resp.status_code, 200)
cookies = resp.cookies
resp = requests.post(self.home_url, cookies=cookies)
self.assertEqual(resp.status_code, 200)
resp = requests.put(self.home_url, cookies=cookies)
self.assertEqual(resp.status_code, 200)
def test_redis_store_session(self):
resp = requests.get(self.redis_url, headers={'refer':'http://127.0.0.1:8888'})
self.assertEqual(resp.status_code, 200)
cookies = resp.cookies
resp = requests.post(self.redis_url, cookies=cookies)
self.assertEqual(resp.status_code, 200)
resp = requests.put(self.redis_url, cookies=cookies)
self.assertEqual(resp.status_code, 200)
if __name__ == '__main__':
unittest.main()
|
tao12345666333/app-turbo
|
turbo/test/session_test.py
|
Python
|
apache-2.0
| 2,845
|
"""
tests.pytests.unit.beacons.test_load
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Load beacon test cases
"""
import pytest
import salt.beacons.load as load
from tests.support.mock import MagicMock, patch
@pytest.fixture
def configure_loader_modules():
return {load: {"__context__": {}, "__salt__": {}}}
def test_non_list_config():
config = {}
ret = load.validate(config)
assert ret == (False, "Configuration for load beacon must be a list.")
def test_empty_config():
config = [{}]
ret = load.validate(config)
assert ret == (False, "Averages configuration is required for load beacon.")
@pytest.mark.skip_on_windows(reason="os.getloadavg not available on Windows")
def test_load_match():
with patch("os.getloadavg", MagicMock(return_value=(1.82, 1.84, 1.56))):
config = [
{
"averages": {"1m": [0.0, 2.0], "5m": [0.0, 1.5], "15m": [0.0, 1.0]},
"emitatstartup": True,
"onchangeonly": False,
}
]
ret = load.validate(config)
assert ret == (True, "Valid beacon configuration")
_expected_return = [{"1m": 1.82, "5m": 1.84, "15m": 1.56}]
ret = load.beacon(config)
assert ret == _expected_return
|
saltstack/salt
|
tests/pytests/unit/beacons/test_load.py
|
Python
|
apache-2.0
| 1,268
|
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from connector import channel
from google3.cloud.graphite.mmv2.services.google.app_engine import domain_mapping_pb2
from google3.cloud.graphite.mmv2.services.google.app_engine import (
domain_mapping_pb2_grpc,
)
from typing import List
class DomainMapping(object):
def __init__(
self,
self_link: str = None,
name: str = None,
ssl_settings: dict = None,
resource_records: list = None,
app: str = None,
service_account_file: str = "",
):
channel.initialize()
self.self_link = self_link
self.name = name
self.ssl_settings = ssl_settings
self.app = app
self.service_account_file = service_account_file
def apply(self):
stub = domain_mapping_pb2_grpc.AppengineDomainMappingServiceStub(
channel.Channel()
)
request = domain_mapping_pb2.ApplyAppengineDomainMappingRequest()
if Primitive.to_proto(self.self_link):
request.resource.self_link = Primitive.to_proto(self.self_link)
if Primitive.to_proto(self.name):
request.resource.name = Primitive.to_proto(self.name)
if DomainMappingSslSettings.to_proto(self.ssl_settings):
request.resource.ssl_settings.CopyFrom(
DomainMappingSslSettings.to_proto(self.ssl_settings)
)
else:
request.resource.ClearField("ssl_settings")
if Primitive.to_proto(self.app):
request.resource.app = Primitive.to_proto(self.app)
request.service_account_file = self.service_account_file
response = stub.ApplyAppengineDomainMapping(request)
self.self_link = Primitive.from_proto(response.self_link)
self.name = Primitive.from_proto(response.name)
self.ssl_settings = DomainMappingSslSettings.from_proto(response.ssl_settings)
self.resource_records = DomainMappingResourceRecordsArray.from_proto(
response.resource_records
)
self.app = Primitive.from_proto(response.app)
def delete(self):
stub = domain_mapping_pb2_grpc.AppengineDomainMappingServiceStub(
channel.Channel()
)
request = domain_mapping_pb2.DeleteAppengineDomainMappingRequest()
request.service_account_file = self.service_account_file
if Primitive.to_proto(self.self_link):
request.resource.self_link = Primitive.to_proto(self.self_link)
if Primitive.to_proto(self.name):
request.resource.name = Primitive.to_proto(self.name)
if DomainMappingSslSettings.to_proto(self.ssl_settings):
request.resource.ssl_settings.CopyFrom(
DomainMappingSslSettings.to_proto(self.ssl_settings)
)
else:
request.resource.ClearField("ssl_settings")
if Primitive.to_proto(self.app):
request.resource.app = Primitive.to_proto(self.app)
response = stub.DeleteAppengineDomainMapping(request)
@classmethod
def list(self, app, service_account_file=""):
stub = domain_mapping_pb2_grpc.AppengineDomainMappingServiceStub(
channel.Channel()
)
request = domain_mapping_pb2.ListAppengineDomainMappingRequest()
request.service_account_file = service_account_file
request.App = app
return stub.ListAppengineDomainMapping(request).items
def to_proto(self):
resource = domain_mapping_pb2.AppengineDomainMapping()
if Primitive.to_proto(self.self_link):
resource.self_link = Primitive.to_proto(self.self_link)
if Primitive.to_proto(self.name):
resource.name = Primitive.to_proto(self.name)
if DomainMappingSslSettings.to_proto(self.ssl_settings):
resource.ssl_settings.CopyFrom(
DomainMappingSslSettings.to_proto(self.ssl_settings)
)
else:
resource.ClearField("ssl_settings")
if Primitive.to_proto(self.app):
resource.app = Primitive.to_proto(self.app)
return resource
class DomainMappingSslSettings(object):
def __init__(
self,
certificate_id: str = None,
ssl_management_type: str = None,
pending_managed_certificate_id: str = None,
):
self.certificate_id = certificate_id
self.ssl_management_type = ssl_management_type
self.pending_managed_certificate_id = pending_managed_certificate_id
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = domain_mapping_pb2.AppengineDomainMappingSslSettings()
if Primitive.to_proto(resource.certificate_id):
res.certificate_id = Primitive.to_proto(resource.certificate_id)
if DomainMappingSslSettingsSslManagementTypeEnum.to_proto(
resource.ssl_management_type
):
res.ssl_management_type = DomainMappingSslSettingsSslManagementTypeEnum.to_proto(
resource.ssl_management_type
)
if Primitive.to_proto(resource.pending_managed_certificate_id):
res.pending_managed_certificate_id = Primitive.to_proto(
resource.pending_managed_certificate_id
)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return DomainMappingSslSettings(
certificate_id=Primitive.from_proto(resource.certificate_id),
ssl_management_type=DomainMappingSslSettingsSslManagementTypeEnum.from_proto(
resource.ssl_management_type
),
pending_managed_certificate_id=Primitive.from_proto(
resource.pending_managed_certificate_id
),
)
class DomainMappingSslSettingsArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [DomainMappingSslSettings.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [DomainMappingSslSettings.from_proto(i) for i in resources]
class DomainMappingResourceRecords(object):
def __init__(self, name: str = None, rrdata: str = None, type: str = None):
self.name = name
self.rrdata = rrdata
self.type = type
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = domain_mapping_pb2.AppengineDomainMappingResourceRecords()
if Primitive.to_proto(resource.name):
res.name = Primitive.to_proto(resource.name)
if Primitive.to_proto(resource.rrdata):
res.rrdata = Primitive.to_proto(resource.rrdata)
if DomainMappingResourceRecordsTypeEnum.to_proto(resource.type):
res.type = DomainMappingResourceRecordsTypeEnum.to_proto(resource.type)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return DomainMappingResourceRecords(
name=Primitive.from_proto(resource.name),
rrdata=Primitive.from_proto(resource.rrdata),
type=DomainMappingResourceRecordsTypeEnum.from_proto(resource.type),
)
class DomainMappingResourceRecordsArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [DomainMappingResourceRecords.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [DomainMappingResourceRecords.from_proto(i) for i in resources]
class DomainMappingSslSettingsSslManagementTypeEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return domain_mapping_pb2.AppengineDomainMappingSslSettingsSslManagementTypeEnum.Value(
"AppengineDomainMappingSslSettingsSslManagementTypeEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return domain_mapping_pb2.AppengineDomainMappingSslSettingsSslManagementTypeEnum.Name(
resource
)[
len("AppengineDomainMappingSslSettingsSslManagementTypeEnum") :
]
class DomainMappingResourceRecordsTypeEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return domain_mapping_pb2.AppengineDomainMappingResourceRecordsTypeEnum.Value(
"AppengineDomainMappingResourceRecordsTypeEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return domain_mapping_pb2.AppengineDomainMappingResourceRecordsTypeEnum.Name(
resource
)[len("AppengineDomainMappingResourceRecordsTypeEnum") :]
class Primitive(object):
@classmethod
def to_proto(self, s):
if not s:
return ""
return s
@classmethod
def from_proto(self, s):
return s
|
GoogleCloudPlatform/declarative-resource-client-library
|
python/services/appengine/domain_mapping.py
|
Python
|
apache-2.0
| 9,711
|
# -*- coding: utf-8 -*-
from models.contact import Contact
import random
import pytest
def test_edit_contact(app, db, check_ui):
with pytest.allure.step('Given a non-empty contact list'):
if len(db.get_contact_list()) == 0:
app.contact.add(Contact(first_name="Contact_for_editing"))
old_contacts = db.get_contact_list()
with pytest.allure.step('Given a random contact from the list'):
contact_for_editing = random.choice(old_contacts)
with pytest.allure.step('Given a contact data'):
data_for_editing = Contact(first_name="Updated_first_name", last_name="Updated_last_name")
data_for_editing.id = contact_for_editing.id
with pytest.allure.step('When I replace the data in selected contact'):
app.contact.edit_contact_by_id(data_for_editing)
    with pytest.allure.step('Then the new contact list is equal to the old contact list with the selected contact replaced by a new contact'):
new_contacts = db.get_contact_list()
old_contacts.remove(contact_for_editing)
old_contacts.append(data_for_editing)
assert sorted(old_contacts, key=Contact.contact_id_or_max) == sorted(new_contacts, key=Contact.contact_id_or_max)
if check_ui:
assert sorted(new_contacts, key=Contact.contact_id_or_max) == \
sorted(app.contact.get_contact_list(), key=Contact.contact_id_or_max)
|
rgurevych/python_for_testers
|
tests/test_edit_contact.py
|
Python
|
apache-2.0
| 1,406
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Write sample summary data for the hparams plugin.
Each training session here is a temperature simulation and records
temperature-related metrics. See the function `run` below for more details.
This demo is a slightly modified version of
tensorboard/plugins/scalar/scalar_demo.py.
See also `hparams_minimal_demo.py` in this directory for a demo that
trains real MNIST models instead of using synthetic data, at the cost of
taking much longer to run.
"""
import hashlib
import math
import os.path
import shutil
# TODO(erez): This code currently does not support eager mode and can't
# be run in tensorflow 2.0. One of the issues is that it uses
# tf.compat.v1.summary.FileWriter, which can't be used in eager
# mode (which is the default in 2.0). Fix this when we change this
# demo to be more typical to a machine learning experiment (b/121228006).
import tensorflow.compat.v1 as tf
from absl import flags
from absl import app
from google.protobuf import struct_pb2
from tensorboard.plugins.scalar import summary as scalar_summary
from tensorboard.plugins.hparams import api_pb2
from tensorboard.plugins.hparams import summary
FLAGS = flags.FLAGS
flags.DEFINE_integer(
"num_session_groups",
50,
"The approximate number of session groups to create.",
)
flags.DEFINE_string(
"logdir",
"/tmp/hparams_minimal_demo",
"The directory to write the summary information to.",
)
flags.DEFINE_integer(
"summary_freq",
1,
"Summaries will be every n steps, " "where n is the value of this flag.",
)
flags.DEFINE_integer("num_steps", 100, "Number of steps per trial.")
# Total number of sessions is given by:
# len(TEMPERATURE_LIST)^2 * len(HEAT_COEFFICIENTS) * 2
HEAT_COEFFICIENTS = {"water": 0.001, "air": 0.003}
TEMPERATURE_LIST = []
# We can't initialize TEMPERATURE_LIST directly since the initialization
# depends on a flag and flag parsing hasn't happened yet. Instead, we use
# a function that we call in main() below.
def init_temperature_list():
global TEMPERATURE_LIST
TEMPERATURE_LIST = [
270 + i * 50.0
for i in range(
0, int(math.sqrt(FLAGS.num_session_groups / len(HEAT_COEFFICIENTS)))
)
]
def fingerprint(string):
m = hashlib.md5()
m.update(string.encode("utf-8"))
return m.hexdigest()
def create_experiment_summary():
"""Returns a summary proto buffer holding this experiment."""
# Convert TEMPERATURE_LIST to google.protobuf.ListValue
temperature_list = struct_pb2.ListValue()
temperature_list.extend(TEMPERATURE_LIST)
materials = struct_pb2.ListValue()
materials.extend(HEAT_COEFFICIENTS.keys())
return summary.experiment_pb(
hparam_infos=[
api_pb2.HParamInfo(
name="initial_temperature",
display_name="Initial temperature",
type=api_pb2.DATA_TYPE_FLOAT64,
domain_discrete=temperature_list,
),
api_pb2.HParamInfo(
name="ambient_temperature",
display_name="Ambient temperature",
type=api_pb2.DATA_TYPE_FLOAT64,
domain_discrete=temperature_list,
),
api_pb2.HParamInfo(
name="material",
display_name="Material",
type=api_pb2.DATA_TYPE_STRING,
domain_discrete=materials,
),
],
metric_infos=[
api_pb2.MetricInfo(
name=api_pb2.MetricName(
tag="temperature/current/scalar_summary"
),
display_name="Current Temp.",
),
api_pb2.MetricInfo(
name=api_pb2.MetricName(
tag="temperature/difference_to_ambient/scalar_summary"
),
display_name="Difference To Ambient Temp.",
),
api_pb2.MetricInfo(
name=api_pb2.MetricName(tag="delta/scalar_summary"),
display_name="Delta T",
),
],
)
def run(logdir, session_id, hparams, group_name):
"""Runs a temperature simulation.
This will simulate an object at temperature `initial_temperature`
sitting at rest in a large room at temperature `ambient_temperature`.
The object has some intrinsic `heat_coefficient`, which indicates
how much thermal conductivity it has: for instance, metals have high
thermal conductivity, while the thermal conductivity of water is low.
Over time, the object's temperature will adjust to match the
temperature of its environment. We'll track the object's temperature,
how far it is from the room's temperature, and how much it changes at
each time step.
Arguments:
logdir: the top-level directory into which to write summary data
session_id: an id for the session.
hparams: A dictionary mapping a hyperparameter name to its value.
group_name: an id for the session group this session belongs to.
"""
tf.reset_default_graph()
tf.set_random_seed(0)
initial_temperature = hparams["initial_temperature"]
ambient_temperature = hparams["ambient_temperature"]
heat_coefficient = HEAT_COEFFICIENTS[hparams["material"]]
session_dir = os.path.join(logdir, session_id)
writer = tf.summary.FileWriter(session_dir)
writer.add_summary(
summary.session_start_pb(hparams=hparams, group_name=group_name)
)
writer.flush()
with tf.name_scope("temperature"):
# Create a mutable variable to hold the object's temperature, and
# create a scalar summary to track its value over time. The name of
# the summary will appear as 'temperature/current' due to the
# name-scope above.
temperature = tf.Variable(
tf.constant(initial_temperature), name="temperature"
)
scalar_summary.op(
"current",
temperature,
display_name="Temperature",
description="The temperature of the object under "
"simulation, in Kelvins.",
)
# Compute how much the object's temperature differs from that of its
# environment, and track this, too: likewise, as
# 'temperature/difference_to_ambient'.
ambient_difference = temperature - ambient_temperature
scalar_summary.op(
"difference_to_ambient",
ambient_difference,
display_name="Difference to ambient temperature",
description=(
"The difference between the ambient "
"temperature and the temperature of the "
"object under simulation, in Kelvins."
),
)
# Newton suggested that the rate of change of the temperature of an
# object is directly proportional to this `ambient_difference` above,
# where the proportionality constant is what we called the heat
# coefficient. But in real life, not everything is quite so clean, so
# we'll add in some noise. (The value of 50 is arbitrary, chosen to
# make the data look somewhat interesting. :-) )
noise = 50 * tf.random.normal([])
delta = -heat_coefficient * (ambient_difference + noise)
scalar_summary.op(
"delta",
delta,
description="The change in temperature from the previous "
"step, in Kelvins.",
)
# Collect all the scalars that we want to keep track of.
summ = tf.summary.merge_all()
# Now, augment the current temperature by this delta that we computed,
# blocking the assignment on summary collection to avoid race conditions
# and ensure that the summary always reports the pre-update value.
with tf.control_dependencies([summ]):
update_step = temperature.assign_add(delta)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for step in range(FLAGS.num_steps):
# By asking TensorFlow to compute the update step, we force it to
# change the value of the temperature variable. We don't actually
# care about this value, so we discard it; instead, we grab the
# summary data computed along the way.
(s, _) = sess.run([summ, update_step])
if (step % FLAGS.summary_freq) == 0:
writer.add_summary(s, global_step=step)
writer.add_summary(summary.session_end_pb(api_pb2.STATUS_SUCCESS))
writer.close()
def run_all(logdir, verbose=False):
"""Run simulations on a reasonable set of parameters.
Arguments:
logdir: the directory into which to store all the runs' data
verbose: if true, print out each run's name as it begins.
"""
writer = tf.summary.FileWriter(logdir)
writer.add_summary(create_experiment_summary())
writer.close()
session_num = 0
num_sessions = (
len(TEMPERATURE_LIST)
* len(TEMPERATURE_LIST)
* len(HEAT_COEFFICIENTS)
* 2
)
for initial_temperature in TEMPERATURE_LIST:
for ambient_temperature in TEMPERATURE_LIST:
for material in HEAT_COEFFICIENTS:
hparams = {
"initial_temperature": initial_temperature,
"ambient_temperature": ambient_temperature,
"material": material,
}
hparam_str = str(hparams)
group_name = fingerprint(hparam_str)
for repeat_idx in range(2):
session_id = str(session_num)
if verbose:
print(
"--- Running training session %d/%d"
% (session_num + 1, num_sessions)
)
print(hparam_str)
print("--- repeat #: %d" % (repeat_idx + 1))
run(logdir, session_id, hparams, group_name)
session_num += 1
def main(unused_argv):
if tf.executing_eagerly():
print("Sorry, this demo currently can't be run in eager mode.")
return
init_temperature_list()
shutil.rmtree(FLAGS.logdir, ignore_errors=True)
print("Saving output to %s." % FLAGS.logdir)
run_all(FLAGS.logdir, verbose=True)
print("Done. Output saved to %s." % FLAGS.logdir)
if __name__ == "__main__":
app.run(main)
|
tensorflow/tensorboard
|
tensorboard/plugins/hparams/hparams_minimal_demo.py
|
Python
|
apache-2.0
| 11,029
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Define the centralized register of all :class:`~luigi.task.Task` classes.
"""
import abc
from luigi import six
import logging
logger = logging.getLogger('luigi-interface')
class TaskClassException(Exception):
pass
class TaskClassNotFoundException(TaskClassException):
pass
class TaskClassAmbigiousException(TaskClassException):
pass
class Register(abc.ABCMeta):
"""
The Metaclass of :py:class:`Task`.
Acts as a global registry of Tasks with the following properties:
1. Cache instances of objects so that eg. ``X(1, 2, 3)`` always returns the
same object.
2. Keep track of all subclasses of :py:class:`Task` and expose them.
"""
__instance_cache = {}
_default_namespace_dict = {}
_reg = []
AMBIGUOUS_CLASS = object() # Placeholder denoting an error
"""If this value is returned by :py:meth:`_get_reg` then there is an
ambiguous task name (two :py:class:`Task` have the same name). This denotes
an error."""
def __new__(metacls, classname, bases, classdict):
"""
Custom class creation for namespacing.
Also register all subclasses.
When the set or inherited namespace evaluates to ``None``, set the task namespace to
whatever the currently declared namespace is.
"""
cls = super(Register, metacls).__new__(metacls, classname, bases, classdict)
cls._namespace_at_class_time = metacls._get_namespace(cls.__module__)
metacls._reg.append(cls)
return cls
def __call__(cls, *args, **kwargs):
"""
Custom class instantiation utilizing instance cache.
If a Task has already been instantiated with the same parameters,
        the previous instance is returned to reduce the number of object instances.
"""
def instantiate():
return super(Register, cls).__call__(*args, **kwargs)
h = cls.__instance_cache
if h is None: # disabled
return instantiate()
# ignore dict param values. inconsequential and screws up hashing
params = cls.get_params()
param_values = cls.get_param_values(params, args, kwargs)
param_values_new = []
for param in param_values:
if isinstance(param[1], dict) is True:
continue
else:
param_values_new.append(param)
k = (cls, tuple(param_values_new))
try:
hash(k)
except TypeError:
logger.debug("Not all parameter values are hashable so instance isn't coming from the cache")
return instantiate() # unhashable types in parameters
if k not in h:
h[k] = instantiate()
return h[k]
@classmethod
def clear_instance_cache(cls):
"""
Clear/Reset the instance cache.
"""
cls.__instance_cache = {}
@classmethod
def disable_instance_cache(cls):
"""
Disables the instance cache.
"""
cls.__instance_cache = None
@property
def task_family(cls):
"""
Internal note: This function will be deleted soon.
"""
if not cls.get_task_namespace():
return cls.__name__
else:
return "{}.{}".format(cls.get_task_namespace(), cls.__name__)
@classmethod
def _get_reg(cls):
"""Return all of the registered classes.
        :return: a ``dict`` of task_family -> class
"""
# We have to do this on-demand in case task names have changed later
reg = dict()
for task_cls in cls._reg:
if not task_cls._visible_in_registry:
continue
name = task_cls.get_task_family()
if name in reg and \
(reg[name] == Register.AMBIGUOUS_CLASS or # Check so issubclass doesn't crash
not issubclass(task_cls, reg[name])):
# Registering two different classes - this means we can't instantiate them by name
# The only exception is if one class is a subclass of the other. In that case, we
# instantiate the most-derived class (this fixes some issues with decorator wrappers).
reg[name] = Register.AMBIGUOUS_CLASS
else:
reg[name] = task_cls
return reg
@classmethod
def _set_reg(cls, reg):
"""The writing complement of _get_reg
"""
cls._reg = [task_cls for task_cls in reg.values() if task_cls is not cls.AMBIGUOUS_CLASS]
@classmethod
def task_names(cls):
"""
List of task names as strings
"""
return sorted(cls._get_reg().keys())
@classmethod
def tasks_str(cls):
"""
Human-readable register contents dump.
"""
return ','.join(cls.task_names())
@classmethod
def get_task_cls(cls, name):
"""
Returns an unambiguous class or raises an exception.
"""
task_cls = cls._get_reg().get(name)
if not task_cls:
raise TaskClassNotFoundException(cls._missing_task_msg(name))
if task_cls == cls.AMBIGUOUS_CLASS:
raise TaskClassAmbigiousException('Task %r is ambiguous' % name)
return task_cls
@classmethod
def get_all_params(cls):
"""
Compiles and returns all parameters for all :py:class:`Task`.
:return: a generator of tuples (TODO: we should make this more elegant)
"""
for task_name, task_cls in six.iteritems(cls._get_reg()):
if task_cls == cls.AMBIGUOUS_CLASS:
continue
for param_name, param_obj in task_cls.get_params():
yield task_name, (not task_cls.use_cmdline_section), param_name, param_obj
@staticmethod
def _editdistance(a, b):
""" Simple unweighted Levenshtein distance """
r0 = range(0, len(b) + 1)
r1 = [0] * (len(b) + 1)
for i in range(0, len(a)):
r1[0] = i + 1
for j in range(0, len(b)):
                c = 0 if a[i] == b[j] else 1
r1[j + 1] = min(r1[j] + 1, r0[j + 1] + 1, r0[j] + c)
r0 = r1[:]
return r1[len(b)]
@classmethod
def _missing_task_msg(cls, task_name):
weighted_tasks = [(Register._editdistance(task_name, task_name_2), task_name_2) for task_name_2 in cls.task_names()]
ordered_tasks = sorted(weighted_tasks, key=lambda pair: pair[0])
candidates = [task for (dist, task) in ordered_tasks if dist <= 5 and dist < len(task)]
if candidates:
return "No task %s. Did you mean:\n%s" % (task_name, '\n'.join(candidates))
else:
return "No task %s. Candidates are: %s" % (task_name, cls.tasks_str())
@classmethod
def _get_namespace(mcs, module_name):
for parent in mcs._module_parents(module_name):
entry = mcs._default_namespace_dict.get(parent)
if entry:
return entry
return '' # Default if nothing specifies
@staticmethod
def _module_parents(module_name):
'''
>>> list(Register._module_parents('a.b'))
['a.b', 'a', '']
'''
spl = module_name.split('.')
for i in range(len(spl), 0, -1):
yield '.'.join(spl[0:i])
if module_name:
yield ''
def load_task(module, task_name, params_str):
"""
Imports task dynamically given a module and a task name.
"""
if module is not None:
__import__(module)
task_cls = Register.get_task_cls(task_name)
return task_cls.from_str_params(params_str)
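# --- Editor's note: the demo below is an illustrative addition, not part of
# the original module. It only runs when this file is executed directly and
# assumes the luigi package (which provides the imports above) is installed.
if __name__ == '__main__':
    # Register._editdistance() is a plain unweighted Levenshtein distance; it
    # backs the "Did you mean:" suggestions in _missing_task_msg(). One
    # insertion turns "banana" into "bandana", so this prints 1.
    print(Register._editdistance('banana', 'bandana'))
    # With no Task subclasses defined yet, the registry dump is empty:
    print(Register.tasks_str())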
|
ContextLogic/luigi
|
luigi/task_register.py
|
Python
|
apache-2.0
| 8,272
|
import json
import urllib.request
from nflh.games import Game
class Video:
def __init__(self, id_, desc, url):
self.id_ = id_
self.desc = desc
self.url = url
def __str__(self):
return self.id_ + " - " + self.desc + "\n" + self.url
def get_highest_bit_rate_clip(self, clips):
bit_rated = sorted(clips["videoBitRates"], key=lambda video: video["bitrate"], reverse=True)
return bit_rated[0]["videoPath"]
def get_game_highlights(self, game: Game):
highlights = []
response = urllib.request.urlopen(game.game_center_url())
string = response.read().decode('utf-8')
data = json.loads(string)
in_game_highlights = filter(lambda x: x["clipType"] == "in-game-highlight", data["videos"])
for video in in_game_highlights:
highlights.append(Video(video["id"], video["headline"], self.get_highest_bit_rate_clip(video)))
return highlights
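# --- Editor's note: illustrative sketch, not part of the original module. ---
# Fetching highlights needs network access to the NFL game-center feed, so the
# example is left commented out. The Game construction is hypothetical; see
# nflh.games for the real constructor signature.
#
#     game = Game(...)  # a Game whose game_center_url() points at a live feed
#     placeholder = Video(id_=None, desc=None, url=None)
#     for clip in placeholder.get_game_highlights(game):
#         print(clip)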
|
twbarber/nfl-highlight-bot
|
nflh/videos.py
|
Python
|
apache-2.0
| 963
|
# Задача 2. Вариант 10.
# Напишите программу, которая будет выводить на экран наиболее понравившееся вам
# высказывание, автором которого является Юстиниан. Не забудьте о том, что
# автор должен быть упомянут на отдельной строке.
# Колеганов Никита Сергеевич
# 29.05.2016
print("Весьма несправедливо не принимать подарков ни у кого, но очень скверно брать жадно все.")
print("\n\t\tЮстиниан")
input("\n\nНажмите Enter для выхода.")
|
Mariaanisimova/pythonintask
|
PINp/2014/Koleganov_N_S/task_2_10.py
|
Python
|
apache-2.0
| 734
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import os.path
import requests
import subprocess
import pathlib
def exec(args, env=None, cwd=None, print_stdout=True):
if env is None:
env = os.environ.copy()
print("running %s" % (args))
r = subprocess.run(
args, env=env, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
if print_stdout:
print(r.stdout.decode())
print(r.stderr.decode())
if r.returncode != 0:
r.check_returncode()
return r.stdout.decode()
def read_url(u):
r = requests.get(u)
if r.status_code != 200:
raise Exception("unexpected response code %d fetching %s" % (r.status_code, u))
return r.text
archive = os.path.join(pathlib.Path.home(), ".cache", "kops-test", "assets")
def sha256_of_file(f):
stdout = exec(["sha256sum", f])
return stdout.split()[0]
def download_hashed_url(url):
hash = read_url(url + ".sha256").strip()
os.makedirs(archive, exist_ok=True)
dest = os.path.join(archive, hash)
if os.path.exists(dest):
actual_hash = sha256_of_file(dest)
if actual_hash != hash:
print(
"hash mismatch on %s (%s vs %s), will download again"
% (dest, actual_hash, hash)
)
else:
return dest
download_url(url, dest)
return dest
def download_url(url, dest):
exec(["curl", url, "-o", dest])
def expand_tar(tarfile):
hash = sha256_of_file(tarfile)
dest = os.path.join(archive, "expanded", hash)
if os.path.exists(dest):
return dest
tmpdest = dest + ".tmp"
os.makedirs(tmpdest)
exec(["tar", "xf", tarfile, "-C", tmpdest])
exec(["mv", tmpdest, dest])
return dest
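# --- Editor's note: illustrative sketch, not part of the original module. ---
# download_hashed_url() expects the server to publish "<url>.sha256" next to
# the artifact; downloads land in the ~/.cache/kops-test/assets archive keyed
# by hash, so repeated calls are cheap. The URL below is hypothetical, and the
# helpers shell out to `curl`, `sha256sum` and `tar`, so this only works on a
# machine with those tools and network access; hence it stays commented out.
#
#     tarball = download_hashed_url("https://example.com/artifacts/kops.tar.gz")
#     unpacked = expand_tar(tarball)
#     print(unpacked)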
|
GoogleCloudPlatform/cnrm-blueprints
|
test/lib/downloads.py
|
Python
|
apache-2.0
| 2,284
|
# Copyright 2011 Nicholas Bray
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import util.graphalgorithim.basic
# Based on the paper:
# Efficient Implementation of Lattice Operations
# HASSAN AIT-KACI, ROBERT BOYER, PATRICK LINCOLN, and ROGER NASR
class Lattice(object):
def __init__(self, G, head):
self.G = G
self.head = head
rG = util.graphalgorithim.basic.reverseDirectedGraph(G)
pending = set((self.head,))
processed = set()
self.encoding = {}
self.decoding = {}
p = 0
# Just do reverse post order?
while pending:
current = pending.pop()
processed.add(current)
encoding = reduce(lambda a, b: a|b, [self.encoding[child] for child in rG.get(current, ())], 0)
if encoding in self.decoding:
encoding |= 2**p
p += 1
self.encoding[current] = encoding
self.decoding[encoding] = current
print current, ' - ', "%x" % encoding
for child in G.get(current, ()):
if processed.issuperset(rG.get(child, ())):
pending.add(child)
def decode(self, code):
if code in self.decoding:
return (self.decoding[code],)
else:
assert False
def lub(self, inital, *args):
current = self.encoding[inital]
for arg in args:
current |= self.encoding[arg]
print "LUB %x" % current
return self.decode(current)
def glb(self, inital, *args):
current = self.encoding[inital]
for arg in args:
current &= self.encoding[arg]
print "GLB %r, %s - %x" % (inital, ", ".join([repr(arg) for arg in args]), current)
return self.decode(current)
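# --- Editor's note: illustrative sketch, not part of the original module. ---
# A minimal diamond lattice, assuming reverseDirectedGraph() accepts and
# returns a plain node -> neighbours mapping like the one below. With head
# 'top', the bit encoding makes lub('a', 'b') resolve to ('bot',) and
# glb('a', 'b') to ('top',). Left commented out because the constructor and
# the lub/glb methods print their encoding tables as a side effect.
#
#     G = {'top': ['a', 'b'], 'a': ['bot'], 'b': ['bot'], 'bot': []}
#     lattice = Lattice(G, 'top')
#     print lattice.lub('a', 'b')
#     print lattice.glb('a', 'b')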
|
ncbray/pystream
|
sandbox/lattice.py
|
Python
|
apache-2.0
| 2,025
|
# pylint: disable=wrong-or-nonexistent-copyright-notice
from typing import Any, Dict, FrozenSet, Iterable, Tuple, TYPE_CHECKING, Union
import numpy as np
from cirq import linalg, protocols, value
from cirq._compat import proper_repr
from cirq.ops import raw_types
if TYPE_CHECKING:
import cirq
class MixedUnitaryChannel(raw_types.Gate):
"""A generic mixture that can record the index of its selected operator.
This type of object is also referred to as a mixed-unitary channel.
Args:
mixture: a list of (probability, qubit unitary) pairs
key: an optional measurement key string for this mixture. Simulations
which select a single unitary to apply will store the index
of that unitary in the measurement result list with this key.
validate: if True, validate that `mixture` describes a valid mixture.
This validation can be slow; prefer pre-validating if possible.
"""
def __init__(
self,
mixture: Iterable[Tuple[float, np.ndarray]],
key: Union[str, 'cirq.MeasurementKey', None] = None,
validate: bool = False,
):
mixture = list(mixture)
if not mixture:
raise ValueError('MixedUnitaryChannel must have at least one unitary.')
if not protocols.approx_eq(sum(p[0] for p in mixture), 1):
raise ValueError('Unitary probabilities must sum to 1.')
m0 = mixture[0][1]
num_qubits = np.log2(m0.shape[0])
if not num_qubits.is_integer() or m0.shape[1] != m0.shape[0]:
raise ValueError(
f'Input mixture of shape {m0.shape} does not '
'represent a square operator over qubits.'
)
self._num_qubits = int(num_qubits)
for i, op in enumerate(p[1] for p in mixture):
if not op.shape == m0.shape:
raise ValueError(
f'Inconsistent unitary shapes: op[0]: {m0.shape}, op[{i}]: {op.shape}'
)
if validate and not linalg.is_unitary(op):
raise ValueError(f'Element {i} of mixture is non-unitary.')
self._mixture = mixture
if not isinstance(key, value.MeasurementKey) and key is not None:
key = value.MeasurementKey(key)
self._key = key
@staticmethod
def from_mixture(
mixture: 'protocols.SupportsMixture', key: Union[str, 'cirq.MeasurementKey', None] = None
):
"""Creates a copy of a mixture with the given measurement key."""
return MixedUnitaryChannel(mixture=list(protocols.mixture(mixture)), key=key)
def __eq__(self, other) -> bool:
if not isinstance(other, MixedUnitaryChannel):
return NotImplemented
if self._key != other._key:
return False
if not np.allclose(
[m[0] for m in self._mixture],
[m[0] for m in other._mixture],
):
return False
return np.allclose(
[m[1] for m in self._mixture],
[m[1] for m in other._mixture],
)
def num_qubits(self) -> int:
return self._num_qubits
def _mixture_(self):
return self._mixture
def _measurement_key_name_(self) -> str:
if self._key is None:
return NotImplemented
return str(self._key)
def _measurement_key_obj_(self) -> 'cirq.MeasurementKey':
if self._key is None:
return NotImplemented
return self._key
def _with_measurement_key_mapping_(self, key_map: Dict[str, str]):
if self._key is None:
return NotImplemented
if self._key not in key_map:
return self
return MixedUnitaryChannel(mixture=self._mixture, key=key_map[str(self._key)])
def _with_key_path_(self, path: Tuple[str, ...]):
return MixedUnitaryChannel(
mixture=self._mixture, key=protocols.with_key_path(self._key, path)
)
def _with_key_path_prefix_(self, prefix: Tuple[str, ...]):
return MixedUnitaryChannel(
mixture=self._mixture, key=protocols.with_key_path_prefix(self._key, prefix)
)
def _with_rescoped_keys_(
self,
path: Tuple[str, ...],
bindable_keys: FrozenSet['cirq.MeasurementKey'],
):
return MixedUnitaryChannel(
mixture=self._mixture,
key=protocols.with_rescoped_keys(self._key, path, bindable_keys),
)
def __str__(self):
if self._key is not None:
return f'MixedUnitaryChannel({self._mixture}, key={self._key})'
return f'MixedUnitaryChannel({self._mixture})'
def __repr__(self):
unitary_tuples = [
'(' + repr(op[0]) + ', ' + proper_repr(op[1]) + ')' for op in self._mixture
]
args = [f'mixture=[{", ".join(unitary_tuples)}]']
if self._key is not None:
args.append(f'key=\'{self._key}\'')
return f'cirq.MixedUnitaryChannel({", ".join(args)})'
def _json_dict_(self) -> Dict[str, Any]:
return protocols.obj_to_dict_helper(self, ['_mixture', '_key'])
@classmethod
def _from_json_dict_(cls, _mixture, _key, **kwargs):
mix_pairs = [(m[0], np.asarray(m[1])) for m in _mixture]
return cls(mixture=mix_pairs, key=_key)
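# --- Editor's note: the demo below is an illustrative addition, not part of
# the original module. It only runs when this file is executed directly and
# assumes an installed `cirq` distribution.
if __name__ == '__main__':
    import cirq

    # A 50/50 identity/X mixture (a bit-flip channel); the index of the
    # unitary chosen on each repetition is recorded under the key 'flip'.
    bit_flip = MixedUnitaryChannel(
        mixture=[(0.5, np.eye(2)), (0.5, np.array([[0.0, 1.0], [1.0, 0.0]]))],
        key='flip',
    )
    q = cirq.LineQubit(0)
    circuit = cirq.Circuit(bit_flip.on(q), cirq.measure(q, key='m'))
    result = cirq.Simulator().run(circuit, repetitions=5)
    # The sampled unitary indices appear alongside the 'm' measurement:
    print(result.measurements)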
|
quantumlib/Cirq
|
cirq-core/cirq/ops/mixed_unitary_channel.py
|
Python
|
apache-2.0
| 5,301
|
extensions = []
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'The StackLight Elasticsearch-Kibana Plugin for Fuel'
copyright = u'2016, Mirantis Inc.'
version = '1.1'
release = '1.1.0'
exclude_patterns = []
pygments_style = 'sphinx'
html_theme = 'default'
html_static_path = ['_static']
latex_documents = [
('index', 'ElasticsearchKibana.tex', u'Fuel StackLight Elasticsearch-Kibana Plugin Guide',
u'Mirantis Inc.', 'manual'),
]
# make latex stop printing blank pages between sections
# http://stackoverflow.com/questions/5422997/sphinx-docs-remove-blank-pages-from-generated-pdfs
latex_elements = {'classoptions': ',openany,oneside', 'babel':
'\\usepackage[english]{babel}'}
|
saqibarfeen/iota-influxdb-grafana
|
iota-elasticsearch-kibana/doc/source/conf.py
|
Python
|
apache-2.0
| 749
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import hashlib
import json
import logging
import math
import os
import subprocess
import threading
import time
from collections import defaultdict
import numpy as np
import ray.services as services
import yaml
from ray.worker import global_worker
from ray.autoscaler.docker import dockerize_if_needed
from ray.autoscaler.node_provider import get_node_provider, \
get_default_config
from ray.autoscaler.tags import (TAG_RAY_LAUNCH_CONFIG, TAG_RAY_RUNTIME_CONFIG,
TAG_RAY_NODE_STATUS, TAG_RAY_NODE_TYPE,
TAG_RAY_NODE_NAME, STATUS_UP_TO_DATE,
STATUS_UNINITIALIZED, NODE_TYPE_WORKER)
from ray.autoscaler.updater import NodeUpdaterThread
from ray.ray_constants import AUTOSCALER_MAX_NUM_FAILURES, \
AUTOSCALER_MAX_LAUNCH_BATCH, AUTOSCALER_MAX_CONCURRENT_LAUNCHES, \
AUTOSCALER_UPDATE_INTERVAL_S, AUTOSCALER_HEARTBEAT_TIMEOUT_S, \
AUTOSCALER_RESOURCE_REQUEST_CHANNEL, MEMORY_RESOURCE_UNIT_BYTES
from six import string_types
from six.moves import queue
logger = logging.getLogger(__name__)
REQUIRED, OPTIONAL = True, False
# For (a, b), if a is a dictionary object, then
# no extra fields can be introduced.
CLUSTER_CONFIG_SCHEMA = {
    # A unique identifier for the head node and workers of this cluster.
"cluster_name": (str, REQUIRED),
    # The minimum number of worker nodes to launch in addition to the head
    # node. This number should be >= 0.
"min_workers": (int, OPTIONAL),
    # The maximum number of worker nodes to launch in addition to the head
    # node. This takes precedence over min_workers.
"max_workers": (int, REQUIRED),
# The number of workers to launch initially, in addition to the head node.
"initial_workers": (int, OPTIONAL),
# The mode of the autoscaler e.g. default, aggressive
"autoscaling_mode": (str, OPTIONAL),
# The autoscaler will scale up the cluster to this target fraction of
# resources usage. For example, if a cluster of 8 nodes is 100% busy
# and target_utilization was 0.8, it would resize the cluster to 10.
"target_utilization_fraction": (float, OPTIONAL),
# If a node is idle for this many minutes, it will be removed.
"idle_timeout_minutes": (int, OPTIONAL),
# Cloud-provider specific configuration.
"provider": (
{
"type": (str, REQUIRED), # e.g. aws
"region": (str, OPTIONAL), # e.g. us-east-1
"availability_zone": (str, OPTIONAL), # e.g. us-east-1a
"module": (str,
OPTIONAL), # module, if using external node provider
"project_id": (None, OPTIONAL), # gcp project id, if using gcp
"head_ip": (str, OPTIONAL), # local cluster head node
"worker_ips": (list, OPTIONAL), # local cluster worker nodes
"use_internal_ips": (bool, OPTIONAL), # don't require public ips
"namespace": (str, OPTIONAL), # k8s namespace, if using k8s
# k8s autoscaler permissions, if using k8s
"autoscaler_service_account": (dict, OPTIONAL),
"autoscaler_role": (dict, OPTIONAL),
"autoscaler_role_binding": (dict, OPTIONAL),
"extra_config": (dict, OPTIONAL), # provider-specific config
# Whether to try to reuse previously stopped nodes instead of
# launching nodes. This will also cause the autoscaler to stop
# nodes instead of terminating them. Only implemented for AWS.
"cache_stopped_nodes": (bool, OPTIONAL),
},
REQUIRED),
# How Ray will authenticate with newly launched nodes.
"auth": (
{
"ssh_user": (str, OPTIONAL), # e.g. ubuntu
"ssh_private_key": (str, OPTIONAL),
},
OPTIONAL),
# Docker configuration. If this is specified, all setup and start commands
# will be executed in the container.
"docker": (
{
"image": (str, OPTIONAL), # e.g. tensorflow/tensorflow:1.5.0-py3
"container_name": (str, OPTIONAL), # e.g., ray_docker
# shared options for starting head/worker docker
"run_options": (list, OPTIONAL),
# image for head node, takes precedence over "image" if specified
"head_image": (str, OPTIONAL),
# head specific run options, appended to run_options
"head_run_options": (list, OPTIONAL),
# analogous to head_image
"worker_image": (str, OPTIONAL),
# analogous to head_run_options
"worker_run_options": (list, OPTIONAL),
},
OPTIONAL),
# Provider-specific config for the head node, e.g. instance type.
"head_node": (dict, OPTIONAL),
# Provider-specific config for worker nodes. e.g. instance type.
"worker_nodes": (dict, OPTIONAL),
# Map of remote paths to local paths, e.g. {"/tmp/data": "/my/local/data"}
"file_mounts": (dict, OPTIONAL),
# List of commands that will be run before `setup_commands`. If docker is
# enabled, these commands will run outside the container and before docker
# is setup.
"initialization_commands": (list, OPTIONAL),
# List of common shell commands to run to setup nodes.
"setup_commands": (list, OPTIONAL),
# Commands that will be run on the head node after common setup.
"head_setup_commands": (list, OPTIONAL),
# Commands that will be run on worker nodes after common setup.
"worker_setup_commands": (list, OPTIONAL),
# Command to start ray on the head node. You shouldn't need to modify this.
"head_start_ray_commands": (list, OPTIONAL),
# Command to start ray on worker nodes. You shouldn't need to modify this.
"worker_start_ray_commands": (list, OPTIONAL),
# Whether to avoid restarting the cluster during updates. This field is
# controlled by the ray --no-restart flag and cannot be set by the user.
"no_restart": (None, OPTIONAL),
}
class LoadMetrics(object):
"""Container for cluster load metrics.
Metrics here are updated from raylet heartbeats. The autoscaler
queries these metrics to determine when to scale up, and which nodes
can be removed.
"""
def __init__(self):
self.last_used_time_by_ip = {}
self.last_heartbeat_time_by_ip = {}
self.static_resources_by_ip = {}
self.dynamic_resources_by_ip = {}
self.resource_load_by_ip = {}
self.local_ip = services.get_node_ip_address()
def update(self, ip, static_resources, dynamic_resources, resource_load):
self.resource_load_by_ip[ip] = resource_load
self.static_resources_by_ip[ip] = static_resources
# We are not guaranteed to have a corresponding dynamic resource for
# every static resource because dynamic resources are based on the
# available resources in the heartbeat, which does not exist if it is
# zero. Thus, we have to update dynamic resources here.
dynamic_resources_update = dynamic_resources.copy()
for resource_name, capacity in static_resources.items():
if resource_name not in dynamic_resources_update:
dynamic_resources_update[resource_name] = 0.0
self.dynamic_resources_by_ip[ip] = dynamic_resources_update
now = time.time()
if ip not in self.last_used_time_by_ip or \
static_resources != dynamic_resources:
self.last_used_time_by_ip[ip] = now
self.last_heartbeat_time_by_ip[ip] = now
def mark_active(self, ip):
assert ip is not None, "IP should be known at this time"
logger.info("Node {} is newly setup, treating as active".format(ip))
self.last_heartbeat_time_by_ip[ip] = time.time()
def prune_active_ips(self, active_ips):
active_ips = set(active_ips)
active_ips.add(self.local_ip)
def prune(mapping):
unwanted = set(mapping) - active_ips
for unwanted_key in unwanted:
logger.info("LoadMetrics: "
"Removed mapping: {} - {}".format(
unwanted_key, mapping[unwanted_key]))
del mapping[unwanted_key]
if unwanted:
logger.info(
"LoadMetrics: "
"Removed {} stale ip mappings: {} not in {}".format(
len(unwanted), unwanted, active_ips))
assert not (unwanted & set(mapping))
prune(self.last_used_time_by_ip)
prune(self.static_resources_by_ip)
prune(self.dynamic_resources_by_ip)
prune(self.resource_load_by_ip)
prune(self.last_heartbeat_time_by_ip)
def approx_workers_used(self):
return self._info()["NumNodesUsed"]
def num_workers_connected(self):
return self._info()["NumNodesConnected"]
def get_resource_usage(self):
num_nodes = len(self.static_resources_by_ip)
nodes_used = 0.0
num_nonidle = 0
has_saturated_node = False
resources_used = {}
resources_total = {}
for ip, max_resources in self.static_resources_by_ip.items():
avail_resources = self.dynamic_resources_by_ip[ip]
resource_load = self.resource_load_by_ip[ip]
max_frac = 0.0
for resource_id, amount in resource_load.items():
if amount > 0:
has_saturated_node = True
max_frac = 1.0 # the resource is saturated
for resource_id, amount in max_resources.items():
used = amount - avail_resources[resource_id]
if resource_id not in resources_used:
resources_used[resource_id] = 0.0
resources_total[resource_id] = 0.0
resources_used[resource_id] += used
resources_total[resource_id] += amount
used = max(0, used)
if amount > 0:
frac = used / float(amount)
if frac > max_frac:
max_frac = frac
nodes_used += max_frac
if max_frac > 0:
num_nonidle += 1
# If any nodes have a queue buildup, assume all non-idle nodes are 100%
# busy, plus the head node. This guards against the case of not scaling
# up due to poor task packing.
if has_saturated_node:
nodes_used = min(num_nonidle + 1.0, num_nodes)
return nodes_used, resources_used, resources_total
def info_string(self):
return ", ".join(
["{}={}".format(k, v) for k, v in sorted(self._info().items())])
def _info(self):
nodes_used, resources_used, resources_total = self.get_resource_usage()
now = time.time()
idle_times = [now - t for t in self.last_used_time_by_ip.values()]
heartbeat_times = [
now - t for t in self.last_heartbeat_time_by_ip.values()
]
most_delayed_heartbeats = sorted(
list(self.last_heartbeat_time_by_ip.items()),
key=lambda pair: pair[1])[:5]
most_delayed_heartbeats = {
ip: (now - t)
for ip, t in most_delayed_heartbeats
}
def format_resource(key, value):
if key in ["object_store_memory", "memory"]:
return "{} GiB".format(
round(value * MEMORY_RESOURCE_UNIT_BYTES / 1e9, 2))
else:
return round(value, 2)
return {
"ResourceUsage": ", ".join([
"{}/{} {}".format(
format_resource(rid, resources_used[rid]),
format_resource(rid, resources_total[rid]), rid)
for rid in sorted(resources_used)
]),
"NumNodesConnected": len(self.static_resources_by_ip),
"NumNodesUsed": round(nodes_used, 2),
"NodeIdleSeconds": "Min={} Mean={} Max={}".format(
int(np.min(idle_times)) if idle_times else -1,
int(np.mean(idle_times)) if idle_times else -1,
int(np.max(idle_times)) if idle_times else -1),
"TimeSinceLastHeartbeat": "Min={} Mean={} Max={}".format(
int(np.min(heartbeat_times)) if heartbeat_times else -1,
int(np.mean(heartbeat_times)) if heartbeat_times else -1,
int(np.max(heartbeat_times)) if heartbeat_times else -1),
"MostDelayedHeartbeats": most_delayed_heartbeats,
}
class NodeLauncher(threading.Thread):
def __init__(self, provider, queue, pending, index=None, *args, **kwargs):
self.queue = queue
self.pending = pending
self.provider = provider
self.index = str(index) if index is not None else ""
super(NodeLauncher, self).__init__(*args, **kwargs)
def _launch_node(self, config, count):
worker_filter = {TAG_RAY_NODE_TYPE: NODE_TYPE_WORKER}
before = self.provider.non_terminated_nodes(tag_filters=worker_filter)
launch_hash = hash_launch_conf(config["worker_nodes"], config["auth"])
self.log("Launching {} nodes.".format(count))
self.provider.create_node(
config["worker_nodes"], {
TAG_RAY_NODE_NAME: "ray-{}-worker".format(
config["cluster_name"]),
TAG_RAY_NODE_TYPE: NODE_TYPE_WORKER,
TAG_RAY_NODE_STATUS: STATUS_UNINITIALIZED,
TAG_RAY_LAUNCH_CONFIG: launch_hash,
}, count)
after = self.provider.non_terminated_nodes(tag_filters=worker_filter)
if set(after).issubset(before):
self.log("No new nodes reported after node creation.")
def run(self):
while True:
config, count = self.queue.get()
self.log("Got {} nodes to launch.".format(count))
try:
self._launch_node(config, count)
except Exception:
logger.exception("Launch failed")
finally:
self.pending.dec(count)
def log(self, statement):
prefix = "NodeLauncher{}:".format(self.index)
logger.info(prefix + " {}".format(statement))
class ConcurrentCounter():
def __init__(self):
self._value = 0
self._lock = threading.Lock()
def inc(self, count):
with self._lock:
self._value += count
return self._value
def dec(self, count):
with self._lock:
assert self._value >= count, "counter cannot go negative"
self._value -= count
return self._value
@property
def value(self):
with self._lock:
return self._value
class StandardAutoscaler(object):
"""The autoscaling control loop for a Ray cluster.
    There are two ways to start an autoscaling cluster: manually by running
    `ray start --head --autoscaling-config=/path/to/config.yaml` on an
    instance that has permission to launch other instances, or by running
    `ray create_or_update /path/to/config.yaml` from your laptop, which will
    configure the right AWS/Cloud roles automatically.
StandardAutoscaler's `update` method is periodically called by `monitor.py`
to add and remove nodes as necessary. Currently, load-based autoscaling is
not implemented, so all this class does is try to maintain a constant
cluster size.
StandardAutoscaler is also used to bootstrap clusters (by adding workers
until the target cluster size is met).
"""
def __init__(self,
config_path,
load_metrics,
max_launch_batch=AUTOSCALER_MAX_LAUNCH_BATCH,
max_concurrent_launches=AUTOSCALER_MAX_CONCURRENT_LAUNCHES,
max_failures=AUTOSCALER_MAX_NUM_FAILURES,
process_runner=subprocess,
update_interval_s=AUTOSCALER_UPDATE_INTERVAL_S):
self.config_path = config_path
self.reload_config(errors_fatal=True)
self.load_metrics = load_metrics
self.provider = get_node_provider(self.config["provider"],
self.config["cluster_name"])
self.max_failures = max_failures
self.max_launch_batch = max_launch_batch
self.max_concurrent_launches = max_concurrent_launches
self.process_runner = process_runner
# Map from node_id to NodeUpdater processes
self.updaters = {}
self.num_failed_updates = defaultdict(int)
self.num_successful_updates = defaultdict(int)
self.num_failures = 0
self.last_update_time = 0.0
self.update_interval_s = update_interval_s
self.bringup = True
# Node launchers
self.launch_queue = queue.Queue()
self.num_launches_pending = ConcurrentCounter()
max_batches = math.ceil(
max_concurrent_launches / float(max_launch_batch))
for i in range(int(max_batches)):
node_launcher = NodeLauncher(
provider=self.provider,
queue=self.launch_queue,
index=i,
pending=self.num_launches_pending)
node_launcher.daemon = True
node_launcher.start()
        # Expand local file_mounts to allow ~ in the paths. This can't be done
        # earlier when the config is written since we might be on a different
        # platform and the expansion would result in the wrong path.
self.config["file_mounts"] = {
remote: os.path.expanduser(local)
for remote, local in self.config["file_mounts"].items()
}
for local_path in self.config["file_mounts"].values():
assert os.path.exists(local_path)
self.resource_requests = defaultdict(int)
logger.info("StandardAutoscaler: {}".format(self.config))
def update(self):
try:
self.reload_config(errors_fatal=False)
self._update()
except Exception as e:
logger.exception("StandardAutoscaler: "
"Error during autoscaling.")
self.num_failures += 1
if self.num_failures > self.max_failures:
logger.critical("StandardAutoscaler: "
"Too many errors, abort.")
raise e
def _update(self):
now = time.time()
# Throttle autoscaling updates to this interval to avoid exceeding
# rate limits on API calls.
if now - self.last_update_time < self.update_interval_s:
return
self.last_update_time = now
num_pending = self.num_launches_pending.value
nodes = self.workers()
self.load_metrics.prune_active_ips(
[self.provider.internal_ip(node_id) for node_id in nodes])
target_workers = self.target_num_workers()
if len(nodes) >= target_workers:
if "CPU" in self.resource_requests:
del self.resource_requests["CPU"]
self.log_info_string(nodes, target_workers)
# Terminate any idle or out of date nodes
last_used = self.load_metrics.last_used_time_by_ip
horizon = now - (60 * self.config["idle_timeout_minutes"])
nodes_to_terminate = []
for node_id in nodes:
node_ip = self.provider.internal_ip(node_id)
if node_ip in last_used and last_used[node_ip] < horizon and \
len(nodes) - len(nodes_to_terminate) > target_workers:
logger.info("StandardAutoscaler: "
"{}: Terminating idle node".format(node_id))
nodes_to_terminate.append(node_id)
elif not self.launch_config_ok(node_id):
logger.info("StandardAutoscaler: "
"{}: Terminating outdated node".format(node_id))
nodes_to_terminate.append(node_id)
if nodes_to_terminate:
self.provider.terminate_nodes(nodes_to_terminate)
nodes = self.workers()
self.log_info_string(nodes, target_workers)
# Terminate nodes if there are too many
nodes_to_terminate = []
while len(nodes) > self.config["max_workers"]:
logger.info("StandardAutoscaler: "
"{}: Terminating unneeded node".format(nodes[-1]))
nodes_to_terminate.append(nodes[-1])
nodes = nodes[:-1]
if nodes_to_terminate:
self.provider.terminate_nodes(nodes_to_terminate)
nodes = self.workers()
self.log_info_string(nodes, target_workers)
# Launch new nodes if needed
num_workers = len(nodes) + num_pending
if num_workers < target_workers:
max_allowed = min(self.max_launch_batch,
self.max_concurrent_launches - num_pending)
num_launches = min(max_allowed, target_workers - num_workers)
self.launch_new_node(num_launches)
nodes = self.workers()
self.log_info_string(nodes, target_workers)
elif self.load_metrics.num_workers_connected() >= target_workers:
logger.info("Ending bringup phase")
self.bringup = False
self.log_info_string(nodes, target_workers)
# Process any completed updates
completed = []
for node_id, updater in self.updaters.items():
if not updater.is_alive():
completed.append(node_id)
if completed:
for node_id in completed:
if self.updaters[node_id].exitcode == 0:
self.num_successful_updates[node_id] += 1
else:
self.num_failed_updates[node_id] += 1
del self.updaters[node_id]
# Mark the node as active to prevent the node recovery logic
# immediately trying to restart Ray on the new node.
self.load_metrics.mark_active(self.provider.internal_ip(node_id))
nodes = self.workers()
self.log_info_string(nodes, target_workers)
# Update nodes with out-of-date files
for node_id, commands, ray_start in (self.should_update(node_id)
for node_id in nodes):
if node_id is not None:
self.spawn_updater(node_id, commands, ray_start)
# Attempt to recover unhealthy nodes
for node_id in nodes:
self.recover_if_needed(node_id, now)
def reload_config(self, errors_fatal=False):
try:
with open(self.config_path) as f:
new_config = yaml.safe_load(f.read())
validate_config(new_config)
new_launch_hash = hash_launch_conf(new_config["worker_nodes"],
new_config["auth"])
new_runtime_hash = hash_runtime_conf(new_config["file_mounts"], [
new_config["worker_setup_commands"],
new_config["worker_start_ray_commands"]
])
self.config = new_config
self.launch_hash = new_launch_hash
self.runtime_hash = new_runtime_hash
except Exception as e:
if errors_fatal:
raise e
else:
logger.exception("StandardAutoscaler: "
"Error parsing config.")
def target_num_workers(self):
target_frac = self.config["target_utilization_fraction"]
cur_used = self.load_metrics.approx_workers_used()
ideal_num_nodes = int(np.ceil(cur_used / float(target_frac)))
ideal_num_workers = ideal_num_nodes - 1 # subtract 1 for head node
initial_workers = self.config["initial_workers"]
aggressive = self.config["autoscaling_mode"] == "aggressive"
if self.bringup:
ideal_num_workers = max(ideal_num_workers, initial_workers)
elif aggressive and cur_used > 0:
# If we want any workers, we want at least initial_workers
ideal_num_workers = max(ideal_num_workers, initial_workers)
# Other resources are not supported at present.
if "CPU" in self.resource_requests:
try:
cores_per_worker = self.config["worker_nodes"]["Resources"][
"CPU"]
except KeyError:
cores_per_worker = 1 # Assume the worst
cores_desired = self.resource_requests["CPU"]
ideal_num_workers = max(
ideal_num_workers,
int(np.ceil(cores_desired / cores_per_worker)))
return min(self.config["max_workers"],
max(self.config["min_workers"], ideal_num_workers))
def launch_config_ok(self, node_id):
launch_conf = self.provider.node_tags(node_id).get(
TAG_RAY_LAUNCH_CONFIG)
if self.launch_hash != launch_conf:
return False
return True
def files_up_to_date(self, node_id):
applied = self.provider.node_tags(node_id).get(TAG_RAY_RUNTIME_CONFIG)
if applied != self.runtime_hash:
logger.info("StandardAutoscaler: "
"{}: Runtime state is {}, want {}".format(
node_id, applied, self.runtime_hash))
return False
return True
def recover_if_needed(self, node_id, now):
if not self.can_update(node_id):
return
key = self.provider.internal_ip(node_id)
if key not in self.load_metrics.last_heartbeat_time_by_ip:
self.load_metrics.last_heartbeat_time_by_ip[key] = now
last_heartbeat_time = self.load_metrics.last_heartbeat_time_by_ip[key]
delta = now - last_heartbeat_time
if delta < AUTOSCALER_HEARTBEAT_TIMEOUT_S:
return
logger.warning("StandardAutoscaler: "
"{}: No heartbeat in {}s, "
"restarting Ray to recover...".format(node_id, delta))
updater = NodeUpdaterThread(
node_id=node_id,
provider_config=self.config["provider"],
provider=self.provider,
auth_config=self.config["auth"],
cluster_name=self.config["cluster_name"],
file_mounts={},
initialization_commands=[],
setup_commands=[],
ray_start_commands=with_head_node_ip(
self.config["worker_start_ray_commands"]),
runtime_hash=self.runtime_hash,
process_runner=self.process_runner,
use_internal_ip=True)
updater.start()
self.updaters[node_id] = updater
def should_update(self, node_id):
if not self.can_update(node_id):
return None, None, None # no update
status = self.provider.node_tags(node_id).get(TAG_RAY_NODE_STATUS)
if status == STATUS_UP_TO_DATE and self.files_up_to_date(node_id):
return None, None, None # no update
successful_updated = self.num_successful_updates.get(node_id, 0) > 0
if successful_updated and self.config.get("restart_only", False):
init_commands = []
ray_commands = self.config["worker_start_ray_commands"]
elif successful_updated and self.config.get("no_restart", False):
init_commands = self.config["worker_setup_commands"]
ray_commands = []
else:
init_commands = self.config["worker_setup_commands"]
ray_commands = self.config["worker_start_ray_commands"]
return (node_id, init_commands, ray_commands)
def spawn_updater(self, node_id, init_commands, ray_start_commands):
updater = NodeUpdaterThread(
node_id=node_id,
provider_config=self.config["provider"],
provider=self.provider,
auth_config=self.config["auth"],
cluster_name=self.config["cluster_name"],
file_mounts=self.config["file_mounts"],
initialization_commands=with_head_node_ip(
self.config["initialization_commands"]),
setup_commands=with_head_node_ip(init_commands),
ray_start_commands=with_head_node_ip(ray_start_commands),
runtime_hash=self.runtime_hash,
process_runner=self.process_runner,
use_internal_ip=True)
updater.start()
self.updaters[node_id] = updater
def can_update(self, node_id):
if node_id in self.updaters:
return False
if not self.launch_config_ok(node_id):
return False
if self.num_failed_updates.get(node_id, 0) > 0: # TODO(ekl) retry?
return False
return True
def launch_new_node(self, count):
logger.info(
"StandardAutoscaler: Queue {} new nodes for launch".format(count))
self.num_launches_pending.inc(count)
config = copy.deepcopy(self.config)
self.launch_queue.put((config, count))
def workers(self):
return self.provider.non_terminated_nodes(
tag_filters={TAG_RAY_NODE_TYPE: NODE_TYPE_WORKER})
def log_info_string(self, nodes, target):
logger.info("StandardAutoscaler: {}".format(
self.info_string(nodes, target)))
logger.info("LoadMetrics: {}".format(self.load_metrics.info_string()))
def info_string(self, nodes, target):
suffix = ""
if self.num_launches_pending:
suffix += " ({} pending)".format(self.num_launches_pending.value)
if self.updaters:
suffix += " ({} updating)".format(len(self.updaters))
if self.num_failed_updates:
suffix += " ({} failed to update)".format(
len(self.num_failed_updates))
if self.bringup:
suffix += " (bringup=True)"
return "{}/{} target nodes{}".format(len(nodes), target, suffix)
def request_resources(self, resources):
for resource, count in resources.items():
self.resource_requests[resource] = max(
self.resource_requests[resource], count)
logger.info("StandardAutoscaler: resource_requests={}".format(
self.resource_requests))
def kill_workers(self):
logger.error("StandardAutoscaler: kill_workers triggered")
nodes = self.workers()
if nodes:
self.provider.terminate_nodes(nodes)
logger.error("StandardAutoscaler: terminated {} node(s)".format(
len(nodes)))
def typename(v):
if isinstance(v, type):
return v.__name__
else:
return type(v).__name__
def check_required(config, schema):
# Check required schema entries
if not isinstance(config, dict):
raise ValueError("Config is not a dictionary")
for k, (v, kreq) in schema.items():
if v is None:
continue # None means we don't validate the field
if kreq is REQUIRED:
if k not in config:
type_str = typename(v)
raise ValueError(
"Missing required config key `{}` of type {}".format(
k, type_str))
if not isinstance(v, type):
check_required(config[k], v)
def check_extraneous(config, schema):
"""Make sure all items of config are in schema"""
if not isinstance(config, dict):
raise ValueError("Config {} is not a dictionary".format(config))
for k in config:
if k not in schema:
raise ValueError("Unexpected config key `{}` not in {}".format(
k, list(schema.keys())))
v, kreq = schema[k]
if v is None:
continue
elif isinstance(v, type):
if not isinstance(config[k], v):
if v is str and isinstance(config[k], string_types):
continue
raise ValueError(
"Config key `{}` has wrong type {}, expected {}".format(
k,
type(config[k]).__name__, v.__name__))
else:
check_extraneous(config[k], v)
def validate_config(config, schema=CLUSTER_CONFIG_SCHEMA):
"""Required Dicts indicate that no extra fields can be introduced."""
if not isinstance(config, dict):
raise ValueError("Config {} is not a dictionary".format(config))
check_required(config, schema)
check_extraneous(config, schema)
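# --- Editor's note: illustrative sketch, not part of the original module. ---
# A minimal config that satisfies CLUSTER_CONFIG_SCHEMA: only `cluster_name`,
# `max_workers` and `provider.type` are REQUIRED above; everything else is
# OPTIONAL. The provider values are placeholders. Kept as a comment so nothing
# executes at import time.
#
#     validate_config({
#         "cluster_name": "demo",
#         "max_workers": 2,
#         "provider": {"type": "aws", "region": "us-east-1"},
#     })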
def fillout_defaults(config):
defaults = get_default_config(config["provider"])
defaults.update(config)
merge_setup_commands(defaults)
dockerize_if_needed(defaults)
defaults["auth"] = defaults.get("auth", {})
return defaults
def merge_setup_commands(config):
config["head_setup_commands"] = (
config["setup_commands"] + config["head_setup_commands"])
config["worker_setup_commands"] = (
config["setup_commands"] + config["worker_setup_commands"])
return config
def with_head_node_ip(cmds):
head_ip = services.get_node_ip_address()
out = []
for cmd in cmds:
out.append("export RAY_HEAD_IP={}; {}".format(head_ip, cmd))
return out
def hash_launch_conf(node_conf, auth):
hasher = hashlib.sha1()
hasher.update(
json.dumps([node_conf, auth], sort_keys=True).encode("utf-8"))
return hasher.hexdigest()
# Cache the file hashes to avoid rescanning the files each time. Also, this
# avoids inadvertently restarting workers if the file mount content is mutated
# on the head node.
_hash_cache = {}
def hash_runtime_conf(file_mounts, extra_objs):
hasher = hashlib.sha1()
def add_content_hashes(path):
def add_hash_of_file(fpath):
with open(fpath, "rb") as f:
for chunk in iter(lambda: f.read(2**20), b""):
hasher.update(chunk)
path = os.path.expanduser(path)
if os.path.isdir(path):
dirs = []
for dirpath, _, filenames in os.walk(path):
dirs.append((dirpath, sorted(filenames)))
for dirpath, filenames in sorted(dirs):
hasher.update(dirpath.encode("utf-8"))
for name in filenames:
hasher.update(name.encode("utf-8"))
fpath = os.path.join(dirpath, name)
add_hash_of_file(fpath)
else:
add_hash_of_file(path)
conf_str = (json.dumps(file_mounts, sort_keys=True).encode("utf-8") +
json.dumps(extra_objs, sort_keys=True).encode("utf-8"))
# Important: only hash the files once. Otherwise, we can end up restarting
# workers if the files were changed and we re-hashed them.
if conf_str not in _hash_cache:
hasher.update(conf_str)
for local_path in sorted(file_mounts.values()):
add_content_hashes(local_path)
_hash_cache[conf_str] = hasher.hexdigest()
return _hash_cache[conf_str]
def request_resources(num_cpus=None, num_gpus=None):
"""Remotely request some CPU or GPU resources from the autoscaler.
This function is to be called e.g. on a node before submitting a bunch of
ray.remote calls to ensure that resources rapidly become available.
    In the future this could be extended to request GPUs or other custom
    resources.
    This function is non-blocking.
Args:
num_cpus: int -- the number of CPU cores to request
num_gpus: int -- the number of GPUs to request (Not implemented)
"""
if num_gpus is not None:
raise NotImplementedError(
"GPU resource is not yet supported through request_resources")
r = services.create_redis_client(
global_worker.node.redis_address,
password=global_worker.node.redis_password)
assert isinstance(num_cpus, int)
if num_cpus > 0:
r.publish(AUTOSCALER_RESOURCE_REQUEST_CHANNEL,
json.dumps({
"CPU": num_cpus
}))
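# --- Editor's note: illustrative sketch, not part of the original module. ---
# request_resources() publishes to Redis, so it only works from a process that
# is already connected to a running Ray cluster; the snippet is therefore left
# commented out, and the connection details are hypothetical.
#
#     import ray
#     from ray.autoscaler.autoscaler import request_resources
#
#     ray.init(address="auto")        # attach to an existing cluster
#     request_resources(num_cpus=64)  # hint the autoscaler to scale up early
#     # ...then submit the ray.remote work that needs those cores...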
|
ujvl/ray-ng
|
python/ray/autoscaler/autoscaler.py
|
Python
|
apache-2.0
| 36,206
|
# ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
'''
Adc Package contains
G{packagetree }
'''
|
UManPychron/pychron
|
pychron/hardware/adc/__init__.py
|
Python
|
apache-2.0
| 785
|
#Author: Pieter Lewyllie, pilewyll@cisco.com
from app import app as application
import config
if __name__ == "__main__":
application.run()
|
plewyllie/yangre-gui
|
wsgi.py
|
Python
|
apache-2.0
| 143
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ironicclient.common import base
from ironicclient.common.i18n import _
from ironicclient.common import utils
from ironicclient import exc
class DeployTemplate(base.Resource):
def __repr__(self):
return "<DeployTemplate %s>" % self._info
class DeployTemplateManager(base.CreateManager):
resource_class = DeployTemplate
_creation_attributes = ['extra', 'name', 'steps', 'uuid']
_resource_name = 'deploy_templates'
def list(self, limit=None, marker=None, sort_key=None, sort_dir=None,
detail=False, fields=None, os_ironic_api_version=None,
global_request_id=None):
"""Retrieve a list of deploy templates.
        :param marker: Optional, the UUID of a deploy template, e.g. the last
template from a previous result set. Return the next
result set.
:param limit: The maximum number of results to return per
request, if:
1) limit > 0, the maximum number of deploy templates to return.
2) limit == 0, return the entire list of deploy templates.
3) limit param is NOT specified (None), the number of items
returned respect the maximum imposed by the Ironic API
(see Ironic's api.max_limit option).
:param sort_key: Optional, field used for sorting.
:param sort_dir: Optional, direction of sorting, either 'asc' (the
default) or 'desc'.
:param detail: Optional, boolean whether to return detailed information
about deploy templates.
:param fields: Optional, a list with a specified set of fields
of the resource to be returned. Can not be used
when 'detail' is set.
:param os_ironic_api_version: String version (e.g. "1.35") to use for
the request. If not specified, the client's default is used.
:param global_request_id: String containing global request ID header
value (in form "req-<UUID>") to use for the request.
:returns: A list of deploy templates.
"""
if limit is not None:
limit = int(limit)
if detail and fields:
raise exc.InvalidAttribute(_("Can't fetch a subset of fields "
"with 'detail' set"))
filters = utils.common_filters(marker, limit, sort_key, sort_dir,
fields, detail=detail)
path = ''
if filters:
path += '?' + '&'.join(filters)
header_values = {"os_ironic_api_version": os_ironic_api_version,
"global_request_id": global_request_id}
if limit is None:
return self._list(self._path(path), "deploy_templates",
**header_values)
else:
return self._list_pagination(self._path(path), "deploy_templates",
limit=limit, **header_values)
def get(self, template_id, fields=None, os_ironic_api_version=None,
global_request_id=None):
return self._get(resource_id=template_id, fields=fields,
os_ironic_api_version=os_ironic_api_version,
global_request_id=global_request_id)
def delete(self, template_id, os_ironic_api_version=None,
global_request_id=None):
return self._delete(resource_id=template_id,
os_ironic_api_version=os_ironic_api_version,
global_request_id=global_request_id)
def update(self, template_id, patch, os_ironic_api_version=None,
global_request_id=None):
return self._update(resource_id=template_id, patch=patch,
os_ironic_api_version=os_ironic_api_version,
global_request_id=global_request_id)
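# --- Editor's note: illustrative sketch, not part of the original module. ---
# Typical (hypothetical) use through a v1 client; the endpoint, auth settings
# and attribute access below are placeholders/assumptions. Left commented out
# because it needs a reachable Ironic API.
#
#     from ironicclient import client
#
#     ironic = client.get_client(1, endpoint_override='http://ironic:6385',
#                                auth_type='none')
#     for tmpl in ironic.deploy_template.list(detail=True):
#         print(tmpl.name, tmpl.steps)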
|
openstack/python-ironicclient
|
ironicclient/v1/deploy_template.py
|
Python
|
apache-2.0
| 4,538
|
# -*- coding: utf-8 -*-
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
In order for this to work, the AWS credentials need to be set via environment variables!
Make sure to set the following environment variables:
$ export AWS_ACCESS_KEY_ID=<Your AWS Access Key ID>
$ export AWS_SECRET_ACCESS_KEY=<Your AWS Secret Access Key>
"""
import argparse
from ConfigurationHandler import ConfigurationHandler
from libs3 import download, logger
from version import __version__
####################################################################
#
# FUNCTIONS
#
####################################################################
def parse_shell_parameters():
"""
Parse the provided shell parameters
"""
usage = '%(prog)s [-h, --help] [command]'
description = '%(prog)s AWS S3 SquashFS Image Downloader'
epilog = "And now you're in control!"
parser = argparse.ArgumentParser(description=description, epilog=epilog, usage=usage)
parser.add_argument('-v', '--version', action='version', version='%(prog)s ver.{0}'.format(__version__))
parser.add_argument('-o', '--output', action='store', help="Output file (under which to store the S3 object)",
required=True)
parser.add_argument('-k', '--key', action='store', help="The identifying key for this image in S3",
required=True)
parser.add_argument('-b', '--bucket', action='store', default=config.get('S3', 'bucket'),
help="A valid AWS S3 bucket (default: \"{0}\")".format(config.get('S3', 'bucket')))
log.debug("Shell arguments: {0}".format(parser.parse_args()))
return parser.parse_args()
def main():
"""
Run the whole thing
"""
# Get the shell arguments
args = parse_shell_parameters()
# Transfer shell arguments to variables
destination_file = args.output
bucket = args.bucket
image_key = args.key
# Ok, all set! We can download the file ...
log.debug('Downloading with key: "{0}" from bucket: "{1}" to output file: "{2}" '.format(image_key, bucket,
destination_file))
download(destination_file, image_key, bucket)
return 0
####################################################################
#
# MAIN
#
####################################################################
if __name__ == "__main__":
log = logger.get_logger('s3-image-download')
config = ConfigurationHandler().read_configuration()
main()
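# --- Editor's note: illustrative sketch, not part of the original module. ---
# The script is driven by the CLI flags parsed above plus the AWS credentials
# taken from the environment. Programmatic use boils down to the same libs3
# call that main() makes; the bucket and key below are placeholders.
#
#     download('/tmp/base-image.squashfs',    # destination file
#              'images/base-image.squashfs',  # S3 key
#              'my-image-bucket')             # S3 bucket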
|
cloudControl/s3-image-load
|
src/s3-image-download.py
|
Python
|
apache-2.0
| 3,210
|
"""Tests for letsencrypt-auto"""
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from contextlib import contextmanager
from functools import partial
from json import dumps
from os import chmod, environ
from os.path import abspath, dirname, exists, join
import re
from shutil import copy, rmtree
import socket
import ssl
from stat import S_IRUSR, S_IXUSR
from subprocess import CalledProcessError, check_output, Popen, PIPE
import sys
from tempfile import mkdtemp
from threading import Thread
from unittest import TestCase
from nose.tools import eq_, nottest, ok_
@nottest
def tests_dir():
"""Return a path to the "tests" directory."""
return dirname(abspath(__file__))
sys.path.insert(0, dirname(tests_dir()))
from build import build as build_le_auto
class RequestHandler(BaseHTTPRequestHandler):
"""An HTTPS request handler which is quiet and serves a specific folder."""
def __init__(self, resources, *args, **kwargs):
"""
:arg resources: A dict of resource paths pointing to content bytes
"""
self.resources = resources
BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
def log_message(self, format, *args):
"""Don't log each request to the terminal."""
def do_GET(self):
"""Serve a GET request."""
content = self.send_head()
if content is not None:
self.wfile.write(content)
def send_head(self):
"""Common code for GET and HEAD commands
This sends the response code and MIME headers and returns either a
bytestring of content or, if none is found, None.
"""
path = self.path[1:] # Strip leading slash.
content = self.resources.get(path)
if content is None:
self.send_error(404, 'Path "%s" not found in self.resources' % path)
else:
self.send_response(200)
self.send_header('Content-type', 'text/plain')
self.send_header('Content-Length', str(len(content)))
self.end_headers()
return content
def server_and_port(resources):
"""Return an unstarted HTTPS server and the port it will use."""
# Find a port, and bind to it. I can't get the OS to close the socket
# promptly after we shut down the server, so we typically need to try
# a couple ports after the first test case. Setting
# TCPServer.allow_reuse_address = True seems to have nothing to do
# with this behavior.
worked = False
for port in xrange(4443, 4543):
try:
server = HTTPServer(('localhost', port),
partial(RequestHandler, resources))
except socket.error:
pass
else:
worked = True
server.socket = ssl.wrap_socket(
server.socket,
certfile=join(tests_dir(), 'certs', 'localhost', 'server.pem'),
server_side=True)
break
if not worked:
raise RuntimeError("Couldn't find an unused socket for the testing HTTPS server.")
return server, port
@contextmanager
def serving(resources):
"""Spin up a local HTTPS server, and yield its base URL.
Use a self-signed cert generated as outlined by
https://coolaj86.com/articles/create-your-own-certificate-authority-for-
testing/.
"""
server, port = server_and_port(resources)
thread = Thread(target=server.serve_forever)
try:
thread.start()
yield 'https://localhost:{port}/'.format(port=port)
finally:
server.shutdown()
thread.join()
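# --- Editor's note: illustrative sketch, not part of the original test module.
# serving() is the backbone of the tests below: hand it a dict mapping URL
# paths to response bodies and it yields the base URL of a throwaway local
# HTTPS server. It relies on the self-signed cert under tests/certs/localhost,
# so this (otherwise hypothetical) snippet only works from a repo checkout:
#
#     with serving({'hello.txt': 'hi there'}) as base_url:
#         pass  # fetch base_url + 'hello.txt' with a client that trusts the test CA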
LE_AUTO_PATH = join(dirname(tests_dir()), 'letsencrypt-auto')
@contextmanager
def ephemeral_dir():
dir = mkdtemp(prefix='le-test-')
try:
yield dir
finally:
rmtree(dir)
def out_and_err(command, input=None, shell=False, env=None):
"""Run a shell command, and return stderr and stdout as string.
If the command returns nonzero, raise CalledProcessError.
:arg command: A list of commandline args
:arg input: Data to pipe to stdin. Omit for none.
Remaining args have the same meaning as for Popen.
"""
process = Popen(command,
stdout=PIPE,
stdin=PIPE,
stderr=PIPE,
shell=shell,
env=env)
out, err = process.communicate(input=input)
status = process.poll() # same as in check_output(), though wait() sounds better
if status:
raise CalledProcessError(status, command, output=out)
return out, err
def signed(content, private_key_name='signing.key'):
"""Return the signed SHA-256 hash of ``content``, using the given key file."""
command = ['openssl', 'dgst', '-sha256', '-sign',
join(tests_dir(), private_key_name)]
out, err = out_and_err(command, input=content)
return out
def install_le_auto(contents, venv_dir):
"""Install some given source code as the letsencrypt-auto script at the
root level of a virtualenv.
:arg contents: The contents of the built letsencrypt-auto script
:arg venv_dir: The path under which to install the script
"""
venv_le_auto_path = join(venv_dir, 'letsencrypt-auto')
with open(venv_le_auto_path, 'w') as le_auto:
le_auto.write(contents)
chmod(venv_le_auto_path, S_IRUSR | S_IXUSR)
def run_le_auto(venv_dir, base_url, **kwargs):
"""Run the prebuilt version of letsencrypt-auto, returning stdout and
stderr strings.
If the command returns other than 0, raise CalledProcessError.
"""
env = environ.copy()
d = dict(XDG_DATA_HOME=venv_dir,
# URL to PyPI-style JSON that tell us the latest released version
# of LE:
LE_AUTO_JSON_URL=base_url + 'letsencrypt/json',
# URL to dir containing letsencrypt-auto and letsencrypt-auto.sig:
LE_AUTO_DIR_TEMPLATE=base_url + '%s/',
# The public key corresponding to signing.key:
LE_AUTO_PUBLIC_KEY="""-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsMoSzLYQ7E1sdSOkwelg
tzKIh2qi3bpXuYtcfFC0XrvWig071NwIj+dZiT0OLZ2hPispEH0B7ISuuWg1ll7G
hFW0VdbxL6JdGzS2ShNWkX9hE9z+j8VqwDPOBn3ZHm03qwpYkBDwQib3KqOdYbTT
uUtJmmGcuk3a9Aq/sCT6DdfmTSdP5asdQYwIcaQreDrOosaS84DTWI3IU+UYJVgl
LsIVPBuy9IcgHidUQ96hJnoPsDCWsHwX62495QKEarauyKQrJzFes0EY95orDM47
Z5o/NDiQB11m91yNB0MmPYY9QSbnOA9j7IaaC97AwRLuwXY+/R2ablTcxurWou68
iQIDAQAB
-----END PUBLIC KEY-----""",
**kwargs)
env.update(d)
return out_and_err(
join(venv_dir, 'letsencrypt-auto') + ' --version',
shell=True,
env=env)
def set_le_script_version(venv_dir, version):
"""Tell the letsencrypt script to report a certain version.
We actually replace the script with a dummy version that knows only how to
print its version.
"""
with open(join(venv_dir, 'letsencrypt', 'bin', 'letsencrypt'), 'w') as script:
script.write("#!/usr/bin/env python\n"
"from sys import stderr\n"
"stderr.write('letsencrypt %s\\n')" % version)
class AutoTests(TestCase):
"""Test the major branch points of letsencrypt-auto:
* An le-auto upgrade is needed.
* An le-auto upgrade is not needed.
* There was an out-of-date LE script installed.
* There was a current LE script installed.
* There was no LE script installed (less important).
* Peep verification passes.
* Peep has a hash mismatch.
* The OpenSSL sig matches.
* The OpenSSL sig mismatches.
For tests which get to the end, we run merely ``letsencrypt --version``.
The functioning of the rest of the letsencrypt script is covered by other
test suites.
"""
def test_successes(self):
"""Exercise most branches of letsencrypt-auto.
They just happen to be the branches in which everything goes well.
I violate my usual rule of having small, decoupled tests, because...
1. We shouldn't need to run a Cartesian product of the branches: the
phases run in separate shell processes, containing state leakage
pretty effectively. The only shared state is FS state, and it's
limited to a temp dir, assuming (if we dare) all functions properly.
2. One combination of branches happens to set us up nicely for testing
the next, saving code.
"""
NEW_LE_AUTO = build_le_auto(
version='99.9.9',
requirements='# sha256: HMFNYatCTN7kRvUeUPESP4SC7HQFh_54YmyTO7ooc6A\n'
'letsencrypt==99.9.9')
NEW_LE_AUTO_SIG = signed(NEW_LE_AUTO)
with ephemeral_dir() as venv_dir:
# This serves a PyPI page with a higher version, a GitHub-alike
# with a corresponding le-auto script, and a matching signature.
resources = {'letsencrypt/json': dumps({'releases': {'99.9.9': None}}),
'v99.9.9/letsencrypt-auto': NEW_LE_AUTO,
'v99.9.9/letsencrypt-auto.sig': NEW_LE_AUTO_SIG}
with serving(resources) as base_url:
run_letsencrypt_auto = partial(
run_le_auto,
venv_dir,
base_url,
PIP_FIND_LINKS=join(tests_dir(),
'fake-letsencrypt',
'dist'))
# Test when a phase-1 upgrade is needed, there's no LE binary
# installed, and peep verifies:
install_le_auto(build_le_auto(version='50.0.0'), venv_dir)
out, err = run_letsencrypt_auto()
ok_(re.match(r'letsencrypt \d+\.\d+\.\d+',
err.strip().splitlines()[-1]))
# Make a few assertions to test the validity of the next tests:
self.assertIn('Upgrading letsencrypt-auto ', out)
self.assertIn('Creating virtual environment...', out)
# Now we have le-auto 99.9.9 and LE 99.9.9 installed. This
# conveniently sets us up to test the next 2 cases.
# Test when neither phase-1 upgrade nor phase-2 upgrade is
# needed (probably a common case):
out, err = run_letsencrypt_auto()
self.assertNotIn('Upgrading letsencrypt-auto ', out)
self.assertNotIn('Creating virtual environment...', out)
# Test when a phase-1 upgrade is not needed but a phase-2
# upgrade is:
set_le_script_version(venv_dir, '0.0.1')
out, err = run_letsencrypt_auto()
self.assertNotIn('Upgrading letsencrypt-auto ', out)
self.assertIn('Creating virtual environment...', out)
def test_openssl_failure(self):
"""Make sure we stop if the openssl signature check fails."""
with ephemeral_dir() as venv_dir:
# Serve an unrelated hash signed with the good key (easier than
# making a bad key, and a mismatch is a mismatch):
resources = {'': '<a href="letsencrypt/">letsencrypt/</a>',
'letsencrypt/json': dumps({'releases': {'99.9.9': None}}),
'v99.9.9/letsencrypt-auto': build_le_auto(version='99.9.9'),
'v99.9.9/letsencrypt-auto.sig': signed('something else')}
with serving(resources) as base_url:
copy(LE_AUTO_PATH, venv_dir)
try:
out, err = run_le_auto(venv_dir, base_url)
except CalledProcessError as exc:
eq_(exc.returncode, 1)
self.assertIn("Couldn't verify signature of downloaded "
"letsencrypt-auto.",
exc.output)
else:
self.fail('Signature check on letsencrypt-auto erroneously passed.')
def test_peep_failure(self):
"""Make sure peep stops us if there is a hash mismatch."""
with ephemeral_dir() as venv_dir:
resources = {'': '<a href="letsencrypt/">letsencrypt/</a>',
'letsencrypt/json': dumps({'releases': {'99.9.9': None}})}
with serving(resources) as base_url:
# Build a le-auto script embedding a bad requirements file:
install_le_auto(
build_le_auto(
version='99.9.9',
requirements='# sha256: badbadbadbadbadbadbadbadbadbadbadbadbadbadb\n'
'configobj==5.0.6'),
venv_dir)
try:
out, err = run_le_auto(venv_dir, base_url)
except CalledProcessError as exc:
eq_(exc.returncode, 1)
self.assertIn("THE FOLLOWING PACKAGES DIDN'T MATCH THE "
"HASHES SPECIFIED IN THE REQUIREMENTS",
exc.output)
ok_(not exists(join(venv_dir, 'letsencrypt')),
msg="The virtualenv was left around, even though "
"installation didn't succeed. We shouldn't do "
"this, as it foils our detection of whether we "
"need to recreate the virtualenv, which hinges "
"on the presence of $VENV_BIN/letsencrypt.")
else:
self.fail("Peep didn't detect a bad hash and stop the "
"installation.")
|
thanatos/lets-encrypt-preview
|
letsencrypt-auto-source/tests/auto_test.py
|
Python
|
apache-2.0
| 13,742
|
"""Tests for selectors.py."""
import unittest
import unittest.mock
from asyncio import selectors
class FakeSelector(selectors._BaseSelectorImpl):
"""Trivial non-abstract subclass of BaseSelector."""
def select(self, timeout=None):
raise NotImplementedError
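# A minimal sketch, for orientation only, of how a concrete selector is
# normally driven outside of these tests; ``sock`` is assumed to be an
# already-connected socket supplied by the caller.
def _example_selector_loop(sock):  # pragma: no cover - illustrative only
    sel = selectors.DefaultSelector()
    sel.register(sock, selectors.EVENT_READ, data=None)
    while True:
        for key, _events in sel.select(timeout=1.0):
            chunk = key.fileobj.recv(1024)  # key.fileobj is the registered socket
            if not chunk:
                sel.unregister(key.fileobj)
                sel.close()
                return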
class _SelectorMappingTests(unittest.TestCase):
def test_len(self):
s = FakeSelector()
map = selectors._SelectorMapping(s)
self.assertTrue(map.__len__() == 0)
f = unittest.mock.Mock()
f.fileno.return_value = 10
s.register(f, selectors.EVENT_READ, None)
self.assertTrue(len(map) == 1)
def test_getitem(self):
s = FakeSelector()
map = selectors._SelectorMapping(s)
f = unittest.mock.Mock()
f.fileno.return_value = 10
s.register(f, selectors.EVENT_READ, None)
attended = selectors.SelectorKey(f, 10, selectors.EVENT_READ, None)
self.assertEqual(attended, map.__getitem__(f))
def test_getitem_key_error(self):
s = FakeSelector()
map = selectors._SelectorMapping(s)
self.assertTrue(len(map) == 0)
f = unittest.mock.Mock()
f.fileno.return_value = 10
s.register(f, selectors.EVENT_READ, None)
self.assertRaises(KeyError, map.__getitem__, 5)
def test_iter(self):
s = FakeSelector()
map = selectors._SelectorMapping(s)
self.assertTrue(len(map) == 0)
f = unittest.mock.Mock()
f.fileno.return_value = 5
s.register(f, selectors.EVENT_READ, None)
counter = 0
for fileno in map.__iter__():
self.assertEqual(5, fileno)
counter += 1
for idx in map:
self.assertEqual(f, map[idx].fileobj)
self.assertEqual(1, counter)
class BaseSelectorTests(unittest.TestCase):
def test_fileobj_to_fd(self):
self.assertEqual(10, selectors._fileobj_to_fd(10))
f = unittest.mock.Mock()
f.fileno.return_value = 10
self.assertEqual(10, selectors._fileobj_to_fd(f))
f.fileno.side_effect = AttributeError
self.assertRaises(ValueError, selectors._fileobj_to_fd, f)
f.fileno.return_value = -1
self.assertRaises(ValueError, selectors._fileobj_to_fd, f)
def test_selector_key_repr(self):
key = selectors.SelectorKey(10, 10, selectors.EVENT_READ, None)
self.assertEqual(
"SelectorKey(fileobj=10, fd=10, events=1, data=None)", repr(key))
def test_register(self):
fobj = unittest.mock.Mock()
fobj.fileno.return_value = 10
s = FakeSelector()
key = s.register(fobj, selectors.EVENT_READ)
self.assertIsInstance(key, selectors.SelectorKey)
self.assertEqual(key.fd, 10)
self.assertIs(key, s._fd_to_key[10])
def test_register_unknown_event(self):
s = FakeSelector()
self.assertRaises(ValueError, s.register, unittest.mock.Mock(), 999999)
def test_register_already_registered(self):
fobj = unittest.mock.Mock()
fobj.fileno.return_value = 10
s = FakeSelector()
s.register(fobj, selectors.EVENT_READ)
self.assertRaises(KeyError, s.register, fobj, selectors.EVENT_READ)
def test_unregister(self):
fobj = unittest.mock.Mock()
fobj.fileno.return_value = 10
s = FakeSelector()
s.register(fobj, selectors.EVENT_READ)
s.unregister(fobj)
self.assertFalse(s._fd_to_key)
def test_unregister_unknown(self):
fobj = unittest.mock.Mock()
fobj.fileno.return_value = 10
s = FakeSelector()
self.assertRaises(KeyError, s.unregister, fobj)
def test_modify_unknown(self):
fobj = unittest.mock.Mock()
fobj.fileno.return_value = 10
s = FakeSelector()
self.assertRaises(KeyError, s.modify, fobj, 1)
def test_modify(self):
fobj = unittest.mock.Mock()
fobj.fileno.return_value = 10
s = FakeSelector()
key = s.register(fobj, selectors.EVENT_READ)
key2 = s.modify(fobj, selectors.EVENT_WRITE)
self.assertNotEqual(key.events, key2.events)
self.assertEqual(
selectors.SelectorKey(fobj, 10, selectors.EVENT_WRITE, None),
s.get_key(fobj))
def test_modify_data(self):
fobj = unittest.mock.Mock()
fobj.fileno.return_value = 10
d1 = object()
d2 = object()
s = FakeSelector()
key = s.register(fobj, selectors.EVENT_READ, d1)
key2 = s.modify(fobj, selectors.EVENT_READ, d2)
self.assertEqual(key.events, key2.events)
self.assertNotEqual(key.data, key2.data)
self.assertEqual(
selectors.SelectorKey(fobj, 10, selectors.EVENT_READ, d2),
s.get_key(fobj))
def test_modify_data_use_a_shortcut(self):
fobj = unittest.mock.Mock()
fobj.fileno.return_value = 10
d1 = object()
d2 = object()
s = FakeSelector()
key = s.register(fobj, selectors.EVENT_READ, d1)
s.unregister = unittest.mock.Mock()
s.register = unittest.mock.Mock()
key2 = s.modify(fobj, selectors.EVENT_READ, d2)
self.assertFalse(s.unregister.called)
self.assertFalse(s.register.called)
def test_modify_same(self):
fobj = unittest.mock.Mock()
fobj.fileno.return_value = 10
data = object()
s = FakeSelector()
key = s.register(fobj, selectors.EVENT_READ, data)
key2 = s.modify(fobj, selectors.EVENT_READ, data)
self.assertIs(key, key2)
def test_select(self):
s = FakeSelector()
self.assertRaises(NotImplementedError, s.select)
def test_close(self):
s = FakeSelector()
s.register(1, selectors.EVENT_READ)
s.close()
self.assertFalse(s._fd_to_key)
def test_context_manager(self):
s = FakeSelector()
with s as sel:
sel.register(1, selectors.EVENT_READ)
self.assertFalse(s._fd_to_key)
def test_key_from_fd(self):
s = FakeSelector()
key = s.register(1, selectors.EVENT_READ)
self.assertIs(key, s._key_from_fd(1))
self.assertIsNone(s._key_from_fd(10))
if hasattr(selectors.DefaultSelector, 'fileno'):
def test_fileno(self):
self.assertIsInstance(selectors.DefaultSelector().fileno(), int)
if __name__ == '__main__':
unittest.main()
|
bslatkin/pycon2014
|
lib/asyncio-0.4.1/tests/test_selectors.py
|
Python
|
apache-2.0
| 6,463
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Dict, List, Optional, Set, Type, TYPE_CHECKING
from flask_babel import _
from sqlalchemy import or_
from sqlalchemy.orm import Session, subqueryload
from sqlalchemy.orm.exc import NoResultFound
from superset.datasets.commands.exceptions import DatasetNotFoundError
if TYPE_CHECKING:
from collections import OrderedDict
from superset.connectors.base.models import BaseDatasource
from superset.models.core import Database
class ConnectorRegistry:
""" Central Registry for all available datasource engines"""
sources: Dict[str, Type["BaseDatasource"]] = {}
@classmethod
def register_sources(cls, datasource_config: "OrderedDict[str, List[str]]") -> None:
for module_name, class_names in datasource_config.items():
class_names = [str(s) for s in class_names]
module_obj = __import__(module_name, fromlist=class_names)
for class_name in class_names:
source_class = getattr(module_obj, class_name)
cls.sources[source_class.type] = source_class
@classmethod
def get_datasource(
cls, datasource_type: str, datasource_id: int, session: Session
) -> "BaseDatasource":
"""Safely get a datasource instance, raises `DatasetNotFoundError` if
`datasource_type` is not registered or `datasource_id` does not
exist."""
if datasource_type not in cls.sources:
raise DatasetNotFoundError()
datasource = (
session.query(cls.sources[datasource_type])
.filter_by(id=datasource_id)
.one_or_none()
)
if not datasource:
raise DatasetNotFoundError()
return datasource
@classmethod
def get_all_datasources(cls, session: Session) -> List["BaseDatasource"]:
datasources: List["BaseDatasource"] = []
for source_type in ConnectorRegistry.sources:
source_class = ConnectorRegistry.sources[source_type]
qry = session.query(source_class)
qry = source_class.default_query(qry)
datasources.extend(qry.all())
return datasources
@classmethod
def get_datasource_by_id( # pylint: disable=too-many-arguments
cls, session: Session, datasource_id: int,
) -> "BaseDatasource":
"""
Find a datasource instance based on the unique id.
:param session: Session to use
:param datasource_id: unique id of datasource
:return: Datasource corresponding to the id
:raises NoResultFound: if no datasource is found corresponding to the id
"""
for datasource_class in ConnectorRegistry.sources.values():
try:
return (
session.query(datasource_class)
.filter(datasource_class.id == datasource_id)
.one()
)
except NoResultFound:
# proceed to next datasource type
pass
raise NoResultFound(_("Datasource id not found: %(id)s", id=datasource_id))
@classmethod
def get_datasource_by_name( # pylint: disable=too-many-arguments
cls,
session: Session,
datasource_type: str,
datasource_name: str,
schema: str,
database_name: str,
) -> Optional["BaseDatasource"]:
datasource_class = ConnectorRegistry.sources[datasource_type]
return datasource_class.get_datasource_by_name(
session, datasource_name, schema, database_name
)
@classmethod
def query_datasources_by_permissions( # pylint: disable=invalid-name
cls,
session: Session,
database: "Database",
permissions: Set[str],
schema_perms: Set[str],
) -> List["BaseDatasource"]:
# TODO(bogdan): add unit test
datasource_class = ConnectorRegistry.sources[database.type]
return (
session.query(datasource_class)
.filter_by(database_id=database.id)
.filter(
or_(
datasource_class.perm.in_(permissions),
datasource_class.schema_perm.in_(schema_perms),
)
)
.all()
)
@classmethod
def get_eager_datasource(
cls, session: Session, datasource_type: str, datasource_id: int
) -> "BaseDatasource":
"""Returns datasource with columns and metrics."""
datasource_class = ConnectorRegistry.sources[datasource_type]
return (
session.query(datasource_class)
.options(
subqueryload(datasource_class.columns),
subqueryload(datasource_class.metrics),
)
.filter_by(id=datasource_id)
.one()
)
@classmethod
def query_datasources_by_name(
cls,
session: Session,
database: "Database",
datasource_name: str,
schema: Optional[str] = None,
) -> List["BaseDatasource"]:
datasource_class = ConnectorRegistry.sources[database.type]
return datasource_class.query_datasources_by_name(
session, database, datasource_name, schema=schema
)
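# A minimal usage sketch, for illustration only; the module path, class name,
# and the "table" type string are assumptions standing in for whatever the
# application actually registers, not part of this module's API surface.
def _example_registry_usage(session: Session) -> "BaseDatasource":  # pragma: no cover
    from collections import OrderedDict

    ConnectorRegistry.register_sources(
        OrderedDict([("superset.connectors.sqla.models", ["SqlaTable"])])
    )
    # "table" assumes the registered class exposes ``type == "table"``.
    return ConnectorRegistry.get_datasource(
        datasource_type="table", datasource_id=1, session=session
    )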
|
mistercrunch/panoramix
|
superset/connectors/connector_registry.py
|
Python
|
apache-2.0
| 6,020
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Scheduler Host Filters.
"""
import httplib
from oslo.config import cfg
import stubout
from nova import context
from nova import db
from nova.openstack.common import jsonutils
from nova.openstack.common import timeutils
from nova.pci import pci_stats
from nova.scheduler import filters
from nova.scheduler.filters import extra_specs_ops
from nova.scheduler.filters import trusted_filter
from nova import servicegroup
from nova import test
from nova.tests.scheduler import fakes
from nova import utils
CONF = cfg.CONF
CONF.import_opt('my_ip', 'nova.netconf')
class TestFilter(filters.BaseHostFilter):
pass
class TestBogusFilter(object):
"""Class that doesn't inherit from BaseHostFilter."""
pass
class ExtraSpecsOpsTestCase(test.NoDBTestCase):
def _do_extra_specs_ops_test(self, value, req, matches):
assertion = self.assertTrue if matches else self.assertFalse
assertion(extra_specs_ops.match(value, req))
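    # As the cases below exercise: a bare requirement (no operator) is a plain
    # string comparison; the 's'-prefixed operators (s==, s!=, s<, s<=, s>,
    # s>=) also compare as strings; '<in>' is a substring check; '<or>' lists
    # alternatives; and the numeric operators ('=', '<=', '>=') coerce both
    # sides to numbers -- note that '= 123' matches '124', so '=' behaves as an
    # "at least" check rather than strict equality.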
def test_extra_specs_matches_simple(self):
self._do_extra_specs_ops_test(
value='1',
req='1',
matches=True)
def test_extra_specs_fails_simple(self):
self._do_extra_specs_ops_test(
value='',
req='1',
matches=False)
def test_extra_specs_fails_simple2(self):
self._do_extra_specs_ops_test(
value='3',
req='1',
matches=False)
def test_extra_specs_fails_simple3(self):
self._do_extra_specs_ops_test(
value='222',
req='2',
matches=False)
def test_extra_specs_fails_with_bogus_ops(self):
self._do_extra_specs_ops_test(
value='4',
req='> 2',
matches=False)
def test_extra_specs_matches_with_op_eq(self):
self._do_extra_specs_ops_test(
value='123',
req='= 123',
matches=True)
def test_extra_specs_matches_with_op_eq2(self):
self._do_extra_specs_ops_test(
value='124',
req='= 123',
matches=True)
def test_extra_specs_fails_with_op_eq(self):
self._do_extra_specs_ops_test(
value='34',
req='= 234',
matches=False)
def test_extra_specs_fails_with_op_eq3(self):
self._do_extra_specs_ops_test(
value='34',
req='=',
matches=False)
def test_extra_specs_matches_with_op_seq(self):
self._do_extra_specs_ops_test(
value='123',
req='s== 123',
matches=True)
def test_extra_specs_fails_with_op_seq(self):
self._do_extra_specs_ops_test(
value='1234',
req='s== 123',
matches=False)
def test_extra_specs_matches_with_op_sneq(self):
self._do_extra_specs_ops_test(
value='1234',
req='s!= 123',
matches=True)
def test_extra_specs_fails_with_op_sneq(self):
self._do_extra_specs_ops_test(
value='123',
req='s!= 123',
matches=False)
def test_extra_specs_fails_with_op_sge(self):
self._do_extra_specs_ops_test(
value='1000',
req='s>= 234',
matches=False)
def test_extra_specs_fails_with_op_sle(self):
self._do_extra_specs_ops_test(
value='1234',
req='s<= 1000',
matches=False)
def test_extra_specs_fails_with_op_sl(self):
self._do_extra_specs_ops_test(
value='2',
req='s< 12',
matches=False)
def test_extra_specs_fails_with_op_sg(self):
self._do_extra_specs_ops_test(
value='12',
req='s> 2',
matches=False)
def test_extra_specs_matches_with_op_in(self):
self._do_extra_specs_ops_test(
value='12311321',
req='<in> 11',
matches=True)
def test_extra_specs_matches_with_op_in2(self):
self._do_extra_specs_ops_test(
value='12311321',
req='<in> 12311321',
matches=True)
def test_extra_specs_matches_with_op_in3(self):
self._do_extra_specs_ops_test(
value='12311321',
req='<in> 12311321 <in>',
matches=True)
def test_extra_specs_fails_with_op_in(self):
self._do_extra_specs_ops_test(
value='12310321',
req='<in> 11',
matches=False)
def test_extra_specs_fails_with_op_in2(self):
self._do_extra_specs_ops_test(
value='12310321',
req='<in> 11 <in>',
matches=False)
def test_extra_specs_matches_with_op_or(self):
self._do_extra_specs_ops_test(
value='12',
req='<or> 11 <or> 12',
matches=True)
def test_extra_specs_matches_with_op_or2(self):
self._do_extra_specs_ops_test(
value='12',
req='<or> 11 <or> 12 <or>',
matches=True)
def test_extra_specs_fails_with_op_or(self):
self._do_extra_specs_ops_test(
value='13',
req='<or> 11 <or> 12',
matches=False)
def test_extra_specs_fails_with_op_or2(self):
self._do_extra_specs_ops_test(
value='13',
req='<or> 11 <or> 12 <or>',
matches=False)
def test_extra_specs_matches_with_op_le(self):
self._do_extra_specs_ops_test(
value='2',
req='<= 10',
matches=True)
def test_extra_specs_fails_with_op_le(self):
self._do_extra_specs_ops_test(
value='3',
req='<= 2',
matches=False)
def test_extra_specs_matches_with_op_ge(self):
self._do_extra_specs_ops_test(
value='3',
req='>= 1',
matches=True)
def test_extra_specs_fails_with_op_ge(self):
self._do_extra_specs_ops_test(
value='2',
req='>= 3',
matches=False)
class HostFiltersTestCase(test.NoDBTestCase):
"""Test case for host filters."""
# FIXME(sirp): These tests still require DB access until we can separate
# the testing of the DB API code from the host-filter code.
USES_DB = True
def fake_oat_request(self, *args, **kwargs):
"""Stubs out the response from OAT service."""
self.oat_attested = True
return httplib.OK, self.oat_data
def setUp(self):
super(HostFiltersTestCase, self).setUp()
self.oat_data = ''
self.oat_attested = False
self.stubs = stubout.StubOutForTesting()
self.stubs.Set(trusted_filter.AttestationService, '_request',
self.fake_oat_request)
self.context = context.RequestContext('fake', 'fake')
self.json_query = jsonutils.dumps(
['and', ['>=', '$free_ram_mb', 1024],
['>=', '$free_disk_mb', 200 * 1024]])
filter_handler = filters.HostFilterHandler()
classes = filter_handler.get_matching_classes(
['nova.scheduler.filters.all_filters'])
self.class_map = {}
for cls in classes:
self.class_map[cls.__name__] = cls
def test_all_filters(self):
# Double check at least a couple of known filters exist
self.assertIn('AllHostsFilter', self.class_map)
self.assertIn('ComputeFilter', self.class_map)
def test_all_host_filter(self):
filt_cls = self.class_map['AllHostsFilter']()
host = fakes.FakeHostState('host1', 'node1', {})
self.assertTrue(filt_cls.host_passes(host, {}))
def _stub_service_is_up(self, ret_value):
def fake_service_is_up(self, service):
return ret_value
self.stubs.Set(servicegroup.API, 'service_is_up', fake_service_is_up)
def test_affinity_different_filter_passes(self):
filt_cls = self.class_map['DifferentHostFilter']()
host = fakes.FakeHostState('host1', 'node1', {})
instance = fakes.FakeInstance(context=self.context,
params={'host': 'host2'})
instance_uuid = instance.uuid
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': {
'different_host': [instance_uuid], }}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_affinity_different_filter_no_list_passes(self):
filt_cls = self.class_map['DifferentHostFilter']()
host = fakes.FakeHostState('host1', 'node1', {})
instance = fakes.FakeInstance(context=self.context,
params={'host': 'host2'})
instance_uuid = instance.uuid
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': {
'different_host': instance_uuid}}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_affinity_different_filter_fails(self):
filt_cls = self.class_map['DifferentHostFilter']()
host = fakes.FakeHostState('host1', 'node1', {})
instance = fakes.FakeInstance(context=self.context,
params={'host': 'host1'})
instance_uuid = instance.uuid
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': {
'different_host': [instance_uuid], }}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_affinity_different_filter_handles_none(self):
filt_cls = self.class_map['DifferentHostFilter']()
host = fakes.FakeHostState('host1', 'node1', {})
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': None}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_affinity_different_filter_handles_deleted_instance(self):
filt_cls = self.class_map['DifferentHostFilter']()
host = fakes.FakeHostState('host1', 'node1', {})
instance = fakes.FakeInstance(context=self.context,
params={'host': 'host1'})
instance_uuid = instance.uuid
db.instance_destroy(self.context, instance_uuid)
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': {
'different_host': [instance_uuid], }}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_affinity_same_filter_no_list_passes(self):
filt_cls = self.class_map['SameHostFilter']()
host = fakes.FakeHostState('host1', 'node1', {})
instance = fakes.FakeInstance(context=self.context,
params={'host': 'host1'})
instance_uuid = instance.uuid
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': {
'same_host': instance_uuid}}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_affinity_same_filter_passes(self):
filt_cls = self.class_map['SameHostFilter']()
host = fakes.FakeHostState('host1', 'node1', {})
instance = fakes.FakeInstance(context=self.context,
params={'host': 'host1'})
instance_uuid = instance.uuid
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': {
'same_host': [instance_uuid], }}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_affinity_same_filter_fails(self):
filt_cls = self.class_map['SameHostFilter']()
host = fakes.FakeHostState('host1', 'node1', {})
instance = fakes.FakeInstance(context=self.context,
params={'host': 'host2'})
instance_uuid = instance.uuid
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': {
'same_host': [instance_uuid], }}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_affinity_same_filter_handles_none(self):
filt_cls = self.class_map['SameHostFilter']()
host = fakes.FakeHostState('host1', 'node1', {})
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': None}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_affinity_same_filter_handles_deleted_instance(self):
filt_cls = self.class_map['SameHostFilter']()
host = fakes.FakeHostState('host1', 'node1', {})
instance = fakes.FakeInstance(context=self.context,
params={'host': 'host1'})
instance_uuid = instance.uuid
db.instance_destroy(self.context, instance_uuid)
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': {
'same_host': [instance_uuid], }}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_affinity_simple_cidr_filter_passes(self):
filt_cls = self.class_map['SimpleCIDRAffinityFilter']()
host = fakes.FakeHostState('host1', 'node1', {})
host.host_ip = '10.8.1.1'
affinity_ip = "10.8.1.100"
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': {
'cidr': '/24',
'build_near_host_ip': affinity_ip}}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_affinity_simple_cidr_filter_fails(self):
filt_cls = self.class_map['SimpleCIDRAffinityFilter']()
host = fakes.FakeHostState('host1', 'node1', {})
host.host_ip = '10.8.1.1'
affinity_ip = "10.8.1.100"
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': {
'cidr': '/32',
'build_near_host_ip': affinity_ip}}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_affinity_simple_cidr_filter_handles_none(self):
filt_cls = self.class_map['SimpleCIDRAffinityFilter']()
host = fakes.FakeHostState('host1', 'node1', {})
affinity_ip = CONF.my_ip.split('.')[0:3]
affinity_ip.append('100')
affinity_ip = str.join('.', affinity_ip)
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': None}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_compute_filter_passes(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['ComputeFilter']()
filter_properties = {'instance_type': {'memory_mb': 1024}}
service = {'disabled': False}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024, 'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_type_filter(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['TypeAffinityFilter']()
filter_properties = {'context': self.context,
'instance_type': {'id': 1}}
filter2_properties = {'context': self.context,
'instance_type': {'id': 2}}
service = {'disabled': False}
host = fakes.FakeHostState('fake_host', 'fake_node',
{'service': service})
        # True since empty
self.assertTrue(filt_cls.host_passes(host, filter_properties))
fakes.FakeInstance(context=self.context,
params={'host': 'fake_host', 'instance_type_id': 1})
        # True since same type
self.assertTrue(filt_cls.host_passes(host, filter_properties))
        # False since different type
self.assertFalse(filt_cls.host_passes(host, filter2_properties))
        # False since node not homogeneous
fakes.FakeInstance(context=self.context,
params={'host': 'fake_host', 'instance_type_id': 2})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_aggregate_type_filter(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['AggregateTypeAffinityFilter']()
filter_properties = {'context': self.context,
'instance_type': {'name': 'fake1'}}
filter2_properties = {'context': self.context,
'instance_type': {'name': 'fake2'}}
service = {'disabled': False}
host = fakes.FakeHostState('fake_host', 'fake_node',
{'service': service})
        # True since no aggregates
self.assertTrue(filt_cls.host_passes(host, filter_properties))
        # True since type matches aggregate metadata
self._create_aggregate_with_host(name='fake_aggregate',
hosts=['fake_host'], metadata={'instance_type': 'fake1'})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
        # False since type does not match aggregate metadata
self.assertFalse(filt_cls.host_passes(host, filter2_properties))
def test_ram_filter_fails_on_memory(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['RamFilter']()
self.flags(ram_allocation_ratio=1.0)
filter_properties = {'instance_type': {'memory_mb': 1024}}
service = {'disabled': False}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1023, 'total_usable_ram_mb': 1024,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_ram_filter_passes(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['RamFilter']()
self.flags(ram_allocation_ratio=1.0)
filter_properties = {'instance_type': {'memory_mb': 1024}}
service = {'disabled': False}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024, 'total_usable_ram_mb': 1024,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_ram_filter_oversubscribe(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['RamFilter']()
self.flags(ram_allocation_ratio=2.0)
filter_properties = {'instance_type': {'memory_mb': 1024}}
service = {'disabled': False}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': -1024, 'total_usable_ram_mb': 2048,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
self.assertEqual(2048 * 2.0, host.limits['memory_mb'])
def test_aggregate_ram_filter_value_error(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['AggregateRamFilter']()
self.flags(ram_allocation_ratio=1.0)
filter_properties = {'context': self.context,
'instance_type': {'memory_mb': 1024}}
service = {'disabled': False}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024, 'total_usable_ram_mb': 1024,
'service': service})
self._create_aggregate_with_host(name='fake_aggregate',
hosts=['host1'],
metadata={'ram_allocation_ratio': 'XXX'})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
self.assertEqual(1024 * 1.0, host.limits['memory_mb'])
def test_aggregate_ram_filter_default_value(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['AggregateRamFilter']()
self.flags(ram_allocation_ratio=1.0)
filter_properties = {'context': self.context,
'instance_type': {'memory_mb': 1024}}
service = {'disabled': False}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1023, 'total_usable_ram_mb': 1024,
'service': service})
# False: fallback to default flag w/o aggregates
self.assertFalse(filt_cls.host_passes(host, filter_properties))
self._create_aggregate_with_host(name='fake_aggregate',
hosts=['host1'],
metadata={'ram_allocation_ratio': '2.0'})
# True: use ratio from aggregates
self.assertTrue(filt_cls.host_passes(host, filter_properties))
self.assertEqual(1024 * 2.0, host.limits['memory_mb'])
def test_aggregate_ram_filter_conflict_values(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['AggregateRamFilter']()
self.flags(ram_allocation_ratio=1.0)
filter_properties = {'context': self.context,
'instance_type': {'memory_mb': 1024}}
service = {'disabled': False}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1023, 'total_usable_ram_mb': 1024,
'service': service})
self._create_aggregate_with_host(name='fake_aggregate1',
hosts=['host1'],
metadata={'ram_allocation_ratio': '1.5'})
self._create_aggregate_with_host(name='fake_aggregate2',
hosts=['host1'],
metadata={'ram_allocation_ratio': '2.0'})
# use the minimum ratio from aggregates
self.assertTrue(filt_cls.host_passes(host, filter_properties))
self.assertEqual(1024 * 1.5, host.limits['memory_mb'])
def test_disk_filter_passes(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['DiskFilter']()
self.flags(disk_allocation_ratio=1.0)
filter_properties = {'instance_type': {'root_gb': 1,
'ephemeral_gb': 1, 'swap': 512}}
service = {'disabled': False}
host = fakes.FakeHostState('host1', 'node1',
{'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 13,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_disk_filter_fails(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['DiskFilter']()
self.flags(disk_allocation_ratio=1.0)
filter_properties = {'instance_type': {'root_gb': 10,
'ephemeral_gb': 1, 'swap': 1024}}
service = {'disabled': False}
host = fakes.FakeHostState('host1', 'node1',
{'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 13,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_disk_filter_oversubscribe(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['DiskFilter']()
self.flags(disk_allocation_ratio=10.0)
filter_properties = {'instance_type': {'root_gb': 100,
'ephemeral_gb': 18, 'swap': 1024}}
service = {'disabled': False}
# 1GB used... so 119GB allowed...
host = fakes.FakeHostState('host1', 'node1',
{'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 12,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
self.assertEqual(12 * 10.0, host.limits['disk_gb'])
def test_disk_filter_oversubscribe_fail(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['DiskFilter']()
self.flags(disk_allocation_ratio=10.0)
filter_properties = {'instance_type': {'root_gb': 100,
'ephemeral_gb': 19, 'swap': 1024}}
service = {'disabled': False}
# 1GB used... so 119GB allowed...
host = fakes.FakeHostState('host1', 'node1',
{'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 12,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def _test_compute_filter_fails_on_service_disabled(self,
reason=None):
self._stub_service_is_up(True)
filt_cls = self.class_map['ComputeFilter']()
filter_properties = {'instance_type': {'memory_mb': 1024}}
service = {'disabled': True}
if reason:
service['disabled_reason'] = reason
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024, 'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_compute_filter_fails_on_service_disabled_no_reason(self):
self._test_compute_filter_fails_on_service_disabled()
def test_compute_filter_fails_on_service_disabled(self):
self._test_compute_filter_fails_on_service_disabled(reason='Test')
def test_compute_filter_fails_on_service_down(self):
self._stub_service_is_up(False)
filt_cls = self.class_map['ComputeFilter']()
filter_properties = {'instance_type': {'memory_mb': 1024}}
service = {'disabled': False}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024, 'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_image_properties_filter_passes_same_inst_props_and_version(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['ImagePropertiesFilter']()
img_props = {'properties': {'_architecture': 'x86_64',
'hypervisor_type': 'kvm',
'vm_mode': 'hvm',
'hypervisor_version_requires': '>=6.0,<6.2'
}}
filter_properties = {'request_spec': {'image': img_props}}
hypervisor_version = utils.convert_version_to_int('6.0.0')
capabilities = {'supported_instances': [('x86_64', 'kvm', 'hvm')],
'hypervisor_version': hypervisor_version}
host = fakes.FakeHostState('host1', 'node1', capabilities)
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_image_properties_filter_fails_different_inst_props(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['ImagePropertiesFilter']()
img_props = {'properties': {'architecture': 'arm',
'hypervisor_type': 'qemu',
'vm_mode': 'hvm'}}
filter_properties = {'request_spec': {'image': img_props}}
hypervisor_version = utils.convert_version_to_int('6.0.0')
capabilities = {'supported_instances': [('x86_64', 'kvm', 'hvm')],
'hypervisor_version': hypervisor_version}
host = fakes.FakeHostState('host1', 'node1', capabilities)
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_image_properties_filter_fails_different_hyper_version(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['ImagePropertiesFilter']()
img_props = {'properties': {'architecture': 'x86_64',
'hypervisor_type': 'kvm',
'vm_mode': 'hvm',
'hypervisor_version_requires': '>=6.2'}}
filter_properties = {'request_spec': {'image': img_props}}
hypervisor_version = utils.convert_version_to_int('6.0.0')
capabilities = {'enabled': True,
'supported_instances': [('x86_64', 'kvm', 'hvm')],
'hypervisor_version': hypervisor_version}
host = fakes.FakeHostState('host1', 'node1', capabilities)
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_image_properties_filter_passes_partial_inst_props(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['ImagePropertiesFilter']()
img_props = {'properties': {'architecture': 'x86_64',
'vm_mode': 'hvm'}}
filter_properties = {'request_spec': {'image': img_props}}
hypervisor_version = utils.convert_version_to_int('6.0.0')
capabilities = {'supported_instances': [('x86_64', 'kvm', 'hvm')],
'hypervisor_version': hypervisor_version}
host = fakes.FakeHostState('host1', 'node1', capabilities)
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_image_properties_filter_fails_partial_inst_props(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['ImagePropertiesFilter']()
img_props = {'properties': {'architecture': 'x86_64',
'vm_mode': 'hvm'}}
filter_properties = {'request_spec': {'image': img_props}}
hypervisor_version = utils.convert_version_to_int('6.0.0')
capabilities = {'supported_instances': [('x86_64', 'xen', 'xen')],
'hypervisor_version': hypervisor_version}
host = fakes.FakeHostState('host1', 'node1', capabilities)
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_image_properties_filter_passes_without_inst_props(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['ImagePropertiesFilter']()
filter_properties = {'request_spec': {}}
hypervisor_version = utils.convert_version_to_int('6.0.0')
capabilities = {'supported_instances': [('x86_64', 'kvm', 'hvm')],
'hypervisor_version': hypervisor_version}
host = fakes.FakeHostState('host1', 'node1', capabilities)
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_image_properties_filter_fails_without_host_props(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['ImagePropertiesFilter']()
img_props = {'properties': {'architecture': 'x86_64',
'hypervisor_type': 'kvm',
'vm_mode': 'hvm'}}
filter_properties = {'request_spec': {'image': img_props}}
hypervisor_version = utils.convert_version_to_int('6.0.0')
capabilities = {'enabled': True,
'hypervisor_version': hypervisor_version}
host = fakes.FakeHostState('host1', 'node1', capabilities)
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_image_properties_filter_passes_without_hyper_version(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['ImagePropertiesFilter']()
img_props = {'properties': {'architecture': 'x86_64',
'hypervisor_type': 'kvm',
'vm_mode': 'hvm',
'hypervisor_version_requires': '>=6.0'}}
filter_properties = {'request_spec': {'image': img_props}}
capabilities = {'enabled': True,
'supported_instances': [('x86_64', 'kvm', 'hvm')]}
host = fakes.FakeHostState('host1', 'node1', capabilities)
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_image_properties_filter_fails_with_unsupported_hyper_ver(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['ImagePropertiesFilter']()
img_props = {'properties': {'architecture': 'x86_64',
'hypervisor_type': 'kvm',
'vm_mode': 'hvm',
'hypervisor_version_requires': '>=6.0'}}
filter_properties = {'request_spec': {'image': img_props}}
capabilities = {'enabled': True,
'supported_instances': [('x86_64', 'kvm', 'hvm')],
'hypervisor_version': 5000}
host = fakes.FakeHostState('host1', 'node1', capabilities)
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def _do_test_compute_filter_extra_specs(self, ecaps, especs, passes):
"""In real Openstack runtime environment,compute capabilities
value may be number, so we should use number to do unit test.
"""
self._stub_service_is_up(True)
filt_cls = self.class_map['ComputeCapabilitiesFilter']()
capabilities = {}
capabilities.update(ecaps)
service = {'disabled': False}
filter_properties = {'instance_type': {'memory_mb': 1024,
'extra_specs': especs}}
host_state = {'free_ram_mb': 1024, 'service': service}
host_state.update(capabilities)
host = fakes.FakeHostState('host1', 'node1', host_state)
assertion = self.assertTrue if passes else self.assertFalse
assertion(filt_cls.host_passes(host, filter_properties))
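    # The extra-specs cases below exercise the scoping rules: un-scoped keys
    # and keys scoped with 'capabilities:' are matched against the host's
    # stats/capabilities, while keys carrying an unrelated scope (e.g.
    # 'trust:...' or 'wrong_scope:...') are ignored by
    # ComputeCapabilitiesFilter, which is why the "wrong scope" case passes.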
def test_compute_filter_passes_extra_specs_simple(self):
self._do_test_compute_filter_extra_specs(
ecaps={'stats': {'opt1': 1, 'opt2': 2}},
especs={'opt1': '1', 'opt2': '2', 'trust:trusted_host': 'true'},
passes=True)
def test_compute_filter_fails_extra_specs_simple(self):
self._do_test_compute_filter_extra_specs(
ecaps={'stats': {'opt1': 1, 'opt2': 2}},
especs={'opt1': '1', 'opt2': '222', 'trust:trusted_host': 'true'},
passes=False)
def test_compute_filter_pass_extra_specs_simple_with_scope(self):
self._do_test_compute_filter_extra_specs(
ecaps={'stats': {'opt1': 1, 'opt2': 2}},
especs={'capabilities:opt1': '1',
'trust:trusted_host': 'true'},
passes=True)
def test_compute_filter_pass_extra_specs_same_as_scope(self):
# Make sure this still works even if the key is the same as the scope
self._do_test_compute_filter_extra_specs(
ecaps={'capabilities': 1},
especs={'capabilities': '1'},
passes=True)
def test_compute_filter_extra_specs_simple_with_wrong_scope(self):
self._do_test_compute_filter_extra_specs(
ecaps={'opt1': 1, 'opt2': 2},
especs={'wrong_scope:opt1': '1',
'trust:trusted_host': 'true'},
passes=True)
def test_compute_filter_extra_specs_pass_multi_level_with_scope(self):
self._do_test_compute_filter_extra_specs(
ecaps={'stats': {'opt1': {'a': 1, 'b': {'aa': 2}}, 'opt2': 2}},
especs={'opt1:a': '1', 'capabilities:opt1:b:aa': '2',
'trust:trusted_host': 'true'},
passes=True)
def test_aggregate_filter_passes_no_extra_specs(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['AggregateInstanceExtraSpecsFilter']()
capabilities = {'opt1': 1, 'opt2': 2}
filter_properties = {'context': self.context, 'instance_type':
{'memory_mb': 1024}}
host = fakes.FakeHostState('host1', 'node1', capabilities)
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def _create_aggregate_with_host(self, name='fake_aggregate',
metadata=None,
hosts=['host1']):
values = {'name': name}
if metadata:
metadata['availability_zone'] = 'fake_avail_zone'
else:
metadata = {'availability_zone': 'fake_avail_zone'}
result = db.aggregate_create(self.context.elevated(), values, metadata)
for host in hosts:
db.aggregate_host_add(self.context.elevated(), result['id'], host)
return result
def _do_test_aggregate_filter_extra_specs(self, emeta, especs, passes):
self._stub_service_is_up(True)
filt_cls = self.class_map['AggregateInstanceExtraSpecsFilter']()
self._create_aggregate_with_host(name='fake2', metadata=emeta)
filter_properties = {'context': self.context,
'instance_type': {'memory_mb': 1024, 'extra_specs': especs}}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024})
assertion = self.assertTrue if passes else self.assertFalse
assertion(filt_cls.host_passes(host, filter_properties))
def test_aggregate_filter_fails_extra_specs_deleted_host(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['AggregateInstanceExtraSpecsFilter']()
extra_specs = {'opt1': 's== 1', 'opt2': 's== 2',
'trust:trusted_host': 'true'}
self._create_aggregate_with_host(metadata={'opt1': '1'})
agg2 = self._create_aggregate_with_host(name='fake2',
metadata={'opt2': '2'})
filter_properties = {'context': self.context, 'instance_type':
{'memory_mb': 1024, 'extra_specs': extra_specs}}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024})
db.aggregate_host_delete(self.context.elevated(), agg2['id'], 'host1')
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_aggregate_filter_passes_extra_specs_simple(self):
especs = {
# Un-scoped extra spec
'opt1': '1',
# Scoped extra spec that applies to this filter
'aggregate_instance_extra_specs:opt2': '2',
# Scoped extra spec that does not apply to this filter
'trust:trusted_host': 'true',
}
self._do_test_aggregate_filter_extra_specs(
emeta={'opt1': '1', 'opt2': '2'}, especs=especs, passes=True)
def test_aggregate_filter_passes_with_key_same_as_scope(self):
especs = {
# Un-scoped extra spec, make sure we don't blow up if it
# happens to match our scope.
'aggregate_instance_extra_specs': '1',
}
self._do_test_aggregate_filter_extra_specs(
emeta={'aggregate_instance_extra_specs': '1'},
especs=especs, passes=True)
def test_aggregate_filter_fails_extra_specs_simple(self):
self._do_test_aggregate_filter_extra_specs(
emeta={'opt1': '1', 'opt2': '2'},
especs={'opt1': '1', 'opt2': '222',
'trust:trusted_host': 'true'},
passes=False)
def _do_test_isolated_hosts(self, host_in_list, image_in_list,
set_flags=True,
restrict_isolated_hosts_to_isolated_images=True):
if set_flags:
self.flags(isolated_images=['isolated_image'],
isolated_hosts=['isolated_host'],
restrict_isolated_hosts_to_isolated_images=
restrict_isolated_hosts_to_isolated_images)
host_name = 'isolated_host' if host_in_list else 'free_host'
image_ref = 'isolated_image' if image_in_list else 'free_image'
filter_properties = {
'request_spec': {
'instance_properties': {'image_ref': image_ref}
}
}
filt_cls = self.class_map['IsolatedHostsFilter']()
host = fakes.FakeHostState(host_name, 'node', {})
return filt_cls.host_passes(host, filter_properties)
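    # Taken together, the isolation cases below cover the filter's behavior:
    # with both isolated_hosts and isolated_images configured, isolated images
    # may only land on isolated hosts and, when
    # restrict_isolated_hosts_to_isolated_images is set, isolated hosts accept
    # only isolated images; with no configuration at all the filter passes
    # every host.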
def test_isolated_hosts_fails_isolated_on_non_isolated(self):
self.assertFalse(self._do_test_isolated_hosts(False, True))
def test_isolated_hosts_fails_non_isolated_on_isolated(self):
self.assertFalse(self._do_test_isolated_hosts(True, False))
def test_isolated_hosts_passes_isolated_on_isolated(self):
self.assertTrue(self._do_test_isolated_hosts(True, True))
def test_isolated_hosts_passes_non_isolated_on_non_isolated(self):
self.assertTrue(self._do_test_isolated_hosts(False, False))
def test_isolated_hosts_no_config(self):
# If there are no hosts nor isolated images in the config, it should
# not filter at all. This is the default config.
self.assertTrue(self._do_test_isolated_hosts(False, True, False))
self.assertTrue(self._do_test_isolated_hosts(True, False, False))
self.assertTrue(self._do_test_isolated_hosts(True, True, False))
self.assertTrue(self._do_test_isolated_hosts(False, False, False))
def test_isolated_hosts_no_hosts_config(self):
self.flags(isolated_images=['isolated_image'])
# If there are no hosts in the config, it should only filter out
# images that are listed
self.assertFalse(self._do_test_isolated_hosts(False, True, False))
self.assertTrue(self._do_test_isolated_hosts(True, False, False))
self.assertFalse(self._do_test_isolated_hosts(True, True, False))
self.assertTrue(self._do_test_isolated_hosts(False, False, False))
def test_isolated_hosts_no_images_config(self):
self.flags(isolated_hosts=['isolated_host'])
# If there are no images in the config, it should only filter out
# isolated_hosts
self.assertTrue(self._do_test_isolated_hosts(False, True, False))
self.assertFalse(self._do_test_isolated_hosts(True, False, False))
self.assertFalse(self._do_test_isolated_hosts(True, True, False))
self.assertTrue(self._do_test_isolated_hosts(False, False, False))
def test_isolated_hosts_less_restrictive(self):
# If there are isolated hosts and non isolated images
self.assertTrue(self._do_test_isolated_hosts(True, False, True, False))
# If there are isolated hosts and isolated images
self.assertTrue(self._do_test_isolated_hosts(True, True, True, False))
# If there are non isolated hosts and non isolated images
self.assertTrue(self._do_test_isolated_hosts(False, False, True,
False))
# If there are non isolated hosts and isolated images
self.assertFalse(self._do_test_isolated_hosts(False, True, True,
False))
def test_json_filter_passes(self):
filt_cls = self.class_map['JsonFilter']()
filter_properties = {'instance_type': {'memory_mb': 1024,
'root_gb': 200,
'ephemeral_gb': 0},
'scheduler_hints': {'query': self.json_query}}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024,
'free_disk_mb': 200 * 1024})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_json_filter_passes_with_no_query(self):
filt_cls = self.class_map['JsonFilter']()
filter_properties = {'instance_type': {'memory_mb': 1024,
'root_gb': 200,
'ephemeral_gb': 0}}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 0,
'free_disk_mb': 0})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_json_filter_fails_on_memory(self):
filt_cls = self.class_map['JsonFilter']()
filter_properties = {'instance_type': {'memory_mb': 1024,
'root_gb': 200,
'ephemeral_gb': 0},
'scheduler_hints': {'query': self.json_query}}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1023,
'free_disk_mb': 200 * 1024})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_json_filter_fails_on_disk(self):
filt_cls = self.class_map['JsonFilter']()
filter_properties = {'instance_type': {'memory_mb': 1024,
'root_gb': 200,
'ephemeral_gb': 0},
'scheduler_hints': {'query': self.json_query}}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024,
'free_disk_mb': (200 * 1024) - 1})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_json_filter_fails_on_service_disabled(self):
filt_cls = self.class_map['JsonFilter']()
json_query = jsonutils.dumps(
['and', ['>=', '$free_ram_mb', 1024],
['>=', '$free_disk_mb', 200 * 1024],
['not', '$service.disabled']])
filter_properties = {'instance_type': {'memory_mb': 1024,
'local_gb': 200},
'scheduler_hints': {'query': json_query}}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024,
'free_disk_mb': 200 * 1024})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_json_filter_happy_day(self):
# Test json filter more thoroughly.
filt_cls = self.class_map['JsonFilter']()
raw = ['and',
'$capabilities.enabled',
['=', '$capabilities.opt1', 'match'],
['or',
['and',
['<', '$free_ram_mb', 30],
['<', '$free_disk_mb', 300]],
['and',
['>', '$free_ram_mb', 30],
['>', '$free_disk_mb', 300]]]]
filter_properties = {
'scheduler_hints': {
'query': jsonutils.dumps(raw),
},
}
# Passes
capabilities = {'opt1': 'match'}
service = {'disabled': False}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 10,
'free_disk_mb': 200,
'capabilities': capabilities,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
# Passes
capabilities = {'opt1': 'match'}
service = {'disabled': False}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 40,
'free_disk_mb': 400,
'capabilities': capabilities,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
# Fails due to capabilities being disabled
capabilities = {'enabled': False, 'opt1': 'match'}
service = {'disabled': False}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 40,
'free_disk_mb': 400,
'capabilities': capabilities,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
        # Fails due to being exactly the memory/disk values we don't want
capabilities = {'enabled': True, 'opt1': 'match'}
service = {'disabled': False}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 30,
'free_disk_mb': 300,
'capabilities': capabilities,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
# Fails due to memory lower but disk higher
capabilities = {'enabled': True, 'opt1': 'match'}
service = {'disabled': False}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 20,
'free_disk_mb': 400,
'capabilities': capabilities,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
# Fails due to capabilities 'opt1' not equal
capabilities = {'enabled': True, 'opt1': 'no-match'}
service = {'enabled': True}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 20,
'free_disk_mb': 400,
'capabilities': capabilities,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_json_filter_basic_operators(self):
filt_cls = self.class_map['JsonFilter']()
host = fakes.FakeHostState('host1', 'node1',
{})
# (operator, arguments, expected_result)
ops_to_test = [
['=', [1, 1], True],
['=', [1, 2], False],
['<', [1, 2], True],
['<', [1, 1], False],
['<', [2, 1], False],
['>', [2, 1], True],
['>', [2, 2], False],
['>', [2, 3], False],
['<=', [1, 2], True],
['<=', [1, 1], True],
['<=', [2, 1], False],
['>=', [2, 1], True],
['>=', [2, 2], True],
['>=', [2, 3], False],
['in', [1, 1], True],
['in', [1, 1, 2, 3], True],
['in', [4, 1, 2, 3], False],
['not', [True], False],
['not', [False], True],
['or', [True, False], True],
['or', [False, False], False],
['and', [True, True], True],
['and', [False, False], False],
['and', [True, False], False],
# Nested ((True or False) and (2 > 1)) == Passes
['and', [['or', True, False], ['>', 2, 1]], True]]
for (op, args, expected) in ops_to_test:
raw = [op] + args
filter_properties = {
'scheduler_hints': {
'query': jsonutils.dumps(raw),
},
}
self.assertEqual(expected,
filt_cls.host_passes(host, filter_properties))
# This results in [False, True, False, True] and if any are True
# then it passes...
raw = ['not', True, False, True, False]
filter_properties = {
'scheduler_hints': {
'query': jsonutils.dumps(raw),
},
}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
# This results in [False, False, False] and if any are True
# then it passes...which this doesn't
raw = ['not', True, True, True]
filter_properties = {
'scheduler_hints': {
'query': jsonutils.dumps(raw),
},
}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_json_filter_unknown_operator_raises(self):
filt_cls = self.class_map['JsonFilter']()
raw = ['!=', 1, 2]
filter_properties = {
'scheduler_hints': {
'query': jsonutils.dumps(raw),
},
}
host = fakes.FakeHostState('host1', 'node1',
{})
self.assertRaises(KeyError,
filt_cls.host_passes, host, filter_properties)
def test_json_filter_empty_filters_pass(self):
filt_cls = self.class_map['JsonFilter']()
host = fakes.FakeHostState('host1', 'node1',
{})
raw = []
filter_properties = {
'scheduler_hints': {
'query': jsonutils.dumps(raw),
},
}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
raw = {}
filter_properties = {
'scheduler_hints': {
'query': jsonutils.dumps(raw),
},
}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_json_filter_invalid_num_arguments_fails(self):
filt_cls = self.class_map['JsonFilter']()
host = fakes.FakeHostState('host1', 'node1',
{})
raw = ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]]
filter_properties = {
'scheduler_hints': {
'query': jsonutils.dumps(raw),
},
}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
raw = ['>', 1]
filter_properties = {
'scheduler_hints': {
'query': jsonutils.dumps(raw),
},
}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_json_filter_unknown_variable_ignored(self):
filt_cls = self.class_map['JsonFilter']()
host = fakes.FakeHostState('host1', 'node1',
{})
raw = ['=', '$........', 1, 1]
filter_properties = {
'scheduler_hints': {
'query': jsonutils.dumps(raw),
},
}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
raw = ['=', '$foo', 2, 2]
filter_properties = {
'scheduler_hints': {
'query': jsonutils.dumps(raw),
},
}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_trusted_filter_default_passes(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['TrustedFilter']()
filter_properties = {'context': self.context.elevated(),
'instance_type': {'memory_mb': 1024}}
host = fakes.FakeHostState('host1', 'node1', {})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_trusted_filter_trusted_and_trusted_passes(self):
self.oat_data = {"hosts": [{"host_name": "host1",
"trust_lvl": "trusted",
"vtime": timeutils.isotime()}]}
self._stub_service_is_up(True)
filt_cls = self.class_map['TrustedFilter']()
extra_specs = {'trust:trusted_host': 'trusted'}
filter_properties = {'context': self.context.elevated(),
'instance_type': {'memory_mb': 1024,
'extra_specs': extra_specs}}
host = fakes.FakeHostState('host1', 'node1', {})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_trusted_filter_trusted_and_untrusted_fails(self):
self.oat_data = {"hosts": [{"host_name": "host1",
"trust_lvl": "untrusted",
"vtime": timeutils.isotime()}]}
self._stub_service_is_up(True)
filt_cls = self.class_map['TrustedFilter']()
extra_specs = {'trust:trusted_host': 'trusted'}
filter_properties = {'context': self.context.elevated(),
'instance_type': {'memory_mb': 1024,
'extra_specs': extra_specs}}
host = fakes.FakeHostState('host1', 'node1', {})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_trusted_filter_untrusted_and_trusted_fails(self):
self.oat_data = {"hosts": [{"host_name": "host1",
"trust_lvl": "trusted",
"vtime": timeutils.isotime()}]}
self._stub_service_is_up(True)
filt_cls = self.class_map['TrustedFilter']()
extra_specs = {'trust:trusted_host': 'untrusted'}
filter_properties = {'context': self.context.elevated(),
'instance_type': {'memory_mb': 1024,
'extra_specs': extra_specs}}
host = fakes.FakeHostState('host1', 'node1', {})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_trusted_filter_untrusted_and_untrusted_passes(self):
self.oat_data = {"hosts": [{"host_name": "host1",
"trust_lvl": "untrusted",
"vtime": timeutils.isotime()}]}
self._stub_service_is_up(True)
filt_cls = self.class_map['TrustedFilter']()
extra_specs = {'trust:trusted_host': 'untrusted'}
filter_properties = {'context': self.context.elevated(),
'instance_type': {'memory_mb': 1024,
'extra_specs': extra_specs}}
host = fakes.FakeHostState('host1', 'node1', {})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_trusted_filter_update_cache(self):
self.oat_data = {"hosts": [{"host_name":
"host1", "trust_lvl": "untrusted",
"vtime": timeutils.isotime()}]}
filt_cls = self.class_map['TrustedFilter']()
extra_specs = {'trust:trusted_host': 'untrusted'}
filter_properties = {'context': self.context.elevated(),
'instance_type': {'memory_mb': 1024,
'extra_specs': extra_specs}}
host = fakes.FakeHostState('host1', 'node1', {})
filt_cls.host_passes(host, filter_properties) # Fill the caches
self.oat_attested = False
filt_cls.host_passes(host, filter_properties)
self.assertFalse(self.oat_attested)
self.oat_attested = False
timeutils.set_time_override(timeutils.utcnow())
timeutils.advance_time_seconds(
CONF.trusted_computing.attestation_auth_timeout + 80)
filt_cls.host_passes(host, filter_properties)
self.assertTrue(self.oat_attested)
timeutils.clear_time_override()
def test_trusted_filter_update_cache_timezone(self):
self.oat_data = {"hosts": [{"host_name": "host1",
"trust_lvl": "untrusted",
"vtime": "2012-09-09T05:10:40-04:00"}]}
filt_cls = self.class_map['TrustedFilter']()
extra_specs = {'trust:trusted_host': 'untrusted'}
filter_properties = {'context': self.context.elevated(),
'instance_type': {'memory_mb': 1024,
'extra_specs': extra_specs}}
host = fakes.FakeHostState('host1', 'node1', {})
timeutils.set_time_override(
timeutils.normalize_time(
timeutils.parse_isotime("2012-09-09T09:10:40Z")))
filt_cls.host_passes(host, filter_properties) # Fill the caches
self.oat_attested = False
filt_cls.host_passes(host, filter_properties)
self.assertFalse(self.oat_attested)
self.oat_attested = False
timeutils.advance_time_seconds(
CONF.trusted_computing.attestation_auth_timeout - 10)
filt_cls.host_passes(host, filter_properties)
self.assertFalse(self.oat_attested)
timeutils.clear_time_override()
def test_core_filter_passes(self):
filt_cls = self.class_map['CoreFilter']()
filter_properties = {'instance_type': {'vcpus': 1}}
self.flags(cpu_allocation_ratio=2)
host = fakes.FakeHostState('host1', 'node1',
{'vcpus_total': 4, 'vcpus_used': 7})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_core_filter_fails_safe(self):
filt_cls = self.class_map['CoreFilter']()
filter_properties = {'instance_type': {'vcpus': 1}}
host = fakes.FakeHostState('host1', 'node1', {})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_core_filter_fails(self):
filt_cls = self.class_map['CoreFilter']()
filter_properties = {'instance_type': {'vcpus': 1}}
self.flags(cpu_allocation_ratio=2)
host = fakes.FakeHostState('host1', 'node1',
{'vcpus_total': 4, 'vcpus_used': 8})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_aggregate_core_filter_value_error(self):
filt_cls = self.class_map['AggregateCoreFilter']()
filter_properties = {'context': self.context,
'instance_type': {'vcpus': 1}}
self.flags(cpu_allocation_ratio=2)
host = fakes.FakeHostState('host1', 'node1',
{'vcpus_total': 4, 'vcpus_used': 7})
self._create_aggregate_with_host(name='fake_aggregate',
hosts=['host1'],
metadata={'cpu_allocation_ratio': 'XXX'})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
self.assertEqual(4 * 2, host.limits['vcpu'])
def test_aggregate_core_filter_default_value(self):
filt_cls = self.class_map['AggregateCoreFilter']()
filter_properties = {'context': self.context,
'instance_type': {'vcpus': 1}}
self.flags(cpu_allocation_ratio=2)
host = fakes.FakeHostState('host1', 'node1',
{'vcpus_total': 4, 'vcpus_used': 8})
# False: fallback to default flag w/o aggregates
self.assertFalse(filt_cls.host_passes(host, filter_properties))
self._create_aggregate_with_host(name='fake_aggregate',
hosts=['host1'],
metadata={'cpu_allocation_ratio': '3'})
# True: use ratio from aggregates
self.assertTrue(filt_cls.host_passes(host, filter_properties))
self.assertEqual(4 * 3, host.limits['vcpu'])
def test_aggregate_core_filter_conflict_values(self):
filt_cls = self.class_map['AggregateCoreFilter']()
filter_properties = {'context': self.context,
'instance_type': {'vcpus': 1}}
self.flags(cpu_allocation_ratio=1)
host = fakes.FakeHostState('host1', 'node1',
{'vcpus_total': 4, 'vcpus_used': 8})
self._create_aggregate_with_host(name='fake_aggregate1',
hosts=['host1'],
metadata={'cpu_allocation_ratio': '2'})
self._create_aggregate_with_host(name='fake_aggregate2',
hosts=['host1'],
metadata={'cpu_allocation_ratio': '3'})
# use the minimum ratio from aggregates
self.assertFalse(filt_cls.host_passes(host, filter_properties))
self.assertEqual(4 * 2, host.limits['vcpu'])
@staticmethod
def _make_zone_request(zone, is_admin=False):
ctxt = context.RequestContext('fake', 'fake', is_admin=is_admin)
return {
'context': ctxt,
'request_spec': {
'instance_properties': {
'availability_zone': zone
}
}
}
def test_availability_zone_filter_same(self):
filt_cls = self.class_map['AvailabilityZoneFilter']()
service = {'availability_zone': 'nova'}
request = self._make_zone_request('nova')
host = fakes.FakeHostState('host1', 'node1',
{'service': service})
self.assertTrue(filt_cls.host_passes(host, request))
def test_availability_zone_filter_different(self):
filt_cls = self.class_map['AvailabilityZoneFilter']()
service = {'availability_zone': 'nova'}
request = self._make_zone_request('bad')
host = fakes.FakeHostState('host1', 'node1',
{'service': service})
self.assertFalse(filt_cls.host_passes(host, request))
def test_retry_filter_disabled(self):
# Test case where retry/re-scheduling is disabled.
filt_cls = self.class_map['RetryFilter']()
host = fakes.FakeHostState('host1', 'node1', {})
filter_properties = {}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_retry_filter_pass(self):
# Node not previously tried.
filt_cls = self.class_map['RetryFilter']()
host = fakes.FakeHostState('host1', 'nodeX', {})
retry = dict(num_attempts=2,
hosts=[['host1', 'node1'], # same host, different node
['host2', 'node2'], # different host and node
])
filter_properties = dict(retry=retry)
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_retry_filter_fail(self):
# Node was already tried.
filt_cls = self.class_map['RetryFilter']()
host = fakes.FakeHostState('host1', 'node1', {})
retry = dict(num_attempts=1,
hosts=[['host1', 'node1']])
filter_properties = dict(retry=retry)
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_filter_num_iops_passes(self):
self.flags(max_io_ops_per_host=8)
filt_cls = self.class_map['IoOpsFilter']()
host = fakes.FakeHostState('host1', 'node1',
{'num_io_ops': 7})
filter_properties = {}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_filter_num_iops_fails(self):
self.flags(max_io_ops_per_host=8)
filt_cls = self.class_map['IoOpsFilter']()
host = fakes.FakeHostState('host1', 'node1',
{'num_io_ops': 8})
filter_properties = {}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_filter_num_instances_passes(self):
self.flags(max_instances_per_host=5)
filt_cls = self.class_map['NumInstancesFilter']()
host = fakes.FakeHostState('host1', 'node1',
{'num_instances': 4})
filter_properties = {}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_filter_num_instances_fails(self):
self.flags(max_instances_per_host=5)
filt_cls = self.class_map['NumInstancesFilter']()
host = fakes.FakeHostState('host1', 'node1',
{'num_instances': 5})
filter_properties = {}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_group_anti_affinity_filter_passes(self):
filt_cls = self.class_map['GroupAntiAffinityFilter']()
host = fakes.FakeHostState('host1', 'node1', {})
filter_properties = {'group_hosts': []}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
filter_properties = {'group_hosts': ['host2']}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_group_anti_affinity_filter_fails(self):
filt_cls = self.class_map['GroupAntiAffinityFilter']()
host = fakes.FakeHostState('host1', 'node1', {})
filter_properties = {'group_hosts': ['host1']}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_group_affinity_filter_passes(self):
filt_cls = self.class_map['GroupAffinityFilter']()
host = fakes.FakeHostState('host1', 'node1', {})
filter_properties = {'group_hosts': ['host1']}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_group_affinity_filter_fails(self):
filt_cls = self.class_map['GroupAffinityFilter']()
host = fakes.FakeHostState('host1', 'node1', {})
filter_properties = {'group_hosts': ['host2']}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_aggregate_multi_tenancy_isolation_with_meta_passes(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['AggregateMultiTenancyIsolation']()
aggr_meta = {'filter_tenant_id': 'my_tenantid'}
self._create_aggregate_with_host(name='fake1', metadata=aggr_meta,
hosts=['host1'])
filter_properties = {'context': self.context,
'request_spec': {
'instance_properties': {
'project_id': 'my_tenantid'}}}
host = fakes.FakeHostState('host1', 'compute', {})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_aggregate_multi_tenancy_isolation_fails(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['AggregateMultiTenancyIsolation']()
aggr_meta = {'filter_tenant_id': 'other_tenantid'}
self._create_aggregate_with_host(name='fake1', metadata=aggr_meta,
hosts=['host1'])
filter_properties = {'context': self.context,
'request_spec': {
'instance_properties': {
'project_id': 'my_tenantid'}}}
host = fakes.FakeHostState('host1', 'compute', {})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_aggregate_multi_tenancy_isolation_no_meta_passes(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['AggregateMultiTenancyIsolation']()
aggr_meta = {}
self._create_aggregate_with_host(name='fake1', metadata=aggr_meta,
hosts=['host1'])
filter_properties = {'context': self.context,
'request_spec': {
'instance_properties': {
'project_id': 'my_tenantid'}}}
host = fakes.FakeHostState('host1', 'compute', {})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def _fake_pci_support_requests(self, pci_requests):
self.pci_requests = pci_requests
return self.pci_request_result
def test_pci_passthrough_pass(self):
filt_cls = self.class_map['PciPassthroughFilter']()
requests = [{'count': 1, 'spec': [{'vendor_id': '8086'}]}]
filter_properties = {'pci_requests': requests}
self.stubs.Set(pci_stats.PciDeviceStats, 'support_requests',
self._fake_pci_support_requests)
host = fakes.FakeHostState(
'host1', 'node1',
attribute_dict={'pci_stats': pci_stats.PciDeviceStats()})
self.pci_request_result = True
self.assertTrue(filt_cls.host_passes(host, filter_properties))
self.assertEqual(self.pci_requests, requests)
def test_pci_passthrough_fail(self):
filt_cls = self.class_map['PciPassthroughFilter']()
requests = [{'count': 1, 'spec': [{'vendor_id': '8086'}]}]
filter_properties = {'pci_requests': requests}
self.stubs.Set(pci_stats.PciDeviceStats, 'support_requests',
self._fake_pci_support_requests)
host = fakes.FakeHostState(
'host1', 'node1',
attribute_dict={'pci_stats': pci_stats.PciDeviceStats()})
self.pci_request_result = False
self.assertFalse(filt_cls.host_passes(host, filter_properties))
self.assertEqual(self.pci_requests, requests)
def test_pci_passthrough_no_pci_request(self):
filt_cls = self.class_map['PciPassthroughFilter']()
filter_properties = {}
host = fakes.FakeHostState('h1', 'n1', {})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
    def test_pci_passthrough_compute_stats(self):
filt_cls = self.class_map['PciPassthroughFilter']()
requests = [{'count': 1, 'spec': [{'vendor_id': '8086'}]}]
filter_properties = {'pci_requests': requests}
self.stubs.Set(pci_stats.PciDeviceStats, 'support_requests',
self._fake_pci_support_requests)
host = fakes.FakeHostState(
'host1', 'node1',
attribute_dict={})
self.pci_request_result = True
self.assertRaises(AttributeError, filt_cls.host_passes,
host, filter_properties)
|
n0ano/gantt
|
gantt/tests/scheduler/test_host_filters.py
|
Python
|
apache-2.0
| 72,910
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.23
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1NFSVolumeSource(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'path': 'str',
'read_only': 'bool',
'server': 'str'
}
attribute_map = {
'path': 'path',
'read_only': 'readOnly',
'server': 'server'
}
def __init__(self, path=None, read_only=None, server=None, local_vars_configuration=None): # noqa: E501
"""V1NFSVolumeSource - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._path = None
self._read_only = None
self._server = None
self.discriminator = None
self.path = path
if read_only is not None:
self.read_only = read_only
self.server = server
@property
def path(self):
"""Gets the path of this V1NFSVolumeSource. # noqa: E501
Path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs # noqa: E501
:return: The path of this V1NFSVolumeSource. # noqa: E501
:rtype: str
"""
return self._path
@path.setter
def path(self, path):
"""Sets the path of this V1NFSVolumeSource.
Path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs # noqa: E501
:param path: The path of this V1NFSVolumeSource. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and path is None: # noqa: E501
raise ValueError("Invalid value for `path`, must not be `None`") # noqa: E501
self._path = path
@property
def read_only(self):
"""Gets the read_only of this V1NFSVolumeSource. # noqa: E501
ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs # noqa: E501
:return: The read_only of this V1NFSVolumeSource. # noqa: E501
:rtype: bool
"""
return self._read_only
@read_only.setter
def read_only(self, read_only):
"""Sets the read_only of this V1NFSVolumeSource.
ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs # noqa: E501
:param read_only: The read_only of this V1NFSVolumeSource. # noqa: E501
:type: bool
"""
self._read_only = read_only
@property
def server(self):
"""Gets the server of this V1NFSVolumeSource. # noqa: E501
Server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs # noqa: E501
:return: The server of this V1NFSVolumeSource. # noqa: E501
:rtype: str
"""
return self._server
@server.setter
def server(self, server):
"""Sets the server of this V1NFSVolumeSource.
Server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs # noqa: E501
:param server: The server of this V1NFSVolumeSource. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and server is None: # noqa: E501
raise ValueError("Invalid value for `server`, must not be `None`") # noqa: E501
self._server = server
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1NFSVolumeSource):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1NFSVolumeSource):
return True
return self.to_dict() != other.to_dict()
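# A minimal usage sketch: build the model and serialize it with to_dict();
# the export path and server name below are made up for illustration.
def _example_nfs_volume_source():
    source = V1NFSVolumeSource(path='/exports/data',
                               server='nfs.example.com',
                               read_only=True)
    # -> {'path': '/exports/data', 'read_only': True, 'server': 'nfs.example.com'}
    return source.to_dict()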
|
kubernetes-client/python
|
kubernetes/client/models/v1_nfs_volume_source.py
|
Python
|
apache-2.0
| 5,884
|
# Log Parser for RTI Connext.
#
# Copyright 2016 Real-Time Innovations, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Log parsing functions for logs related to DDS events.
Functions:
+ on_query_udpv4_interfaces: it happens when it queries the interfaces.
+ on_find_valid_interface: it happens when a valid interface is found.
+ on_get_valid_interface: it happens when a valid interface is queried.
+ on_skipped_interface: it happens when an interface is skipped.
+ on_create_participant: it happens for new participants.
+ on_delete_participant: it happens for deleted participants.
+ on_create_topic: it happens for new topics.
+ on_create_cft: it happens for new CFT.
+ on_delete_topic: it happens for deleted topics.
+ on_create_writer: it happens for new DataWriters.
+ on_create_reader: it happens for new DataReaders.
+ on_delete_writer: it happens for deleted DataWriters.
+ on_delete_reader: it happens for deleted DataReaders.
+ on_duplicate_topic_name_error: it happens for topics with same name.
+ on_delete_topic_before_cft: it happens when deleting a topic before a CFT.
+ on_fail_delete_flowcontrollers: it happens when delete FC fails.
+ on_inconsistent_transport_discovery_configuration: it happens for
  inconsistencies in the discovery configuration.
+ on_discover_participant: it happens for discovered participants.
+ on_update_remote_participant: it happens when updating remote participant.
+ on_announce_local_participant: it happens when announcing participant.
+ on_discover_publication: it happens for discovered writers.
+ on_update_endpoint: it happens when updating an endpoint.
+ on_announce_local_publication: it happens when announcing a writer.
+ on_announce_local_subscription: it happens when announcing a reader.
+ on_participant_ignore_itself: it happens when ignoring itself.
+ on_lose_discovery_samples: it happens when losing discovery samples.
+ on_match_entity: it happens when an entity is matched.
+ on_different_type_names: it happens when TypeNames are different.
+ on_typeobject_received: it happens when comparing TypeObjects.
+ on_register_unkeyed_instance: it happens on registering unkeyed instances.
+ on_get_unkeyed_key: it happens when getting key from unkeyed sample.
+ on_unregister_unkeyed_instance: it happens when unregistering unkeyed.
+ on_library_version: it happens for the library version.
+ on_participant_initial_peers: it happens for the initial peers.
+ on_envvar_file_not_found: it happens when it can't find an env var or file.
+ on_envvar_file_found: it happens when it finds an env var or file.
"""
from __future__ import absolute_import
from devices.logger import (log_cfg, log_error, log_event, log_process,
log_warning)
from utils import (get_locator, get_oid, get_participant, get_topic_name,
get_type_name, hex2ip, is_builtin_entity, parse_guid,
set_local_address)
# --------------------------------------------------------------------------- #
# -- Network Interfaces -- #
# --------------------------------------------------------------------------- #
def on_query_udpv4_interfaces(match, state):
"""It happens when it queries the interfaces."""
flags = {
0x01: "UP", 0x02: "BROADCAST", 0x04: "LOOPBACK", 0x08: "POINTOPOINT",
0x10: "MULTICAST", 0x20: "RUNNING"}
addr = get_participant(hex2ip(match[0], True), state)
flag = int(match[1], 16)
flag_name = ""
for bit in flags:
if flag & bit != 0:
flag_name += flags[bit] + "|"
log_event("Interface: %s is %s" % (addr, flag_name[:-1]), state, 2)
def on_find_valid_interface(match, state):
"""It happens when a valid interface is found."""
log_cfg("Valid interface: %s" % match[0], state)
def on_get_valid_interface(match, state):
"""It happens when a valid interface is queried."""
if match[2] == "1":
multicast = "with" if match[3] == "1" else "no"
log_cfg("Valid interface: %s (%s multicast)" % (match[1], multicast),
state)
def on_skipped_interface(match, state):
"""It happens when an interface is skipped."""
log_event("Skipped interface: %s" % match[0], state, 2)
# --------------------------------------------------------------------------- #
# -- Create or delete entities -- #
# --------------------------------------------------------------------------- #
def on_create_participant(match, state):
"""It happens for new participants."""
log_event("Created participant, domain: %3s index: %s" %
(match[0], match[1]), state)
def on_delete_participant(match, state):
"""It happens for deleted participants."""
log_event("Deleted participant, domain: %3s index: %s" %
(match[0], match[1]), state)
def on_create_topic(match, state):
"""It happens for new topics."""
topic = get_topic_name(match[0], state)
typ = get_type_name(match[1], state)
log_event("Created topic, name: '%s', type: '%s'" %
(topic, typ), state)
def on_create_cft(match, state):
"""It happens for new CFT."""
topic = get_topic_name(match[0], state)
log_event("Created ContentFilteredTopic, name: '%s'" % topic, state)
def on_delete_topic(match, state):
"""It happens for deleted topics."""
topic = get_topic_name(match[0], state)
typ = get_type_name(match[1], state)
log_event("Deleted topic, name: '%s', type: '%s'" % (topic, typ),
state, 1)
def on_create_writer(match, state):
"""It happens for new DataWriters."""
topic = get_topic_name(match[0], state)
log_event("Created writer for topic '%s'" % topic, state)
def on_create_reader(match, state):
"""It happens for new DataReader."""
topic = get_topic_name(match[0], state)
log_event("Created reader for topic '%s'" % topic, state)
def on_delete_writer(match, state):
"""It happens for deleted DataWriters."""
topic = get_topic_name(match[0], state)
log_event("Deleted writer for topic '%s'" % topic, state)
def on_delete_reader(match, state):
"""It happens for deleted DataReaders."""
topic = get_topic_name(match[0], state)
log_event("Deleted reader for topic '%s'" % topic, state)
def on_duplicate_topic_name_error(match, state):
"""It happens when there is a topic name duplication."""
topic = get_topic_name(match[0], state)
log_error("[LP-2] Topic name already in use by another topic: %s" % topic,
state)
def on_delete_topic_before_cft(match, state):
"""It happens when deleting a topic before its CFT."""
num_cft = match[0]
log_error("[LP-7] Cannot delete topic before its %s ContentFilteredTopics"
% num_cft, state)
def on_fail_delete_flowcontrollers(match, state):
"""It happens when delete FC fails."""
num_flowcontrol = match[0]
log_error("[LP-15] Cannot delete %s FlowControllers" % (num_flowcontrol) +
" from delete_contained_entities", state)
# pylint: disable=W0613
def on_inconsistent_transport_discovery_configuration(match, state):
"""It happens for inconsistencies in the discovery configuration."""
log_error("Inconsistent transport/discovery configuration", state)
# --------------------------------------------------------------------------- #
# -- Discover remote or local entities -- #
# --------------------------------------------------------------------------- #
def on_discover_participant(match, state):
"""It happens for discovered participants."""
local_address = parse_guid(state, match[0], match[1])
full_addr = parse_guid(state, match[0], match[1], match[2])
full_addr = " ".join(full_addr.split())
log_process(local_address, "", "Discovered new participant (%s)" %
full_addr, state)
def on_update_remote_participant(match, state):
"""It happens when updating remote participant."""
local_address = parse_guid(state, match[0], match[1])
full_addr = parse_guid(state, match[0], match[1], match[2])
full_addr = " ".join(full_addr.split())
part_oid = get_oid(match[3])
log_process(local_address, "", "Discovered/Updated participant (%s - %s)" %
(full_addr, part_oid), state, 1)
def on_announce_local_participant(match, state):
"""It happens when announcing participant."""
guid = hex2ip(match[0]) + " " + str(int(match[1], 16)).zfill(5)
set_local_address(guid, state)
def on_discover_publication(match, state):
"""It happens for discovered writers."""
remote_addr = parse_guid(state, match[0], match[1], match[2])
pub_oid = get_oid(match[3])
log_process(remote_addr, "",
"Discovered new publication %s" % pub_oid,
state)
def on_update_endpoint(match, state):
"""It happens when updating an endpoint."""
remote_addr = parse_guid(state, match[0], match[1], match[2])
pub_oid = get_oid(match[3])
log_process(remote_addr, "", "Discovered/Updated publication %s" % pub_oid,
state, 1)
def on_announce_local_publication(match, state):
"""It happens when announcing a writer."""
local_addr = parse_guid(state, match[0], match[1], match[2])
pub_oid = get_oid(match[3])
log_process(local_addr, "", "Announcing new writer %s" % pub_oid, state)
def on_announce_local_subscription(match, state):
"""It happens when announcing a reader."""
local_addr = parse_guid(state, match[0], match[1], match[2])
sub_oid = get_oid(match[3])
log_process(local_addr, "", "Announcing new reader %s" % sub_oid, state)
# pylint: disable=W0613
def on_participant_ignore_itself(match, state):
"""It happens when ignoring itself."""
log_process("", "", "Participant is ignoring itself", state)
def on_lose_discovery_samples(match, state):
"""It happens when losing discovery samples."""
entity_type = match[0]
entity_oid = get_oid(match[1])
total = match[2]
delta = match[3]
log_warning("%s discovery samples lost for %s %s (%s in total)" %
(delta, entity_type, entity_oid, total), state)
# --------------------------------------------------------------------------- #
# -- Match remote or local entities -- #
# --------------------------------------------------------------------------- #
def on_match_entity(entity2, kind):
"""It happens when an entity is matched."""
def match_entity(match, state):
"""It happens when a specific entity is matched."""
entity2_addr = parse_guid(state, match[0], match[1], match[2])
entity2_oid = get_oid(match[3])
entity1_oid = get_oid(match[4])
verb = 1 if is_builtin_entity(match[4]) else 0
reliable = match[5] # Best-Effort or Reliable
log_process(entity2_addr, entity1_oid, "Discovered %s %s %s %s" %
(kind, reliable, entity2, entity2_oid),
state,
verb)
return match_entity
def on_different_type_names(match, state):
"""It happens when there isn't TypeObject and type names are different."""
topic = get_topic_name(match[0], state)
type1 = get_type_name(match[1], state)
type2 = get_type_name(match[2], state)
log_error("[LP-18] Cannot match remote entity in topic '%s': " % (topic) +
"Different type names found ('%s', '%s')" % (type1, type2),
state)
def on_typeobject_received(match, state):
"""It happens for discovered entities when comparing TypeObjects."""
log_process("", "", "TypeObject %s" % match[0], state, 2)
# --------------------------------------------------------------------------- #
# -- Bad usage of the API -- #
# --------------------------------------------------------------------------- #
# pylint: disable=W0613
def on_register_unkeyed_instance(match, state):
"""It happens when registering unkeyed instances."""
log_warning("[LP-4] Try to register instance with no key field.", state)
# pylint: disable=W0613
def on_get_unkeyed_key(match, state):
"""It happens when getting key from unkeyed sample."""
log_error("[LP-5] Try to get key from unkeyed type.", state)
def on_unregister_unkeyed_instance(match, state):
"""It happens when unregistering unkeyed sample."""
log_warning("[LP-6] Try to unregister instance with no key field.", state)
# --------------------------------------------------------------------------- #
# -- General information -- #
# --------------------------------------------------------------------------- #
def on_library_version(match, state):
"""It happens for the library version."""
log_cfg("Version of %s is %s" % (match[0], match[1]), state)
def on_participant_initial_peers(match, state):
"""It happens for the initial peers."""
initial_peers = [get_locator(peer, state) for peer in match[0].split(",")]
state['initial_peers'] = initial_peers
log_cfg("Initial peers: %s" % ", ".join(initial_peers), state)
def on_envvar_file_not_found(match, state):
"""It happens when the middleware cannot find an env var or file."""
log_cfg("%s %s not found" % (match[0].capitalize(), match[1]), state)
def on_envvar_file_found(match, state):
"""It happens when the middleware found an env var or file."""
log_cfg("%s %s found" % (match[0].capitalize(), match[1]), state)
|
iblancasa/rticonnextdds-logparser
|
src/events/events.py
|
Python
|
apache-2.0
| 14,138
|
"""
R objects as Python objects.
The module is structured around the singleton r of class R,
that represents an embedded R.
License: GPLv3.0 (although a dual license can be worked out)
"""
import os, sys
import array
import itertools
import rpy2.rinterface as rinterface
import rpy2.rlike.container as rlc
import conversion
from rpy2.robjects.robject import RObjectMixin, RObject
from rpy2.robjects.methods import RS4
from rpy2.robjects.vectors import *
from rpy2.robjects.functions import Function, SignatureTranslatedFunction
from rpy2.rinterface import NA_Real, NA_Integer, NA_Logical, NA_Character
_parse = rinterface.baseenv['parse']
_reval = rinterface.baseenv['eval']
# missing values
# keep them in vectors to keep compatibility with the 2.1.x series
NA_real = FloatVector((NA_Real, ))
NA_integer = IntVector((NA_Integer, ))
NA_bool = BoolVector((NA_Logical, ))
NA_character = StrVector((NA_Character, ))
NA_complex = ComplexVector(_reval(_parse(text = rinterface.StrSexpVector(("NA_complex_", )))))
# NULL
NULL = _reval(_parse(text = rinterface.StrSexpVector(("NULL", ))))
# TRUE/FALSE
TRUE = _reval(_parse(text = rinterface.StrSexpVector(("TRUE", ))))
FALSE = _reval(_parse(text = rinterface.StrSexpVector(("FALSE", ))))
#FIXME: close everything when leaving (check RPy for that).
def default_ri2py(o):
""" Convert :class:`rpy2.rinterface.Sexp` to higher-level objects,
without copying the R objects.
:param o: object
:rtype: :class:`rpy2.robjects.RObject (and subclasses)`
"""
res = None
try:
rcls = o.do_slot("class")[0]
except LookupError, le:
rcls = None
if isinstance(o, RObject):
res = o
elif isinstance(o, rinterface.SexpVector):
if rcls == 'data.frame':
res = vectors.DataFrame(o)
if res is None:
try:
dim = o.do_slot("dim")
if len(dim) == 2:
res = vectors.Matrix(o)
else:
res = vectors.Array(o)
except LookupError, le:
if o.typeof == rinterface.INTSXP:
if rcls == 'factor':
res = vectors.FactorVector(o)
else:
res = vectors.IntVector(o)
elif o.typeof == rinterface.REALSXP:
res = vectors.FloatVector(o)
elif o.typeof == rinterface.STRSXP:
res = vectors.StrVector(o)
elif o.typeof == rinterface.LANGSXP and rcls == 'formula':
res = Formula(o)
else:
res = vectors.Vector(o)
elif isinstance(o, rinterface.SexpClosure):
res = SignatureTranslatedFunction(o)
elif isinstance(o, rinterface.SexpEnvironment):
res = Environment(o)
elif isinstance(o, rinterface.SexpS4):
res = RS4(o)
else:
res = RObject(o)
return res
conversion.ri2py = default_ri2py
def default_py2ri(o):
""" Convert arbitrary Python object to :class:`rpy2.rinterface.Sexp` to objects,
creating an R object with the content of the Python object in the process
(wich means data copying).
:param o: object
:rtype: :class:`rpy2.rinterface.Sexp` (and subclasses)
"""
if isinstance(o, RObject):
res = rinterface.Sexp(o)
if isinstance(o, rinterface.Sexp):
res = o
elif isinstance(o, array.array):
if o.typecode in ('h', 'H', 'i', 'I'):
res = rinterface.SexpVector(o, rinterface.INTSXP)
elif o.typecode in ('f', 'd'):
res = rinterface.SexpVector(o, rinterface.REALSXP)
else:
raise(ValueError("Nothing can be done for this array type at the moment."))
elif isinstance(o, bool):
res = rinterface.SexpVector([o, ], rinterface.LGLSXP)
elif isinstance(o, int) or isinstance(o, long):
res = rinterface.SexpVector([o, ], rinterface.INTSXP)
elif isinstance(o, float):
res = rinterface.SexpVector([o, ], rinterface.REALSXP)
elif isinstance(o, str):
res = rinterface.SexpVector([o, ], rinterface.STRSXP)
elif isinstance(o, unicode):
res = rinterface.SexpVector([o, ], rinterface.STRSXP)
elif isinstance(o, list):
res = r.list(*[conversion.ri2py(conversion.py2ri(x)) for x in o])
elif isinstance(o, complex):
res = rinterface.SexpVector([o, ], rinterface.CPLXSXP)
else:
raise(ValueError("Nothing can be done for the type %s at the moment." %(type(o))))
return res
conversion.py2ri = default_py2ri
def default_py2ro(o):
""" Convert any Python object into an robject.
:param o: object
:rtype: :class:`rpy2.robjects.RObject (and subclasses)`
"""
res = conversion.py2ri(o)
return conversion.ri2py(res)
conversion.py2ro = default_py2ro
class Environment(RObjectMixin, rinterface.SexpEnvironment):
""" An R environement. """
def __init__(self, o=None):
if o is None:
o = rinterface.baseenv["new.env"](hash=rinterface.SexpVector([True, ], rinterface.LGLSXP))
super(Environment, self).__init__(o)
def __getitem__(self, item):
res = super(Environment, self).__getitem__(item)
res = conversion.ri2py(res)
res.__rname__ = item
return res
def __setitem__(self, item, value):
robj = conversion.py2ro(value)
super(Environment, self).__setitem__(item, robj)
def get(self, item, wantfun = False):
""" Get a object from its R name/symol
:param item: string (name/symbol)
:rtype: object (as returned by :func:`conversion.ri2py`)
"""
res = super(Environment, self).get(item, wantfun = wantfun)
res = conversion.ri2py(res)
res.__rname__ = item
return res
class Formula(RObjectMixin, rinterface.Sexp):
def __init__(self, formula, environment = rinterface.globalenv):
if isinstance(formula, str):
inpackage = rinterface.baseenv["::"]
asformula = inpackage(rinterface.StrSexpVector(['stats', ]),
rinterface.StrSexpVector(['as.formula', ]))
formula = rinterface.SexpVector(rinterface.StrSexpVector([formula, ]))
robj = asformula(formula,
env = environment)
else:
robj = formula
super(Formula, self).__init__(robj)
def getenvironment(self):
""" Get the environment in which the formula is finding its symbols."""
res = self.do_slot(".Environment")
res = conversion.ri2py(res)
return res
def setenvironment(self, val):
""" Set the environment in which a formula will find its symbols."""
if not isinstance(val, rinterface.SexpEnvironment):
raise ValueError("The environment must be an instance of" +
" rpy2.rinterface.Sexp.environment")
self.do_slot_assign(".Environment", val)
environment = property(getenvironment, setenvironment,
"R environment in which the formula will look for" +
" its variables.")
class R(object):
_instance = None
def __init__(self):
if R._instance is None:
rinterface.initr()
R._instance = self
else:
pass
#raise(RuntimeError("Only one instance of R can be created"))
def __getattribute__(self, attr):
try:
return super(R, self).__getattribute__(attr)
except AttributeError, ae:
orig_ae = ae
try:
return self[attr]
except LookupError, le:
raise orig_ae
def __getitem__(self, item):
res = rinterface.globalenv.get(item)
res = conversion.ri2py(res)
res.__rname__ = item
return res
#FIXME: check that this is properly working
def __cleanup__(self):
rinterface.endEmbeddedR()
del(self)
def __str__(self):
s = super(R, self).__str__()
s += os.linesep
version = self["version"]
tmp = [n+': '+val[0] for n, val in itertools.izip(version.names, version)]
s += str.join(os.linesep, tmp)
return s
def __call__(self, string):
p = self.parse(text=string)
res = self.eval(p)
return res
r = R()
globalenv = conversion.ri2py(rinterface.globalenv)
baseenv = conversion.ri2py(rinterface.baseenv)
emptyenv = conversion.ri2py(rinterface.emptyenv)
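# A minimal usage sketch (kept out of the module's import-time path): the
# singleton `r` parses and evaluates R code, and R objects can be fetched by
# name through item access.
def _example_usage():
    pi = r['pi']            # R's built-in constant, converted to a FloatVector
    total = r('sum(1:10)')  # parse + eval an R expression; a length-1 vector holding 55
    return pi, total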
|
lbouma/Cyclopath
|
pyserver/bin/rpy2/robjects/__init__.py
|
Python
|
apache-2.0
| 8,577
|
import theano.tensor as T
def linrect(x):
return T.maximum(0,x)
def sigmoid(x):
return T.nnet.sigmoid(x)
def tanh(x):
return T.tanh(x)
def softmax(x):
return T.nnet.softmax(x)
def conv2d(*args,**kwargs):
return T.nnet.conv2d(*args,**kwargs)
def linear(x):
return x
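# Illustrative sketch: the helpers above are thin wrappers over Theano ops and
# compose into symbolic graphs like any other expression; the value shown in
# the comment is what the compiled function is expected to return.
def _example_linrect():
    import theano
    x = T.vector('x')
    f = theano.function([x], linrect(x))  # compile the rectified-linear graph
    return f([-1.0, 0.5])                 # roughly array([0., 0.5])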
|
terkkila/cgml
|
cgml/activations/activations.py
|
Python
|
apache-2.0
| 297
|
# -*- coding: ascii -*-
u"""
:Copyright:
Copyright 2017
Andr\xe9 Malo or his licensors, as applicable
:License:
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
======================
Tests for tdi.markup
======================
TDI markup tests
"""
__author__ = u"Andr\xe9 Malo"
__docformat__ = "restructuredtext en"
|
ndparker/tdi3
|
tests/unit/markup/__init__.py
|
Python
|
apache-2.0
| 809
|
import argparse
import collections
import subprocess
DESCRIPTION = """Process a git repo's history and list out all the files in
order of their hotness.
Hotness is the number of lines that have been modified. This count excludes
changes due to creation/deletion of entire files. E.g. if a file is created
with 10 lines then 5 more are added, the hotness will be 5."""
def calculate_hotness(path, since=None, until=None):
"""
Calculates the hotness of the git repo at the given (git) path.
since and until are dates that will be passed through to git log without
change.
The return value will be a dictionary of paths to hotness (number of times
modified).
"""
log = _get_git_log(path, since=since, until=until)
commits = _group_into_commits(log)
# Process commits in reverse order so that deleted files are ignored.
commits = reversed(list(commits))
changes = [_parse_commit_change_size(commit) for commit in commits]
totals = _combine_changes(changes)
return totals
def _get_git_log(path, since, until):
"""
Gets the git log at the given path.
"""
args = ['git', 'log', '--summary', '--numstat', '--format=format:']
if since:
args.append('--since=%s' % since)
if until:
args.append('--until=%s' % until)
if path:
args.append(path)
p = subprocess.Popen(args, stdout=subprocess.PIPE)
out = [line.rstrip() for line in p.stdout]
p.wait()
if p.returncode != 0:
raise Exception('Git failed')
return out
def _group_into_commits(log):
commit = None
for line in log:
if not line:
if commit:
yield commit
commit = []
else:
commit.append(line)
if commit:
yield commit
def _parse_commit_change_size(commit):
"""
Parses git's change stat format:
5 10 blah.py
Indicates 5 new lines, 10 removed lines in blah.py
This will exclude any changes due to creation.
If a file has been deleted it will be returned with a value of -1.
"""
result = {}
for line in commit:
if line.startswith(' delete'):
path = line.split(' ', 4)[-1]
result[path] = -1
elif line.startswith(' create'):
path = line.split(' ', 4)[-1]
if path in result:
del result[path]
elif line.startswith(' mode'):
continue
else:
try:
incr, decr, path = line.split('\t')
except ValueError:
print '\n'.join(commit)
raise
# binary files are specified as - - instead of number differences
if incr == '-' or decr == '-':
continue
result[path] = int(incr) + int(decr)
return result
def _combine_changes(commits):
result = collections.defaultdict(int)
for commit in commits:
for path, value in commit.iteritems():
if value == -1:
if path in result:
del result[path]
else:
result[path] += value
return result
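# Illustrative sketch of how a single commit's --numstat/--summary lines are
# interpreted by _parse_commit_change_size; the file names are made up.
def _example_parse_commit():
    sample_commit = [
        '5\t10\tblah.py',               # 5 added + 10 removed -> hotness 15
        '-\t-\tlogo.png',               # binary change, skipped
        '3\t0\tnew.py',                 # counted at first...
        ' create mode 100644 new.py',   # ...then dropped, since creation is excluded
        ' delete mode 100644 old.py',   # deletions are flagged with -1
    ]
    return _parse_commit_change_size(sample_commit)  # {'blah.py': 15, 'old.py': -1}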
def _process_args():
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument('-path',
help='path to the git repo')
parser.add_argument('--since',
help='date to limit commits from')
parser.add_argument('--until',
help='date to limit commits until')
parser.add_argument('-n', type=int, default=20,
help='number of results to output. Specifying 0 will output all.'
+ ' Default 20')
return parser.parse_args()
def _main():
"""
Entry point when run on the console.
"""
args = _process_args()
hotness = calculate_hotness(
args.path or '',
since=args.since, until=args.until)
hotness = sorted(hotness.iteritems(), reverse=True, key=lambda item: item[1])
if args.n:
hotness = hotness[:args.n]
for path, value in hotness:
print path, '\t', value
if __name__ == '__main__':
_main()
|
colinhowe/hotfilefinder
|
hotfilefinder/__init__.py
|
Python
|
apache-2.0
| 4,081
|
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Some utilities to identify Noto fonts and collect them into families"""
import argparse
import collections
import os
from os import path
import re
import sys
from fontTools import ttLib
from nototools import cldr_data
from nototools import coverage
from nototools import font_data
from nototools import lang_data
from nototools import notoconfig
from nototools import tool_utils
from nototools import unicode_data
# The '[xxx]' syntax is used to get the noto-xxx value from notoconfig.
# for now we exclude alpha, the phase 3 fonts are here but we don't use
# them yet.
NOTO_FONT_PATHS = [
'[fonts]/hinted', '[fonts]/unhinted', '[emoji]/fonts', '[cjk]']
ODD_SCRIPTS = {
'CJKjp': 'Jpan',
'CJKkr': 'Kore',
'CJKsc': 'Hans',
'CJKtc': 'Hant',
'JP': 'Jpan',
'KR': 'Kore',
'SC': 'Hans',
'TC': 'Hant',
'NKo': 'Nkoo',
'SumeroAkkadianCuneiform': 'Xsux',
'Symbols': 'Zsym',
'Emoji': 'Zsye',
}
def convert_to_four_letter(script_name):
"""Converts a script name from a Noto font file name to ISO 15924 code."""
if not script_name:
raise ValueError('empty script name')
if script_name in ODD_SCRIPTS:
return ODD_SCRIPTS[script_name]
script_code = unicode_data.script_code(script_name)
if script_code == 'Zzzz':
if len(script_name) != 4:
raise ValueError('no script for %s' % script_name)
print >> sys.stderr, 'defaulting script for %s' % script_name
script_code = script_name
return script_code
def preferred_script_name(script_key):
try:
return unicode_data.human_readable_script_name(script_key)
except:
return cldr_data.get_english_script_name(script_key)
_script_key_to_report_name = {
'Aran': '(Urdu)', # phase 2 usage
'HST': '(Historic)',
'LGC': '(LGC)',
'SYM2': 'Symbols2'
}
def script_name_for_report(script_key):
return (_script_key_to_report_name.get(script_key, None) or
preferred_script_name(script_key))
# NotoFont maps a font path to information we assume the font to have, based
# on Noto path and naming conventions:
# - filepath: the path name from which we derived the information
# - family: family name, e.g. 'Arimo', 'Noto'
# - style: type style, e.g. 'Sans', 'Serif', might be None
# - script: four-letter script code or 'private use' code like 'Aran', 'LGC',
# 'HST'
# - variant: script variant like 'UI' or Syriac variants like 'Estrangela'
# - width: width name ('Condensed') or None
# - weight: weight name
# - slope: slope name ('Italic') or None
# - fmt: 'ttf', 'otf', or 'otc'
# - manufacturer: 'Adobe', 'Google', 'Khmertype', or 'Monotype'
# - license_type: 'sil' or 'apache'
# - is_hinted: boolean, true if hinted
# - is_mono: boolean, true if monospace (currently CJK Latin range, or legacy
# LGC Mono)
# - is_display: boolean, true if display
# - is_UI: boolean, true if has UI metrics
# - is_cjk: boolean, true if a CJK font (from Adobe)
# - subset: name of cjk subset (KR, JA, SC, TC) for reduced-charset fonts
# targeted at these languages
NotoFont = collections.namedtuple(
'NotoFont',
'filepath, family, style, script, variant, width, weight, slope, '
'fmt, manufacturer, license_type, is_hinted, is_mono, is_UI, is_display, '
'is_cjk, subset')
WEIGHTS = {
'Thin': 100,
'ExtraLight': 200,
'Light': 300,
'DemiLight': 350,
'SemiLight': 350, # because currently some phase 3 fonts have this
'Regular': 400,
'Medium': 500,
'DemiBold': 600, # in case
'SemiBold': 600,
'Bold': 700,
'ExtraBold': 800,
'Black': 900
}
_FONT_NAME_REGEX = (
# family should be prepended - this is so Roboto can be used with unittests
# that use this regex to parse.
'(Sans|Serif|Naskh|Kufi|Nastaliq|Emoji|ColorEmoji)?'
'(Mono(?:space)?)?'
'(.*?)'
'(Eastern|Estrangela|Western|Slanted|New|Unjoined)?'
'(UI)?'
'(Display)?'
'-?'
'((?:Semi|Extra)?Condensed)?'
'(|%s)?' % '|'.join(WEIGHTS.keys()) +
'(Italic)?'
'\.(ttf|ttc|otf)')
_EXT_REGEX = re.compile(r'.*\.(?:ttf|ttc|otf)$')
def get_noto_font(filepath, family_name='Arimo|Cousine|Tinos|Noto',
phase=3):
"""Return a NotoFont if filepath points to a noto font, or None if we can't
process the path."""
filedir, filename = os.path.split(filepath)
if not filedir:
filedir = os.getcwd()
match = match_filename(filename, family_name)
if match:
(family, style, mono, script, variant, ui, display, width, weight,
slope, fmt) = match.groups()
else:
if _EXT_REGEX.match(filename):
print >> sys.stderr, '%s did not match font regex' % filename
return None
is_cjk = filedir.endswith('noto-cjk')
license_type = 'sil'
if script in ['JP', 'KR', 'TC', 'SC']:
subset = script
else:
subset = None
# Special-case emoji style
# (style can be None for e.g. Cousine, causing 'in' to fail, so guard)
if style and 'Emoji' in style:
script = 'Zsye'
if style == 'ColorEmoji':
style = 'Emoji'
variant = 'color'
is_mono = mono == 'Mono'
if width not in [None, '', 'Condensed', 'SemiCondensed', 'ExtraCondensed']:
print >> sys.stderr, 'noto_fonts: Unexpected width "%s"' % width
if width in ['SemiCond', 'Narrow']:
width = 'SemiCondensed'
elif width == 'Cond':
width = 'Condensed'
else:
      width = '#' + width + '#'
if not script:
if is_mono:
script = 'MONO'
else:
script = 'LGC'
elif script == 'Urdu':
# Use 'Aran' for languages written in the Nastaliq Arabic style, like Urdu.
# The font naming uses 'Urdu' which is not a script, but a language.
assert family == 'Noto' and style == 'Nastaliq'
script = 'Aran'
elif script == 'Historic':
script = 'HST'
elif script == 'CJK':
# leave script as-is
pass
elif script == 'Symbols2':
script = 'SYM2'
else:
try:
script = convert_to_four_letter(script)
except ValueError:
print >> sys.stderr, 'unknown script: %s for %s' % (script, filename)
return None
if not weight:
weight = 'Regular'
is_UI = ui == 'UI'
is_display = display == 'Display'
if is_cjk:
is_hinted = True
elif filedir.endswith('alpha') or 'emoji' in filedir:
is_hinted = False
else:
hint_status = path.basename(filedir)
if (hint_status not in ['hinted', 'unhinted']
and 'noto-source' not in filedir):
# print >> sys.stderr, (
# 'unknown hint status for %s, defaulting to unhinted') % filedir
pass
is_hinted = hint_status == 'hinted'
manufacturer = (
'Adobe' if is_cjk
else 'Google' if script == 'Zsye' and variant == 'color'
else 'Khmertype' if phase < 3 and script in ['Khmr', 'Cham', 'Laoo']
else 'Monotype')
return NotoFont(
filepath, family, style, script, variant, width, weight, slope, fmt,
manufacturer, license_type, is_hinted, is_mono, is_UI, is_display, is_cjk,
subset)
def match_filename(filename, family_name):
"""Match just the file name."""
return re.match('(%s)' % family_name + _FONT_NAME_REGEX, filename)
def parse_weight(name):
"""Parse the weight specifically from a name."""
match = re.search('|'.join(WEIGHTS.keys()), name)
if not match:
return 'Regular'
return match.group(0)
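# Illustrative sketch of the file-name parsing above; the file name is a
# plausible but made-up example, and the group order follows _FONT_NAME_REGEX
# (family, style, mono, script, variant, UI, display, width, weight, slope, fmt).
def _example_match_filename():
  m = match_filename('NotoSansArmenian-BoldItalic.ttf', 'Noto')
  # yields family 'Noto', style 'Sans', script part 'Armenian',
  # weight 'Bold' and slope 'Italic'
  return m.groups() if m else None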
def script_key_to_scripts(script_key):
"""Return a set of scripts for a script key. The script key is used by
a font to define the set of scripts it supports. Some keys are ours,
e.g. 'LGC', and some are standard script codes that map to multiple
scripts, like 'Jpan'. In either case we need to be able to map a script
code (either unicode character script code, or more general iso script
code) to a font, and we do so by finding it in the list returned here."""
if script_key == 'LGC':
return frozenset(['Latn', 'Grek', 'Cyrl'])
elif script_key == 'Aran':
return frozenset(['Arab'])
elif script_key == 'HST':
raise ValueError('!do not know scripts for HST script key')
elif script_key == 'MONO':
# TODO: Mono doesn't actually support all of Latn, we need a better way
# to deal with pseudo-script codes like this one.
return frozenset(['Latn'])
else:
return lang_data.script_includes(script_key)
def script_key_to_primary_script(script_key):
"""We need a default script for a font, and fonts using a 'script key' support
multiple fonts. This lets us pick a default sample for a font based on it.
The sample is named with a script that can include 'Jpan' so 'Jpan' should be
the primary script in this case."""
if script_key == 'LGC':
return 'Latn'
if script_key == 'Aran':
return 'Arab'
if script_key == 'HST':
raise ValueError('!do not know scripts for HST script key')
if script_key == 'MONO':
return 'Latn'
if script_key not in lang_data.scripts():
raise ValueError('!not a script key: %s' % script_key)
return script_key
def noto_font_to_family_id(notofont):
# exclude 'noto-' from head of key, they all start with it except
# arimo, cousine, and tinos, and we can special-case those.
# For cjk with subset we ignore script and use 'cjk' plus the subset.
tags = []
if notofont.family != 'Noto':
tags.append(notofont.family)
if notofont.style:
tags.append(notofont.style)
if notofont.is_mono and not notofont.is_cjk:
tags.append('mono')
if notofont.is_cjk and notofont.subset:
tags.append('cjk')
tags.append(notofont.subset)
else:
tags.append(notofont.script)
if notofont.variant:
tags.append(notofont.variant)
key = '-'.join(tags)
return key.lower()
def noto_font_to_wws_family_id(notofont):
"""Return an id roughly corresponding to the wws family. Used to identify
naming rules for the corresponding fonts. Compare to noto_font_to_family_id,
which corresponds to a preferred family and is used to determine the language
support for those fonts. For example, 'Noto Sans Devanagari UI' and
'Noto Sans Devanagari' support the same languages (e.g. have the same cmap)
but have different wws family names and different name rules (names for the
UI variant use very short abbreviations)."""
id = noto_font_to_family_id(notofont)
if notofont.is_UI:
id += '-ui'
if notofont.is_display:
id += '-display'
return id
def get_noto_fonts(paths=NOTO_FONT_PATHS):
"""Scan paths for fonts, and create a NotoFont for each one, returning a list
of these. 'paths' defaults to the standard noto font paths, using notoconfig."""
font_dirs = filter(None, [tool_utils.resolve_path(p) for p in paths])
print 'Getting fonts from: %s' % font_dirs
all_fonts = []
for font_dir in font_dirs:
for filename in os.listdir(font_dir):
if not _EXT_REGEX.match(filename):
continue
filepath = path.join(font_dir, filename)
font = get_noto_font(filepath)
if not font:
print >> sys.stderr, 'bad font filename in %s: \'%s\'.' % (
(font_dir, filename))
continue
all_fonts.append(font)
return all_fonts
def get_font_family_name(font_file):
font = ttLib.TTFont(font_file, fontNumber=0)
name_record = font_data.get_name_records(font)
try:
name = name_record[16]
except KeyError:
name = name_record[1]
if name.endswith('Regular'):
name = name.rsplit(' ', 1)[0]
return name
# NotoFamily provides additional information about related Noto fonts. These
# fonts have weight/slope/other variations but have the same cmap, script
# support, etc. Most of this information is held in a NotoFont that is the
# representative member. Fields are:
# - name: name of the family
# - family_id: a family_id for the family
# - rep_member: the representative member, some of its data is common to all
# members
# - charset: the character set, must be the same for all members
# - hinted_members: list of members that are hinted
# - unhinted_members: list of members that are unhinted
# When both hinted_members and unhinted_members are present, they correspond.
NotoFamily = collections.namedtuple(
'NotoFamily',
'name, family_id, rep_member, charset, hinted_members, unhinted_members')
def get_families(fonts):
"""Group fonts into families, separate into hinted and unhinted, select
representative."""
family_id_to_fonts = collections.defaultdict(set)
families = {}
for font in fonts:
family_id = noto_font_to_family_id(font)
family_id_to_fonts[family_id].add(font)
for family_id, fonts in family_id_to_fonts.iteritems():
hinted_members = []
unhinted_members = []
rep_member = None
rep_backup = None # used in case all fonts are ttc fonts
for font in fonts:
if font.is_hinted:
hinted_members.append(font)
else:
unhinted_members.append(font)
if not rep_member:
if font.weight == 'Regular' and font.slope is None and not (
font.is_cjk and font.is_mono) and not font.is_UI:
# We assume here that there's no difference between a hinted or
# unhinted rep_member in terms of what we use it for. The other
# filters are to ensure the fontTools font name is a good stand-in
# for the family name.
if font.fmt == 'ttc' and not rep_backup:
rep_backup = font
else:
rep_member = font
rep_member = rep_member or rep_backup
if not rep_member:
raise ValueError(
'Family %s does not have a representative font.' % family_id)
name = get_font_family_name(rep_member.filepath)
if rep_member.fmt in {'ttf', 'otf'}:
charset = coverage.character_set(rep_member.filepath)
else:
# was NotImplemented, but bool(NotImplemented) is True
charset = None
families[family_id] = NotoFamily(
name, family_id, rep_member, charset, hinted_members, unhinted_members)
return families
def get_family_filename(family):
"""Returns a filename to use for a family zip of hinted/unhinted members.
This is basically the postscript name with weight/style removed.
"""
font = ttLib.TTFont(family.rep_member.filepath, fontNumber=0)
name_record = font_data.get_name_records(font)
try:
name = name_record[6]
ix = name.find('-')
if ix >= 0:
name = name[:ix]
except KeyError:
name = name_record[1]
if name.endswith('Regular'):
name = name.rsplit(' ', 1)[0]
name = name.replace(' ', '')
return name
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'-d', '--dirs', help='list of directories to find fonts in',
metavar='dir', nargs='+',default=NOTO_FONT_PATHS)
args = parser.parse_args()
fonts = get_noto_fonts(paths=args.dirs)
for font in fonts:
print font.filepath
for attr in font._fields:
print ' %15s: %s' % (attr, getattr(font, attr))
if __name__ == "__main__":
main()
|
anthrotype/nototools
|
nototools/noto_fonts.py
|
Python
|
apache-2.0
| 15,453
|
"""
Wrapper over nilearn to visualise brain connectome
"""
import colorsys
import sys
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from nilearn import plotting
from maybrain import constants as ct
def plot_connectome(brain,
only_nodes=False,
node_property=None,
node_attributes=None,
node_size_property=None,
node_size_min=5,
node_size_max=150,
**kwargs):
"""
Wrapper over `nilearn.plotting.plot_connectome` to plot the connectome of a brain
(specifically `brain.G`).
Brain's nodes should have `constants.XYZ` attribute (spatial information) in the MNI space.
Parameters
----------
brain: maybrain.brain.Brain
An instance of the `Brain` class
only_nodes: bool
If True, only nodes will be plotted (no edges)
node_property: str
Property to look for in the nodes of brain.G in order to have different colours
for the nodes. Colours will be chosen depending on the values of those properties defined
in `node_attributes`. When defined, `node_color` attribute in
`nilearn.plotting.plot_connectome` is overridden.
node_attributes: list of str
It indicates how the nodes will be coloured according to `node_property`. For example,
if `node_property` is "hemisphere", and `node_attributes` is ["R", "L"], nodes will be
coloured with three colours: one when a node has the value "R" for the "hemisphere"
attribute, another colour when the value is "L", and another colour if the node has a value
which is not "L" or "R".
node_size_property: str
Property to look for in the nodes of brain.G in order to have different sizes
for the nodes. Sizes will be calculated based on the values for each node, scaled for
[node_size_min, node_size_max]. When defined, `node_size` attribute in
`nilearn.plotting.plot_connectome` is overridden.
    node_size_min: int
        The smallest node size. The node with the smallest value of property
        `node_size_property` will have this size
    node_size_max: int
        The biggest node size. The node with the biggest value of property
        `node_size_property` will have this size
kwargs
Keyword arguments if you need to pass them to nilearn's plot_connectome()
Returns
-------
display
the display from nilearn (return from nilearn's plot_connectome())
Raises
------
KeyError: Exception
        If the nodes don't have the constants.XYZ property
"""
try:
if list(brain.G.nodes(data=True))[0][1][ct.XYZ]:
pass
except KeyError as error:
_, _, tbb = sys.exc_info()
raise KeyError(error, "Node doesn't have constants.XYZ property").with_traceback(tbb)
# Some values to get better plots than nilearn's defaults
if 'edge_cmap' not in kwargs:
kwargs['edge_cmap'] = plt.get_cmap('YlGnBu')
if 'node_color' not in kwargs:
kwargs['node_color'] = 'red'
connection_matrix = np.copy(nx.to_numpy_matrix(brain.G, nonedge=0))
if only_nodes:
connection_matrix[:] = 0
# If node_property is defined, let's create the custom colours
if node_property:
palette = []
        # Creating a palette of colours based on the number of node attributes
for i in range(len(node_attributes) + 1): # +1 to account for "other" regions
rgb = colorsys.hsv_to_rgb(i / (len(node_attributes) + 1), 1.0, 1.0)
palette.append('#%02x%02x%02x' % tuple([(round(255 * x)) for x in rgb]))
# Defining the colour for each node according to the attribute it has
colours = []
for n in brain.G.nodes(data=True):
we_r = np.where(np.array(node_attributes) == n[1][node_property])
if we_r[0].size > 0:
colours.append(palette[we_r[0][0]])
else:
colours.append(palette[-1]) # not found, so another colour
kwargs['node_color'] = colours
# If node_size_property is defined, let's define the size of each node
if node_size_property:
node_vals = [n[1][node_size_property] for n in brain.G.nodes(data=True)]
old_min = np.min(node_vals)
old_max = np.max(node_vals)
if old_min != old_max:
kwargs['node_size'] = []
for n in brain.G.nodes(data=True):
val = n[1][node_size_property]
new_val = ((val - old_min) / (old_max - old_min)) \
* (node_size_max - node_size_min) \
+ node_size_min
kwargs['node_size'].append(new_val)
else:
kwargs['node_size'] = node_size_min
return plotting.plot_connectome(connection_matrix,
list(dict(brain.G.nodes(data=ct.XYZ)).values()),
**kwargs)
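# Hedged usage sketch (illustration only, not part of the original module). It
# assumes `brain` is a maybrain.brain.Brain whose nodes already carry the
# constants.XYZ attribute plus hypothetical "hemisphere" and "degree" attributes;
# `edge_threshold` is simply forwarded to nilearn through **kwargs.
#
#   display = plot_connectome(brain,
#                             node_property="hemisphere",
#                             node_attributes=["L", "R"],   # a third colour covers "other"
#                             node_size_property="degree",
#                             node_size_min=10,
#                             node_size_max=100,
#                             edge_threshold="95%")
#   display.savefig("connectome.png")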
|
RittmanResearch/maybrain
|
maybrain/plotting/connectome.py
|
Python
|
apache-2.0
| 5,028
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.aiplatform_v1beta1.types import feature_monitoring_stats
from google.cloud.aiplatform_v1beta1.types import featurestore_monitoring
from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.aiplatform.v1beta1", manifest={"Feature",},
)
class Feature(proto.Message):
r"""Feature Metadata information that describes an attribute of
an entity type. For example, apple is an entity type, and color
is a feature that describes apple.
Attributes:
name (str):
Immutable. Name of the Feature. Format:
``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}``
The last part feature is assigned by the client. The feature
can be up to 64 characters long and can consist only of
ASCII Latin letters A-Z and a-z, underscore(_), and ASCII
digits 0-9 starting with a letter. The value will be unique
given an entity type.
description (str):
Description of the Feature.
value_type (google.cloud.aiplatform_v1beta1.types.Feature.ValueType):
Required. Immutable. Type of Feature value.
create_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Timestamp when this EntityType
was created.
update_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Timestamp when this EntityType
was most recently updated.
labels (Sequence[google.cloud.aiplatform_v1beta1.types.Feature.LabelsEntry]):
Optional. The labels with user-defined
metadata to organize your Features.
Label keys and values can be no longer than 64
characters (Unicode codepoints), can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
on and examples of labels. No more than 64 user
labels can be associated with one Feature
(System labels are excluded)."
System reserved label keys are prefixed with
"aiplatform.googleapis.com/" and are immutable.
etag (str):
Used to perform a consistent
read-modify-write updates. If not set, a blind
"overwrite" update happens.
monitoring_config (google.cloud.aiplatform_v1beta1.types.FeaturestoreMonitoringConfig):
Optional. The custom monitoring configuration for this
Feature, if not set, use the monitoring_config defined for
the EntityType this Feature belongs to. Only Features with
type
([Feature.ValueType][google.cloud.aiplatform.v1beta1.Feature.ValueType])
BOOL, STRING, DOUBLE or INT64 can enable monitoring.
If this is populated with
[FeaturestoreMonitoringConfig.disabled][] = true, snapshot
analysis monitoring is disabled; if
[FeaturestoreMonitoringConfig.monitoring_interval][]
specified, snapshot analysis monitoring is enabled.
Otherwise, snapshot analysis monitoring config is same as
the EntityType's this Feature belongs to.
monitoring_stats (Sequence[google.cloud.aiplatform_v1beta1.types.FeatureStatsAnomaly]):
Output only. A list of historical [Snapshot
Analysis][FeaturestoreMonitoringConfig.SnapshotAnalysis]
stats requested by user, sorted by
[FeatureStatsAnomaly.start_time][google.cloud.aiplatform.v1beta1.FeatureStatsAnomaly.start_time]
descending.
"""
class ValueType(proto.Enum):
r"""An enum representing the value type of a feature."""
VALUE_TYPE_UNSPECIFIED = 0
BOOL = 1
BOOL_ARRAY = 2
DOUBLE = 3
DOUBLE_ARRAY = 4
INT64 = 9
INT64_ARRAY = 10
STRING = 11
STRING_ARRAY = 12
BYTES = 13
name = proto.Field(proto.STRING, number=1,)
description = proto.Field(proto.STRING, number=2,)
value_type = proto.Field(proto.ENUM, number=3, enum=ValueType,)
create_time = proto.Field(proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,)
update_time = proto.Field(proto.MESSAGE, number=5, message=timestamp_pb2.Timestamp,)
labels = proto.MapField(proto.STRING, proto.STRING, number=6,)
etag = proto.Field(proto.STRING, number=7,)
monitoring_config = proto.Field(
proto.MESSAGE,
number=9,
message=featurestore_monitoring.FeaturestoreMonitoringConfig,
)
monitoring_stats = proto.RepeatedField(
proto.MESSAGE, number=10, message=feature_monitoring_stats.FeatureStatsAnomaly,
)
__all__ = tuple(sorted(__protobuf__.manifest))
|
googleapis/python-aiplatform
|
google/cloud/aiplatform_v1beta1/types/feature.py
|
Python
|
apache-2.0
| 5,547
|
#!/usr/bin/env python
# Copyright 2013-2015 David Mohr
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import os.path
from subprocess import check_output, Popen, PIPE
import time
import functools
import numpy
import pystache
from . import * # noqa
import stella
opt = 3
min_speedup = 0.75
def ccompile(fn, src, cc=None, flags={}):
"""
Write the string src into the file fn, then compile it with -O{opt} and
return the executable name.
"""
with open(fn, 'w') as f:
f.write(src)
if 'c' not in flags:
flags['c'] = []
if 'ld' not in flags:
flags['ld'] = []
if cc is None:
if 'CC' in os.environ:
CC = os.environ['CC']
else:
CC = 'gcc'
else:
CC = cc
(root, ext) = os.path.splitext(fn)
if os.path.exists(root):
os.unlink(root)
obj = root + ".o"
if os.path.exists(obj):
os.unlink(obj)
with open(fn, 'rb') as f:
sourcecode = f.read()
# the following three cmds are equivalent to
# [CC, '-Wall', '-O' + str(opt)] + flags + ['-o', root, fn]
cmd = [CC] + flags['c'] + ['-Wall', '-E', '-o', '-', '-']
print("Preprocessing: {0}".format(" ".join(cmd)))
p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
preprocessed, serr = p.communicate(timeout=30, input=sourcecode)
assert (not serr or not serr.decode())
# start with C input, generate assembly
cmd = [CC, '-Wall'] + flags['c'] + ['-x', 'cpp-output', '-S',
'-O' + str(opt), '-o', '-', '-']
print("Compiling to assembly: {0}".format(" ".join(cmd)))
p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
time_start = time.time()
sout, serr = p.communicate(timeout=30, input=preprocessed)
elapsed = time.time() - time_start
assert not serr.decode()
cmd = [CC] + flags['ld'] + ['-o', root, '-x', 'assembler', '-']
print("Compiling to machine code & linking: {0}".format(" ".join(cmd)))
p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
sout, serr = p.communicate(timeout=30, input=sout)
assert (not serr or not serr.decode()) and (not sout or not sout.decode())
return root, elapsed
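# Hedged usage sketch (illustration only, not in the original benchmark): the
# C source below is hypothetical; `exe` is the output path without extension and
# the returned time covers only the compile-to-assembly step, as in ccompile above.
#
#   demo_src = '#include <stdio.h>\nint main(void){ puts("hi"); return 0; }\n'
#   exe, compile_seconds = ccompile('demo.c', demo_src, flags={'c': [], 'ld': []})
#   print("built {} in {:.2f}s".format(exe, compile_seconds))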
def bench_it(name, c_src, args, extended, parse_f, verify_f,
stella_f=None, full_f=None,
flags={}):
"""args = {k=v, ...}
Args gets expanded to `k`_init: `k`=`v` for the C template
"""
if not stella_f and not full_f:
raise Exception(
"Either need to specify stella_f(*arg_value) or full_f(args, stats)")
t_run = {}
t_compile = {}
c_args = {k+'_init': k+'='+str(v) for k, v in args.items()}
print("Doing {0}({1})".format(name, args))
src = pystache.render(c_src, **c_args)
if extended:
CCs = ['gcc', 'clang']
else:
CCs = ['gcc']
results = {}
for cc in CCs:
exe, elapsed_compile = ccompile(__file__ + "." + name + ".c", src, cc, flags)
t_compile[cc] = elapsed_compile
cmd = [exe]
print("Running C/{}: {}".format(cc, " ".join(cmd)))
time_start = time.time()
out = check_output(cmd, universal_newlines=True)
print(out)
results[cc] = parse_f(out)
elapsed_c = time.time() - time_start
t_run[cc] = elapsed_c
print("Running Stella:")
stats = {}
wrapper_opts = {'debug': False, 'opt': opt, 'stats': stats}
if stella_f:
arg_values = args.values()
time_start = time.time()
res = stella.wrap(stella_f, **wrapper_opts)(*arg_values)
elapsed_stella = time.time() - time_start
else:
elapsed_stella, res = full_f(args, stella.wrap, wrapper_opts)
results['stella'] = res
t_run['stella'] = stats['elapsed']
# TODO no need to keep track of the combined time, is there?
# t_run['stella+compile'] = elapsed_stella
t_compile['stella'] = elapsed_stella - stats['elapsed']
if extended > 1:
print("\nRunning Python:")
if stella_f:
time_start = time.time()
res = stella_f(*[v for k, v in args.items()])
elapsed_py = time.time() - time_start
else:
elapsed_py, res = full_f(args, time_stats, wrapper_opts)
t_run['python'] = elapsed_py
results['python'] = res
# verify results are identical
it = iter(results.keys())
k1 = next(it)
for k2 in it:
print('Verify:', k1, '==', k2)
verify_f(results[k1], results[k2])
k1 = k2
return {'run': t_run, 'compile': t_compile}
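# Hedged illustration (not part of the original file) of the `args` expansion
# described in bench_it's docstring: each k=v pair becomes a `k`_init variable
# holding a C initialiser string, which pystache substitutes into the template.
#
#   args = {'x': 30, 'dt': 0.01}
#   c_args = {k + '_init': k + '=' + str(v) for k, v in args.items()}
#   # c_args == {'x_init': 'x=30', 'dt_init': 'dt=0.01'}
#   # pystache.render(c_src, **c_args) then fills the corresponding placeholders
#   # (placeholder names in the C templates are assumed to follow this scheme).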
def fib_prepare(f):
@functools.wraps(f)
def prepare(args):
return (f, (args['x'], ), lambda r, x: r)
return prepare
def fib_parse(out):
print (out)
return int(out.strip())
def fib_verify(a, b):
assert a == b
def bench_fib(duration, extended):
from .langconstr import fib
args = {'x': duration}
return bench_vs_template(fib_prepare(fib), extended, 'fib', args,
parse_f=fib_parse, verify_f=fib_verify)
def bench_fib_nonrecursive(duration, extended):
from .langconstr import fib_nonrecursive
args = {'x': duration}
return bench_vs_template(fib_prepare(fib_nonrecursive), extended, 'fib_nonrecursive', args,
parse_f=fib_parse, verify_f=fib_verify)
def bench_vs_template(prepare, extended, name, args, parse_f, verify_f, flags={}):
fn = "{}/template.{}.{}.c".format(os.path.dirname(__file__),
os.path.basename(__file__),
name)
with open(fn) as f:
src = f.read()
def run_it(args, wrapper, wrapper_opts):
run_f, transfer, result_f = prepare(args)
if transfer is None:
transfer = []
time_start = time.time()
r = wrapper(run_f, **wrapper_opts)(*transfer)
elapsed_stella = time.time() - time_start
return elapsed_stella, result_f(r, *transfer)
return bench_it(name, src, args, extended, flags=flags, full_f=run_it,
parse_f=parse_f, verify_f=verify_f)
def bench_si1l1s(module, extended, suffix, duration):
def parse(out):
return numpy.array(list(map(float, out.strip()[1:-1].split(' '))))
def verify(a, b):
assert (a == b).all()
args = {'seed': int(time.time() * 100) % (2**32),
'rununtiltime': duration
}
return bench_vs_template(module.prepare, extended, 'si1l1s_' + suffix, args,
flags={'ld': ['-lm']},
parse_f=parse, verify_f=verify)
def bench_si1l1s_globals(duration, extended):
from . import si1l1s_globals
return bench_si1l1s(si1l1s_globals, extended, 'globals', duration)
def bench_si1l1s_struct(duration, extended):
from . import si1l1s_struct
return bench_si1l1s(si1l1s_struct, extended, 'struct', duration)
def bench_si1l1s_obj(duration, extended):
from . import si1l1s_obj
# reuse the 'struct' version of C since there is no native OO
return bench_si1l1s(si1l1s_obj, extended, 'struct', duration)
def bench_nbody(n, extended):
from . import nbody
def parse(out):
return list(map(float, out.strip().split('\n')))
def verify(a, b):
fmt = "{:8f}"
for x, y in zip(a, b):
assert fmt.format(x) == fmt.format(y)
args = {'n': n,
'dt': 0.01,
}
return bench_vs_template(nbody.prepare, extended, 'nbody', args, flags={'ld': ['-lm']},
parse_f=parse, verify_f=verify)
def bench_heat(n, extended):
from . import heat
def parse(out):
rows = out.strip().split('\n')
r = numpy.zeros(shape=(len(rows), 5))
for i, row in enumerate(rows):
for j, v in enumerate(row.split()):
r[i, j] = v
return r
def verify(a, b):
for i, row in enumerate(abs(a - b)):
assert (row < delta).all()
args = {'nsteps': n}
return bench_vs_template(heat.prepare, extended, 'heat', args,
flags={'ld': ['-lm'], 'c': ['-std=c99']},
parse_f=parse, verify_f=verify)
def speedup(bench):
return bench['run']['gcc'] / bench['run']['stella']
@bench
def test_fib(bench_result, bench_opt, bench_ext):
duration = [30, 45, 48][bench_opt]
bench_result['fib'] = bench_fib(duration, bench_ext)
assert speedup(bench_result['fib']) >= min_speedup
@mark.skipif(True, reason="Runs too fast to be a useful benchmark")
def test_fib_nonrecursive(bench_result, bench_opt, bench_ext):
duration = [50, 150, 175][bench_opt]
bench_result['fib_nonrec'] = bench_fib_nonrecursive(duration, bench_ext)
assert speedup(bench_result['fib_nonrec']) >= min_speedup
si1l1s_durations = ['1e5', '1e8', '1.2e9']
@bench
def test_si1l1s_globals(bench_result, bench_opt, bench_ext):
duration = si1l1s_durations[bench_opt]
bench_result['si1l1s_global'] = bench_si1l1s_globals(duration, bench_ext)
assert speedup(bench_result['si1l1s_global']) >= min_speedup
@bench
def test_si1l1s_struct(bench_result, bench_opt, bench_ext):
duration = si1l1s_durations[bench_opt]
bench_result['si1l1s_struct'] = bench_si1l1s_struct(duration, bench_ext)
assert speedup(bench_result['si1l1s_struct']) >= min_speedup
@bench
def test_si1l1s_obj(bench_result, bench_opt, bench_ext):
duration = si1l1s_durations[bench_opt]
bench_result['si1l1s_obj'] = bench_si1l1s_obj(duration, bench_ext)
assert speedup(bench_result['si1l1s_obj']) >= min_speedup
@bench
def test_nbody(bench_result, bench_opt, bench_ext):
duration = [250000, 10000000, 100000000][bench_opt]
bench_result['nbody'] = bench_nbody(duration, bench_ext)
assert speedup(bench_result['nbody']) >= min_speedup
@bench
def test_heat(bench_result, bench_opt, bench_ext):
duration = [13, 3000, 50000][bench_opt]
bench_result['heat'] = bench_heat(duration, bench_ext)
assert speedup(bench_result['heat']) >= min_speedup
|
squisher/stella
|
stella/test/benchmark.py
|
Python
|
apache-2.0
| 10,596
|
# coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from extensions.actions import base
class AnswerSubmit(base.BaseLearnerActionSpec):
"""Learner action that's recorded when an answer is submitted."""
_customization_arg_specs = [{
'name': 'state_name',
'description': 'State name',
'schema': {
'type': 'unicode',
},
'default_value': ''
}, {
'name': 'dest_state_name',
'description': 'Destination state name',
'schema': {
'type': 'unicode',
},
'default_value': ''
}, {
'name': 'interaction_id',
'description': 'ID of the interaction',
'schema': {
'type': 'unicode',
},
'default_value': ''
}, {
'name': 'submitted_answer',
'description': 'Submitted answer',
'schema': {
'type': 'unicode',
},
'default_value': ''
}, {
'name': 'feedback',
'description': 'Feedback for the submitted answer',
'schema': {
'type': 'unicode',
},
'default_value': ''
}, {
'name': 'time_spent_state_in_msecs',
'description': 'Time spent in state in milliseconds',
'schema': {
'type': 'int',
},
'default_value': 0
}]
|
AllanYangZhou/oppia
|
extensions/actions/AnswerSubmit/AnswerSubmit.py
|
Python
|
apache-2.0
| 1,907
|
#!/usr/bin/env python
__author__ = 'ilkin safarli'
import unittest
from FrequencyTransformer import *
from scipy import sparse
from sklearn.feature_extraction.text import CountVectorizer
count_vect = CountVectorizer()
counts = count_vect.fit_transform(["hello world"])
class TestFrequencyTransformer(unittest.TestCase):
def test_fit(self):
x = FrequencyTransformer(count_vect)
self.assertEqual(x, x.fit())
def test_transform(self):
global count_vect
x = FrequencyTransformer(count_vect)
c = sparse.csc_matrix([1, 1])
boolean = (c-x.transform(counts.toarray())).nnz == 0
self.assertEqual(boolean, True)
if __name__ == '__main__':
unittest.main()
|
kinimesi/rscore
|
unit_tests/test_FrequencyTransformer.py
|
Python
|
apache-2.0
| 645
|
# Copyright 2015 - Alcatel-Lucent
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from oslo_utils import timeutils
# noinspection PyPackageRequirements
from oslotest import base
import sys
from testtools import matchers
from testtools.matchers import HasLength
from vitrage.common import config
CONF = cfg.CONF
IsEmpty = lambda: HasLength(0)
class BaseTest(base.BaseTestCase):
"""Test case base class for all unit tests."""
def conf_reregister_opts(self, opts, group=None):
self.conf.reset()
if group in self.conf:
for opt in opts:
self.conf.unregister_opt(opt, group=group)
self.conf.register_opts(opts, group=group)
def unregister_opts():
self.conf.reset()
for opt in opts:
self.conf.unregister_opt(opt, group=group)
self.addCleanup(unregister_opts)
def setUp(self):
super(BaseTest, self).setUp()
self.cfg_fixture = self.useFixture(
config_fixture.Config(CONF))
config.parse_config([])
logging.disable(logging.CRITICAL)
self.conf = self.cfg_fixture.conf
def config(self, **kw):
self.cfg_fixture.config(**kw)
def assert_list_equal(self, l1, l2, message=None):
if tuple(sys.version_info)[0:2] < (2, 7):
# for python 2.6 compatibility
self.assertEqual(l1, l2, message)
else:
super(BaseTest, self).assertListEqual(l1, l2, message)
def assert_dict_equal(self, d1, d2, message=None):
if tuple(sys.version_info)[0:2] < (2, 7):
# for python 2.6 compatibility
self.assertEqual(d1, d2)
else:
super(BaseTest, self).assertDictEqual(d1, d2, message)
def assert_timestamp_equal(self, first, second, msg=None):
"""Checks that two timestamps are equals.
This relies on assertAlmostEqual to avoid rounding problem, and only
checks up the first microsecond values.
"""
return self.assertAlmostEqual(timeutils.delta_seconds(first, second),
0.0,
places=5, msg=msg)
def assert_is_empty(self, obj):
try:
if len(obj) != 0:
self.fail("%s is not empty" % type(obj))
except (TypeError, AttributeError):
self.fail("%s doesn't have length" % type(obj))
def assert_is_not_empty(self, obj):
try:
if len(obj) == 0:
self.fail("%s is empty" % type(obj))
except (TypeError, AttributeError):
self.fail("%s doesn't have length" % type(obj))
def assert_graph_equal(self, g1, g2):
"""Checks that two graphs are equals.
This relies on assert_dict_equal when comparing the nodes and the
edges of each graph.
"""
g1_nodes = g1._g.nodes
g1_edges = g1._g.adj
g2_nodes = g2._g.nodes
g2_edges = g2._g.adj
        self.assertEqual(g1.num_vertices(), g2.num_vertices(),
                         "Two graphs have different numbers of nodes")
        self.assertEqual(g1.num_edges(), g2.num_edges(),
                         "Two graphs have different numbers of edges")
for n_id in g1_nodes:
self.assert_dict_equal(g1_nodes.get(n_id),
g2_nodes.get(n_id),
"Nodes of each graph are not equal")
for e_source_id in g1_edges:
self.assert_dict_equal(dict(g1_edges.get(e_source_id)),
dict(g2_edges.get(e_source_id)),
"Edges of each graph are not equal")
def assert_starts_with(self, expected_prefix, observed_str, msg=None):
self.assertThat(observed_str,
matchers.StartsWith(expected_prefix), msg)
@staticmethod
def path_get(project_file=None):
root = os.path.abspath(os.path.join(os.path.dirname(__file__),
'..',
'..',
)
)
if project_file:
return os.path.join(root, project_file)
else:
return root
|
openstack/vitrage
|
vitrage/tests/base.py
|
Python
|
apache-2.0
| 4,909
|
# Brett (Berty) Fischler and Hunter (Kenneth) Wapman
# October 2014
# kNN Implementation for Senior Design Project
from collections import Counter
import sets
import math
import sys
import os
from math import isinf
# Minimum normalized RSSI value detected; used as "not detected" value
MIN_DETECTED = 0
COEFF_JACCARD = 3
COEFF_EUCLIDEAN = 1
COEFF_DENSITY = .112
MAC_COUNTS = {}
#########################
### CLASS DEFINITIONS ###
#########################
# Access Point class
class AccessPoint(object):
def __init__(self, ap, from_django=False):
if not from_django:
self.mac = ap[0]
self.strength_dbm = float(ap[1])
self.strength = self.strength_dbm
#self.strength = 10 ** (float(ap[1]) / 10)
self.std = 10 ** (float(ap[2]) / 10)
self.datetime = ap[3]
else:
self.mac = ap['mac_address']
self.strength_dbm = ap['signal_strength']
self.strength = self.strength_dbm
#self.strength = 10 ** (self.strength_dbm / 10)
self.std = 10 ** (ap['standard_deviation'] / 10)
self.datetime = ap['recorded']
# Location Class
# TODO: Look into storing previous distance calculations
class Location(object):
def __init__(self, loc):
self.x = loc[0]
self.y = loc[1]
self.direction = loc[2]
self.floor_id = loc[3]
self.init_aps(loc[4])
def printLoc(self):
sys.stdout.write("Location: (x, y) = (" + str(self.x) + ", " + str(self.y) + \
"), Floor = " + str(self.floor_id) + "\n")
# Stores Access Points in a {mac_id : AccessPoint} dictionary
def init_aps(self, aps):
self.aps = {}
for ap in aps:
self.aps[ap[0]] = AccessPoint(ap)
##########################
### DISTANCE FUNCTIONS ###
##########################
# Returns a set of shared keys between the two given AP dictionaries
def getSharedKeys(aps1, aps2):
keys = sets.Set()
for mac_id in aps1.keys():
keys.add(mac_id)
for mac_id in aps2.keys():
keys.add(mac_id)
return keys
#Calculates distance between this Location and the given dictionary of
#AccessPoints (currently calls function to calculate Euclidean distance)
def kNNDistance(aps1, aps2, density = 0):
distances = []
euc_dist = euclidean(aps1, aps2)
jaccard = jaccardDistance(aps1, aps2)
if jaccard == 0:
return float("INF")
#return 1 / percent_shared + 1.5 * euc_dist
return (COEFF_JACCARD / jaccard) + (COEFF_EUCLIDEAN * euc_dist) + (COEFF_DENSITY * density)
# Given two dictionaries of AccessPoints, calculates the
# Euclidean distance between the two dictionaries
def euclidean(aps1, aps2):
global MIN_DETECTED
keys = getSharedKeys(aps1, aps2)
rVal = 0
for key in keys:
strength1 = MIN_DETECTED
if key in aps1:
strength1 = aps1[key].strength
strength2 = MIN_DETECTED
if key in aps2:
strength2 = aps2[key].strength
rVal = rVal + ((strength1 - strength2) ** 2)
return math.sqrt(rVal)
def jaccardDistance(aps1, aps2):
count = 0
for ap in aps2.values():
if ap.mac in aps1.keys():
count += 1
intersection = count
union = len(aps1.keys()) + len(aps2.keys()) - count
return float(intersection) / union
def realDistance(d1, d2):
if d1 is None or d2 is None:
return 0
return math.sqrt(pow(d1.x - d2.x, 2) + pow(d1.y - d2.y, 2))
# Given a list of tuples where t[0] is the value and t[1] is the distance,
# returns a weighted average of the values
def weighted_avg(tuples, inverse):
### If we want the unweighted average:
#return sum([t[0] for t in tuples]) / len(tuples)
s = 0
for t in tuples:
if t[1] == 0:
return t[0]
if inverse:
weight_sum = sum([1 / t[1] for t in tuples])
else:
weight_sum = sum([t[1] for t in tuples])
for t in tuples:
if isinf(t[1]) or weight_sum == 0:
#print t[0]
return t[0]
if inverse:
s += t[0] * (1 / t[1]) / weight_sum
else:
s += t[0] * t[1] / weight_sum
return s
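# Hedged illustration (not in the original file) of weighted_avg with the
# inverse-distance weighting used by apply_kNN below:
#
#   pts = [(10.0, 1.0), (20.0, 4.0)]   # (value, distance) tuples
#   weighted_avg(pts, True)
#   # weight_sum = 1/1.0 + 1/4.0 = 1.25
#   # result = 10*(1/1.0)/1.25 + 20*(1/4.0)/1.25 = 8.0 + 4.0 = 12.0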
# Uses k - Nearest Neighbor technique to get the coordinates associated with
# the given AccessPoint dictionary
def apply_kNN(data, aps, k = 4, element = None):
k = min(k, len(data))
floor = getFloor(data, aps)
for d in data:
if d.floor_id == floor:
d.distance = kNNDistance(d.aps, aps, density=d.density)
#d.distance = euclidean(d.aps, aps)
#d.distance = 1 / jaccardDistance(d.aps, aps)
else:
d.distance = float("INF")
data = sorted(data, key=lambda x: x.distance)
#data = sorted(data, key=lambda x: realDistance(element, x))
#for d in data:
# print jaccardDistance(aps, d.aps), "->", euclidean(aps, d.aps), "->", kNNDistance(aps, d.aps, density=d.density), "->", d.density, "->", realDistance(d, element)
x = weighted_avg([(loc.x, loc.distance) for loc in data[:k]], True)
y = weighted_avg([(loc.y, loc.distance) for loc in data[:k]], True)
return (x, y, floor, data[:k])
def getFloor(data, aps, k = 5):
k = min(k, len(data))
data = sorted(data, key=lambda d: jaccardDistance(d.aps, aps), reverse=True)
d = Counter([loc.floor_id for loc in data[:k]])
floor = d.most_common(1)[0][0]
return floor
# Returns a list of Locations and an AccessPoint dictionary
def get_locations(data):
locations = []
#sys.stderr.write("LENGTH: " + str(len(data)) + "\n")
for d in data:
cur_macs = d["macs"]
cur_rss = d["rss"]
cur_aps = []
for i in range(len(cur_macs)):
cur_aps.append((cur_macs[i], cur_rss[i], 0, 0))
locations.append((d["x"], d["y"], d["direction"], d["floor_id"], cur_aps))
return [Location(i) for i in locations]
##########################
### GET DATA FUNCTIONS ###
##########################
def getData(db_cursor=None):
if db_cursor is None:
from scripts.db.db import Database
password = os.environ.get('SIRIUS_PASSWORD')
if password is None:
raise Exception('No database password available')
db = Database(password)
cur = db.get_cur()
else:
cur = db_cursor
cur.execute("""SELECT floor_id,marauder_accesspoint.location_id, x_coordinate, y_coordinate, direction,
array_to_string(array_agg(mac_address),',') as MAC_list,
array_to_string(array_agg(signal_strength),',') as strength_list
from marauder_accesspoint
join marauder_location
on marauder_location.id=marauder_accesspoint.location_id
group by floor_id,marauder_accesspoint.location_id,x_coordinate,y_coordinate,direction""")
access_points = cur.fetchall()
res = []
for f in access_points:
msg = {
'floor_id': f[0],
'location_id': f[1],
'x': f[2],
'y': f[3],
'direction': f[4],
'macs': f[5].split(','),
'rss': map(float, f[6].split(','))
}
res.append(msg)
return res
###############################
### NORMALIZATION FUNCTIONS ###
###############################
# Returns the standard deviation of the given list
def get_sd(l):
mean = get_mean(l)
rVal = 0
for elem in l:
rVal += (elem - mean) ** 2
return (rVal / (len(l) - 1)) ** .5
# Returns the mean of the given list
def get_mean(l):
return sum(l) / len(l)
# BRETT NORMALIZE FUNCTION
def normalize_all_data(data, testdata):
global MIN_DETECTED
global MAC_COUNTS
strengths = []
for loc in data:
for ap in loc.aps.values():
strengths.append(ap.strength)
if ap.mac not in MAC_COUNTS.keys():
MAC_COUNTS[ap.mac] = 0
MAC_COUNTS[ap.mac] += 1
mean = get_mean(strengths)
st_dev = get_sd(strengths)
for loc in data:
for ap in loc.aps.values():
ap.strength = (ap.strength - mean) / st_dev
if ap.strength < MIN_DETECTED:
MIN_DETECTED = ap.strength
for loc in testdata:
for ap in loc.aps.values():
ap.strength = (ap.strength - mean) / st_dev
if ap.mac not in MAC_COUNTS.keys():
MAC_COUNTS[ap.mac] = 0
MAC_COUNTS[ap.mac] += 1
def normalize_all_data2(data, testdata):
global MIN_DETECTED
global MAC_COUNTS
minstrength = sys.maxint
maxstrength = -1 * sys.maxint - 1
strengths = []
for loc in data:
for ap in loc.aps.values():
strengths.append(ap.strength)
if ap.strength < minstrength:
minstrength = ap.strength
if ap.strength > maxstrength:
maxstrength = ap.strength
if ap.mac not in MAC_COUNTS.keys():
MAC_COUNTS[ap.mac] = 0
MAC_COUNTS[ap.mac] += 1
for loc in data:
for ap in loc.aps.values():
ap.strength = (ap.strength - minstrength) / (maxstrength - minstrength)
if MIN_DETECTED > ap.strength:
MIN_DETECTED = ap.strength
for loc in testdata:
for ap in loc.aps.values():
ap.strength = (ap.strength - minstrength) / (maxstrength - minstrength)
##########################
### ANALYSIS FUNCTIONS ###
##########################
def error(element, x, y, floor):
if element.floor_id == 6 and floor != 2:
return -1
elif element.floor_id == 15 and floor != 1:
return -1
elif element.floor_id < 3 and element.floor_id != floor:
return -1
else:
dist = math.sqrt(pow(element.x - x, 2) + pow(element.y - y, 2))
return dist
def addDensities(data):
for i in range(len(data)):
count = 1
loc1 = data[i]
for j in range(len(data)):
loc2 = data[j]
if i == j or loc1.floor_id != loc2.floor_id:
continue
if loc1.floor_id == 1:
den_threshold = 9.555 * 10
else:
den_threshold = 14.764 * 10
if realDistance(loc1,loc2) < den_threshold:
count += 1
loc1.density = count
def testAccuracy(data, testdata):
wrong_floor_count = 0
error_total = 0
distances = [0] * 10 # [0-1 meter, 1-2, 2-3, etc]
# 7, 8, 12, 16
#our_points = [2, 4, 6, 7, 13, 17]
our_points = range(19)
testdata = [d for (i, d) in enumerate(testdata) if i in our_points]
for i in range(len(testdata)):
element = testdata[i]
aps = element.aps
(x, y, floor, neighbors) = apply_kNN(data, aps, element = element)
cur_error = error(element, x, y, floor)
if cur_error == -1:
print "Wrong floor"
wrong_floor_count += 1
else:
print element.x, element.y, x, y
for n in neighbors:
print n.x, n.y, n.density
#For Halligan_2.png, 14.764px ~= 1 meter
#For Halligan_1.png 9.555px ~= 1 meter
if floor == 1: #id NOT FLOOR NUMBER!!
print i, cur_error / 14.764
error_total += cur_error / 14.764
distances[min(int(cur_error / 14.764), 9)] += 1
else:
# if cur_error / 9.555 > 9:
# print i, cur_error / 9.555
error_total += cur_error / 9.555
distances[min(int(cur_error / 9.555), 9)] += 1
print "FOR " + str(len(testdata)) + " POINTS:"
print "Incorrect Floor Count:", wrong_floor_count
print "Avg error: " + str(float(error_total) / (len(testdata) - wrong_floor_count)) + "m"
print "Distances:", distances
print ""
return float(error_total) / len(testdata)
if __name__ == "__main__":
sql_data = getData()
all_data = get_locations(sql_data)
data = [d for d in all_data if d.floor_id < 3]
testdata = [d for d in all_data if d.floor_id == 7]
addDensities(data)
normalize_all_data(data, testdata)
testAccuracy(data, testdata)
    sys.exit(1)
    # NOTE: the code below never runs because of the exit above; it is a
    # leftover grid search for tuning COEFF_JACCARD / COEFF_DENSITY.
    global COEFF_DENSITY
global COEFF_JACCARD
COEFF_DENSITY = 0
COEFF_JACCARD = 3
best_density = 0
best_jaccard =0
best_val = 100
for i in range(1):
COEFF_DENSITY = .1
for j in range(10):
cur_error = testAccuracy(data, testdata)
if cur_error < best_val:
best_density = COEFF_DENSITY
best_jaccard = COEFF_JACCARD
best_val = cur_error
COEFF_DENSITY += .004
COEFF_JACCARD += .2
print "BEST ERROR:", best_val
print "BEST COEFFS:", best_jaccard, best_density
|
TeamSirius/Utilities
|
kNN2.py
|
Python
|
apache-2.0
| 12,752
|
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.tests.unit.integrated.v3 import api_sample_base
class FloatingIpDNSTest(api_sample_base.ApiSampleTestBaseV3):
extension_name = "os-floating-ip-dns"
domain = 'domain1.example.org'
name = 'instance1'
scope = 'public'
project = 'project1'
dns_type = 'A'
ip = '192.168.1.1'
def _create_or_update(self):
subs = {'project': self.project,
'scope': self.scope}
response = self._do_put('os-floating-ip-dns/%s' % self.domain,
'floating-ip-dns-create-or-update-req', subs)
subs.update({'domain': self.domain})
self._verify_response('floating-ip-dns-create-or-update-resp', subs,
response, 200)
def _create_or_update_entry(self):
subs = {'ip': self.ip, 'dns_type': self.dns_type}
response = self._do_put('os-floating-ip-dns/%s/entries/%s'
% (self.domain, self.name),
'floating-ip-dns-create-or-update-entry-req',
subs)
subs.update({'name': self.name, 'domain': self.domain})
self._verify_response('floating-ip-dns-create-or-update-entry-resp',
subs, response, 200)
def test_floating_ip_dns_list(self):
self._create_or_update()
response = self._do_get('os-floating-ip-dns')
subs = {'domain': self.domain,
'project': self.project,
'scope': self.scope}
self._verify_response('floating-ip-dns-list-resp', subs,
response, 200)
def test_floating_ip_dns_create_or_update(self):
self._create_or_update()
def test_floating_ip_dns_delete(self):
self._create_or_update()
response = self._do_delete('os-floating-ip-dns/%s' % self.domain)
self.assertEqual(response.status_code, 202)
def test_floating_ip_dns_create_or_update_entry(self):
self._create_or_update_entry()
def test_floating_ip_dns_entry_get(self):
self._create_or_update_entry()
response = self._do_get('os-floating-ip-dns/%s/entries/%s'
% (self.domain, self.name))
subs = {'domain': self.domain,
'ip': self.ip,
'name': self.name}
self._verify_response('floating-ip-dns-entry-get-resp', subs,
response, 200)
def test_floating_ip_dns_entry_delete(self):
self._create_or_update_entry()
response = self._do_delete('os-floating-ip-dns/%s/entries/%s'
% (self.domain, self.name))
self.assertEqual(response.status_code, 202)
def test_floating_ip_dns_entry_list(self):
self._create_or_update_entry()
response = self._do_get('os-floating-ip-dns/%s/entries/%s'
% (self.domain, self.ip))
subs = {'domain': self.domain,
'ip': self.ip,
'name': self.name}
self._verify_response('floating-ip-dns-entry-list-resp', subs,
response, 200)
|
luzheqi1987/nova-annotation
|
nova/tests/unit/integrated/v3/test_floating_ip_dns.py
|
Python
|
apache-2.0
| 3,759
|
# -*- coding: utf-8 -*-
'''
Manage the information in the hosts file
'''
# Import python libs
import os
# Import salt libs
import salt.utils
import salt.utils.odict as odict
# pylint: disable=C0103
def __get_hosts_filename():
'''
Return the path to the appropriate hosts file
'''
# TODO: Investigate using "%SystemRoot%\system32" for this
if salt.utils.is_windows():
return 'C:\\Windows\\System32\\drivers\\etc\\hosts'
return __salt__['config.option']('hosts.file')
def _list_hosts():
'''
Return the hosts found in the hosts file in as an OrderedDict
'''
hfn = __get_hosts_filename()
ret = odict.OrderedDict()
if not os.path.isfile(hfn):
return ret
with salt.utils.fopen(hfn) as ifile:
for line in ifile:
line = line.strip()
if not line:
continue
if line.startswith('#'):
continue
comps = line.split()
ip = comps.pop(0)
ret.setdefault(ip, []).extend(comps)
return ret
def list_hosts():
'''
Return the hosts found in the hosts file in this format::
{'<ip addr>': ['alias1', 'alias2', ...]}
CLI Example:
.. code-block:: bash
salt '*' hosts.list_hosts
'''
# msgpack does not like OrderedDict's
return dict(_list_hosts())
def get_ip(host):
'''
Return the ip associated with the named host
CLI Example:
.. code-block:: bash
salt '*' hosts.get_ip <hostname>
'''
hosts = _list_hosts()
if not hosts:
return ''
    # Look for the ip that has this host among its aliases
for addr in hosts:
if host in hosts[addr]:
return addr
# ip not found
return ''
def get_alias(ip):
'''
Return the list of aliases associated with an ip
CLI Example:
.. code-block:: bash
salt '*' hosts.get_alias <ip addr>
'''
hosts = _list_hosts()
if ip in hosts:
return hosts[ip]
return []
def has_pair(ip, alias):
'''
Return true if the alias is set
CLI Example:
.. code-block:: bash
salt '*' hosts.has_pair <ip> <alias>
'''
hosts = _list_hosts()
return ip in hosts and alias in hosts[ip]
def set_host(ip, alias):
'''
Set the host entry in the hosts file for the given ip, this will overwrite
any previous entry for the given ip
CLI Example:
.. code-block:: bash
salt '*' hosts.set_host <ip> <alias>
'''
hfn = __get_hosts_filename()
ovr = False
if not os.path.isfile(hfn):
return False
lines = salt.utils.fopen(hfn).readlines()
for ind, line in enumerate(lines):
tmpline = line.strip()
if not tmpline:
continue
if tmpline.startswith('#'):
continue
comps = tmpline.split()
if comps[0] == ip:
if not ovr:
lines[ind] = ip + '\t\t' + alias + '\n'
ovr = True
else: # remove other entries
lines[ind] = ''
if not ovr:
# make sure there is a newline
if lines and not lines[-1].endswith(('\n', '\r')):
lines[-1] = '{0}\n'.format(lines[-1])
line = ip + '\t\t' + alias + '\n'
lines.append(line)
with salt.utils.fopen(hfn, 'w+') as ofile:
ofile.writelines(lines)
return True
def rm_host(ip, alias):
'''
Remove a host entry from the hosts file
CLI Example:
.. code-block:: bash
salt '*' hosts.rm_host <ip> <alias>
'''
if not has_pair(ip, alias):
return True
hfn = __get_hosts_filename()
lines = salt.utils.fopen(hfn).readlines()
for ind in range(len(lines)):
tmpline = lines[ind].strip()
if not tmpline:
continue
if tmpline.startswith('#'):
continue
comps = tmpline.split()
if comps[0] == ip:
newline = '{0}\t'.format(comps[0])
for existing in comps[1:]:
if existing == alias:
continue
newline += '\t{0}'.format(existing)
if newline.strip() == ip:
# No aliases exist for the line, make it empty
lines[ind] = ''
else:
# Only an alias was removed
lines[ind] = '{0}\n'.format(newline)
with salt.utils.fopen(hfn, 'w+') as ofile:
ofile.writelines(lines)
return True
def add_host(ip, alias):
'''
Add a host to an existing entry, if the entry is not in place then create
it with the given host
CLI Example:
.. code-block:: bash
salt '*' hosts.add_host <ip> <alias>
'''
hfn = __get_hosts_filename()
if not os.path.isfile(hfn):
return False
if has_pair(ip, alias):
return True
hosts = _list_hosts()
hosts.setdefault(ip, []).append(alias)
_write_hosts(hosts)
return True
def _write_hosts(hosts):
lines = []
for ip, aliases in hosts.iteritems():
line = '{0}\t\t{1}'.format(
ip,
'\t\t'.join(aliases)
)
lines.append(line)
hfn = __get_hosts_filename()
with salt.utils.fopen(hfn, 'w+') as ofile:
ofile.write(
'\n'.join(
[l.strip() for l in lines if l.strip()]
)
)
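# Hedged illustration (not part of the original module) of the on-disk layout
# _write_hosts produces from the {'<ip>': [aliases]} mapping; the addresses and
# hostnames are made up for the example.
#
#   hosts = {'127.0.0.1': ['localhost'],
#            '10.0.0.5': ['web01', 'web01.example.com']}
#   # _write_hosts(hosts) writes one tab-separated line per ip:
#   #   '127.0.0.1\t\tlocalhost'
#   #   '10.0.0.5\t\tweb01\t\tweb01.example.com'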
|
victorywang80/Maintenance
|
saltstack/src/salt/modules/hosts.py
|
Python
|
apache-2.0
| 5,355
|
"""sandcage_django_example URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.http import HttpResponseRedirect
urlpatterns = [
url(r'^apigui/', include('apigui.urls')),
url(r'^$', lambda r: HttpResponseRedirect('apigui/')),
]
|
sandcage/sandcage-api-django
|
src/sandcage_django_example/urls.py
|
Python
|
apache-2.0
| 868
|
from OpenSSL import crypto
from os import path, makedirs, remove
from datetime import datetime
import re
from shutil import copy
import sys
import os
key_dir = 'keys'
index_file = os.path.join(key_dir, 'index.txt')
class SSLCertificateGenerator:
key_dir = None
index_file = None
serial = None
def __init__(self, key_dir=None):
# Define key_dir
if key_dir:
key_dir = key_dir.replace('\\', '/')
if not os.path.exists(key_dir):
os.makedirs(key_dir)
if not os.path.isdir(key_dir):
raise Exception("Key Directory does not exist or is not a directory:" + key_dir)
else:
#key_dir = path.dirname(path.realpath(__file__)) + "/../keys"
key_dir = path.join(path.dirname(path.realpath(__file__)), '..', 'keys')
key_dir = key_dir.replace('\\', '/')
self.key_dir = key_dir
self.index_file = key_dir + '/index.txt'
# Get serial number
try:
serial_file = open(key_dir + '/serial', 'r')
            self.serial = int(serial_file.readline())
            serial_file.close()
except IOError:
self.serial = 1
def _get_cert_dn(self, cert):
dn = ''
        for label, value in cert.get_subject().get_components():
            # get_components() returns bytes under Python 3
            dn += '/' + label.decode("utf-8") + '=' + value.decode("utf-8")
return dn
def _gen_key(self):
# Generate new key
key = crypto.PKey()
key.generate_key(crypto.TYPE_RSA, 2048)
return key
def _create_csr(self, cert_name, key):
req = crypto.X509Req()
req.get_subject().CN = cert_name
req.set_pubkey(key)
req.sign(key, "sha256")
return req
def _write_key_to_file(self, key, filepath):
key_file = open(filepath, 'w')
key_file.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, key).decode("utf-8"))
key_file.close()
def _load_key_from_file(self, filepath):
key_file = open(filepath, 'r')
key = crypto.load_privatekey(crypto.FILETYPE_PEM, key_file.read())
key_file.close()
return key
def _write_cert_to_file(self, cert, filepath):
cert_file = open(filepath, 'w')
cert_file.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert).decode("utf-8"))
cert_file.close()
def _load_cert_from_file(self, filepath):
cert_file = open(filepath, 'r')
cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_file.read())
cert_file.close()
return cert
def _write_csr_to_file(self, csr, filepath):
csr_file = open(filepath, 'w')
csr_file.write(crypto.dump_certificate_request(crypto.FILETYPE_PEM, csr).decode("utf-8"))
csr_file.close()
def _load_csr_from_file(self, filepath):
csr_file = open(filepath, 'r')
csr = crypto.load_certificate_request(crypto.FILETYPE_PEM, csr_file.read())
csr_file.close()
return csr
def _write_pfx_to_file(self, pkcs12, filepath):
pkcs12_file = open(filepath, 'wb')
pkcs12_file.write(pkcs12.export())
pkcs12_file.close()
def _write_crl_to_file(self, crl, ca_cert, ca_key, filepath):
# Write CRL file
crl_file = open(filepath, 'w')
crl_file.write(crl.export(ca_cert, ca_key, days=365).decode("utf-8"))
crl_file.close()
def _load_crl_from_file(self, filepath):
try:
crl_file = open(filepath, 'r')
crl = crypto.load_crl(crypto.FILETYPE_PEM, crl_file.read())
crl_file.close()
except IOError:
# Create new CRL file if it doesn't exist
crl = crypto.CRL()
return crl
def _sign_csr(self, req, ca_key, ca_cert, cert_org=False, cert_ou=False, usage=3, days=3650, alt_names=[]):
expiry_seconds = days * 86400
# Create and sign certificate
cert = crypto.X509()
cert.set_version(2)
cert.set_subject(req.get_subject())
if cert_org:
cert.get_subject().O = cert_org
else:
cert.get_subject().O = ca_cert.get_subject().O
if cert_ou:
cert.get_subject().OU = cert_ou
else:
cert.get_subject().OU = ca_cert.get_subject().OU
cert.set_serial_number(self.serial)
cert.gmtime_adj_notBefore(0)
cert.gmtime_adj_notAfter(expiry_seconds)
cert.set_issuer(ca_cert.get_subject())
cert.set_pubkey(req.get_pubkey())
if usage == 1:
cert.add_extensions([
crypto.X509Extension(b"basicConstraints", True, b"CA:TRUE, pathlen:0"),
crypto.X509Extension(b"keyUsage", True, b"keyCertSign, cRLSign"),
crypto.X509Extension(b"subjectKeyIdentifier", False, b"hash", subject=cert)
])
elif usage == 2:
cert.add_extensions([
crypto.X509Extension(b"extendedKeyUsage", True, b"serverAuth"),
])
elif usage == 3:
cert.add_extensions([
crypto.X509Extension(b"extendedKeyUsage", True, b"clientAuth"),
])
        # Add alt names as a single subjectAltName extension
        if alt_names:
            cert.add_extensions([
                crypto.X509Extension(b"subjectAltName", False, b"DNS:" + ",DNS:".join(alt_names).encode("utf-8"))
            ])
cert.sign(ca_key, "sha256")
# Write to index.txt
db_line = "V\t" + cert.get_notBefore().decode("utf-8") + "\t\t" + hex(
int(cert.get_serial_number())) + "\tunknown\t" + str(cert.get_subject())[18:-2] + "\n"
        index_file = open(self.key_dir + '/index.txt', 'a')
        index_file.write(db_line)
        index_file.close()
        # Write updated serial file
        serial_file = open(self.key_dir + '/serial', 'w')
serial_file.write(str(self.serial + 1))
serial_file.close()
return cert
def gen_ca(self, cert_org="Thinkbox Software", cert_ou="IT", days=3650):
expiry_seconds = days * 86400
# Generate key
key = crypto.PKey()
key.generate_key(crypto.TYPE_RSA, 2048)
# Set up and sign CA certificate
ca = crypto.X509()
ca.set_version(2)
ca.set_serial_number(1)
ca.get_subject().CN = "CA"
ca.get_subject().O = cert_org
ca.get_subject().OU = cert_ou
ca.gmtime_adj_notBefore(0)
ca.gmtime_adj_notAfter(expiry_seconds)
ca.set_issuer(ca.get_subject())
ca.set_pubkey(key)
ca.add_extensions([
crypto.X509Extension(b"basicConstraints", True, b"CA:TRUE, pathlen:0"),
crypto.X509Extension(b"keyUsage", True, b"keyCertSign, cRLSign"),
crypto.X509Extension(b"subjectKeyIdentifier", False, b"hash", subject=ca)
])
ca.sign(key, "sha256")
# Create key directory if it doesn't exist
        if not path.exists(self.key_dir):
            makedirs(self.key_dir)
# Write CA certificate to file
self._write_cert_to_file(ca, self.key_dir + '/ca.crt')
# Write CA key to file
self._write_key_to_file(key, self.key_dir + '/ca.key')
    def get_ca_key(self):
        return self._load_key_from_file(path.join(self.key_dir, 'ca.key'))
    def get_ca_cert(self):
        return self._load_cert_from_file(path.join(self.key_dir, 'ca.crt'))
def gen_cert(self, cert_name, cert_org=False, cert_ou=False, usage=3, days=3650, alt_names=[]):
# usage: 1=ca, 2=server, 3=client
if cert_name == "":
raise Exception("Certificate name cannot be blank")
# Load CA certificate
ca_cert = self._load_cert_from_file(self.key_dir + '/ca.crt')
# Load CA key
ca_key = self._load_key_from_file(self.key_dir + '/ca.key')
# Generate new key
key = self._gen_key()
# Create CSR
req = self._create_csr(cert_name, key)
# Sign CSR
cert = self._sign_csr(req, ca_key, ca_cert, cert_org=cert_org, cert_ou=cert_ou, usage=usage, days=days,
alt_names=alt_names)
# Write new key file
self._write_key_to_file(key, self.key_dir + '/' + cert_name + '.key')
# Write new certificate file
self._write_cert_to_file(cert, self.key_dir + '/' + cert_name + '.crt')
def gen_pfx(self, cert_name):
if cert_name == "":
raise Exception("Certificate name cannot be blank")
# Load CA certificate
ca_cert = self._load_cert_from_file(self.key_dir + '/ca.crt')
# Load Certificate
cert = self._load_cert_from_file(self.key_dir + '/' + cert_name + '.crt')
# Load Private Key
key = self._load_key_from_file(self.key_dir + '/' + cert_name + '.key')
# Set up PKCS12 structure
pkcs12 = crypto.PKCS12()
pkcs12.set_ca_certificates([ca_cert])
pkcs12.set_certificate(cert)
pkcs12.set_privatekey(key)
# Write PFX file
self._write_pfx_to_file(pkcs12, self.key_dir + '/' + cert_name + '.pfx')
def gen_csr(self, name, out_dir):
key = self._gen_key()
csr = self._create_csr(name, key)
self._write_key_to_file(key, out_dir + '/' + name + '.key')
self._write_csr_to_file(csr, out_dir + '/' + name + '.csr')
def sign_csr(self, csr_path):
csr = self._load_csr_from_file(csr_path)
        ca_key = self._load_key_from_file(self.key_dir + '/ca.key')
        ca_cert = self._load_cert_from_file(self.key_dir + '/ca.crt')
cert = self._sign_csr(csr, ca_key, ca_cert)
self._write_cert_to_file(cert, self.key_dir + '/' + csr.get_subject().CN + '.crt')
def revoke_cert(self, cert_name):
# Load CA certificate
ca_cert = self._load_cert_from_file(self.key_dir + '/ca.crt')
# Load CA Key
ca_key = self._load_key_from_file(self.key_dir + '/ca.key')
# Load Certificate
cert = self._load_cert_from_file(self.key_dir + '/' + cert_name + '.crt')
# Load Private Key
key = self._load_key_from_file(self.key_dir + '/' + cert_name + '.key')
# Load CRL File
crl = self._load_crl_from_file(self.key_dir + '/crl.pem')
print ('Revoking ' + cert_name + ' (Serial: ' + str(cert.get_serial_number()) + ')')
# Revoke certificate
revoked = crypto.Revoked()
revoked.set_serial(hex(int(cert.get_serial_number()))[2:].encode("utf-8"))
revoked.set_reason(b'unspecified')
revoked.set_rev_date(datetime.utcnow().strftime('%Y%m%d%H%M%SZ').encode("utf-8"))
crl.add_revoked(revoked)
# Write CRL file
        self._write_crl_to_file(crl, ca_cert, ca_key, self.key_dir + '/crl.pem')
        # Update index file
        index_file = open(self.key_dir + '/index.txt', 'r')
        index_file_new = open(self.key_dir + '/index.txt.new', 'w')
for line in index_file.readlines():
line_split = re.split('\t', line)
if int(line_split[3], 16) == cert.get_serial_number():
new_line = 'R\t' + line_split[1] + '\t' + revoked.get_rev_date().decode("utf-8") + '\t' + line_split[
3] + '\t' + line_split[4] + '\t' + line_split[5]
index_file_new.write(new_line)
else:
index_file_new.write(line)
index_file.close()
index_file_new.close()
        copy(self.key_dir + '/index.txt.new', self.key_dir + '/index.txt')
        remove(self.key_dir + '/index.txt.new')
def renew_crl(self):
# Load CA certificate
ca_cert = self._load_cert_from_file(self.key_dir + '/ca.crt')
# Load CA key
ca_key = self._load_key_from_file(self.key_dir + '/ca.key')
# Load CRL File
crl = self._load_crl_from_file(self.key_dir + '/crl.pem')
# Write CRL file
        self._write_crl_to_file(crl, ca_cert, ca_key, self.key_dir + '/crl.pem')
def run(argv=None):
if argv is None:
argv = sys.argv
import argparse
parser = argparse.ArgumentParser(description='SSL Certificate Generator')
arg_group = parser.add_mutually_exclusive_group()
arg_group.add_argument('--ca', action='store_true', help='Generate a CA certificate')
arg_group.add_argument('--intermediate-ca', action='store_true', help='Generate an intermediate ca certificate')
arg_group.add_argument('--server', action='store_true', help='Generate a server certificate')
arg_group.add_argument('--client', action='store_true', help='Generate a client certificate')
arg_group.add_argument('--pfx', action='store_true', help='Generate a PFX File')
arg_group.add_argument('--revoke', action='store_true', help='Revoke a certificate')
arg_group.add_argument('--renew-crl', action='store_true', help='Renew CRL')
parser.add_argument('--cert-name', help='Certificate name (required with --server, --client, and --pfx)')
parser.add_argument('--cert-org', help='Certificate organization (required with --ca)')
parser.add_argument('--cert-ou', help='Certificate organizational unit (required with --ca)')
parser.add_argument('--alt-name', help='Subject Alternative Name', action='append')
args = parser.parse_args()
sslgen = SSLCertificateGenerator(key_dir)
if args.ca:
error = False
if args.cert_name:
print('Error: Certificate name was specified. CA certificate is always named "ca"')
error = True
if not args.cert_ou:
print("Error: No OU specified")
error = True
if not args.cert_org:
print("Error: No organization specified")
error = True
if error:
exit(1)
sslgen.gen_ca(cert_org=args.cert_org, cert_ou=args.cert_ou)
elif args.intermediate_ca:
if not args.cert_name:
print("Error: No certificate name specified")
exit(1)
sslgen.gen_cert(args.cert_name, cert_org=args.cert_org, cert_ou=args.cert_ou, usage=1)
elif args.server:
if not args.cert_name:
print("Error: No certificate name specified")
exit(1)
sslgen.gen_cert(args.cert_name, cert_org=args.cert_org, cert_ou=args.cert_ou, usage=2, alt_names=args.alt_name)
elif args.client:
if not args.cert_name:
print("Error: No certificate name specified")
exit(1)
sslgen.gen_cert(args.cert_name, cert_org=args.cert_org, cert_ou=args.cert_ou, usage=3, alt_names=args.alt_name)
elif args.pfx:
if not args.cert_name:
print("Error: No certificate name specified")
exit(1)
sslgen.gen_pfx(args.cert_name)
elif args.revoke:
if not args.cert_name:
print("Error: No certificate name specified")
exit(1)
sslgen.revoke_cert(args.cert_name)
elif args.renew_crl:
sslgen.renew_crl()
else:
print("Error: Certificate type must be specified using [--ca|--server|--client|--pfx]")
exit(1)
if __name__ == '__main__':
run()
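# Hedged usage sketch (illustration only, not part of the original module):
# programmatic use of SSLCertificateGenerator without the CLI wrapper. The
# directory and certificate names below are hypothetical.
#
#   gen = SSLCertificateGenerator('keys')
#   gen.gen_ca(cert_org='Example Org', cert_ou='Ops')   # writes keys/ca.crt and keys/ca.key
#   gen.gen_cert('server1', usage=2, alt_names=['server1.example.org'])
#   gen.gen_pfx('server1')                              # bundles server1.crt/.key with the CA cert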
|
kevenli/scrapydd
|
scrapydd/ssl_gen.py
|
Python
|
apache-2.0
| 15,483
|
# selection.py
# This is an selection sort example
# <Chad Hobbs>
def selection_sort(list2):
for i in range(0, len (list2)): # Step through entire list
min = i # Minimum value equals outside range
for j in range(i + 1, len(list2)): # Step through everything above first loop incrementer
if list2[j] < list2[min]: # If the compared number is lower than the minimum, make it the minimum
min = j
list2[i], list2[min] = list2[min], list2[i] # After going through list, swap the outer list position number with the found lowest number
return(list2)
def main():
print("Insertion Sort")
list1 = [4,7,3,2,9,0,4,2,1,6,7]
print(list1)
list2 = selection_sort(list1)
print(list2)
main()
|
itsallvoodoo/csci-school
|
CSCI220/Week 12 - APR02-06/selection.py
|
Python
|
apache-2.0
| 874
|
import logging
import settings
from logging.handlers import SysLogHandler
from copy import copy
def initSyslogLogger():
"""
Configure syslog loggers
:return: syslog log handler
"""
# close any zombie handlers for root logger
for handler in copy(logging.getLogger().handlers):
logging.getLogger().removeHandler(handler)
handler.close()
# create our own custom formatter and handler for syslog
log_formatter = logging.Formatter('[%(asctime)s] [%(levelname)s] [%(pathname)s->%(lineno)d]: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
log_handler = logging.handlers.SysLogHandler(address="/dev/log", facility=settings.DSIP_LOG_FACILITY)
log_handler.setLevel(settings.DSIP_LOG_LEVEL)
log_handler.setFormatter(log_formatter)
# set log handler for our dsiprouter app
logging.getLogger().setLevel(settings.DSIP_LOG_LEVEL)
logging.getLogger().addHandler(log_handler)
# redirect stderr and stdout to syslog
# if not settings.DEBUG:
# sys.stderr = StreamLogger(level=logging.WARNING)
# sys.stdout = StreamLogger(level=logging.DEBUG)
return log_handler
# import this file to setup logging
# syslog handler created globally
# syslog_handler = initSyslogLogger()
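# Hedged usage sketch (illustration only, not part of the original module):
#
#   handler = initSyslogLogger()
#   logging.getLogger().info("dsiprouter logging initialised")   # routed to syslog
#   # on shutdown the handler can be detached with
#   # logging.getLogger().removeHandler(handler); handler.close()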
|
khorsmann/dsiprouter
|
gui/sysloginit.py
|
Python
|
apache-2.0
| 1,293
|
# -*- coding: utf-8 -*-
from model.group import Group
#@pytest.mark.parametrize("group", testdata, ids=[repr(x) for x in testdata])
def test_testAddGroup1(app, db, json_groups):
group = json_groups
old_groups = db.get_group_list()
app.group.create(group)
# assert len(old_groups) + 1 == app.group.count()
new_groups = db.get_group_list()
old_groups.append(group)
assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)
|
krzychPG/QACourses_traning
|
test/test_add_group.py
|
Python
|
apache-2.0
| 484
|
###
# Copyright 2015-2020, Institute for Systems Biology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
from __future__ import print_function
from builtins import str
from builtins import object
import datetime
import logging
import traceback
import os
from os.path import join, dirname
import re
import csv
from argparse import ArgumentParser
import sys
import time
from copy import deepcopy
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "isb_cgc.settings")
import django
django.setup()
from isb_cgc import settings
from django.contrib.auth.models import User
from rest_framework.authtoken.models import Token
from django.core.exceptions import ObjectDoesNotExist
try:
cron_user = User.objects.get(username=settings.CRON_USER)
except ObjectDoesNotExist:
print("Cron user {} not found - creating.".format(settings.CRON_USER))
cron_user = User.objects.create(username=settings.CRON_USER)
token = Token.objects.create(user=cron_user)
if settings.IS_DEV and settings.CONNECTION_IS_LOCAL:
    with open(join(dirname(__file__), '../{}{}'.format(settings.SECURE_LOCAL_PATH, "dev.cron_token.json")), "w") as f:
        f.write(str(token))
else:
print("{} user token: {}".format(settings.CRON_USER,str(token)))
|
isb-cgc/ISB-CGC-Webapp
|
scripts/create_api_token.py
|
Python
|
apache-2.0
| 1,734
|
from copy import copy
import importlib
import numpy as np
import re
import os
import sys
from .. import lu
from .. import utils
LUI_MODEL_MAP = {'annual': 'cropland',
'nitrogen': 'cropland',
'cropland': 'cropland',
'pasture': 'pasture',
'perennial': 'cropland',
'primary': 'primary',
'rangelands': 'pasture',
'timber': 'cropland',
'urban': 'urban',
'young_secondary': 'secondary',
'intermediate_secondary': 'secondary',
'mature_secondary': 'secondary',
}
def model(name):
return LUI_MODEL_MAP[name]
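# For example, model('annual') and model('timber') both resolve to the
# 'cropland' intensity model, while model('rangelands') resolves to 'pasture',
# per LUI_MODEL_MAP above.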
class LUH2(object):
def __init__(self, name, intensity):
self._name = name
self._intensity = intensity
self._inputs = []
mod_name = model(name)
py = os.path.join(utils.lui_model_dir(), '%s.py' % mod_name)
if not os.path.isfile(py):
raise RuntimeError('could not find python module for %s' % mod_name)
rds = os.path.join(utils.lui_model_dir(), '%s.rds' % mod_name)
if not os.path.isfile(rds):
raise RuntimeError('could not find RDS file for %s' % mod_name)
if os.path.getmtime(py) < os.path.getmtime(rds):
raise RuntimeError('python module is older than RDS file for %s' % name)
if intensity != 'minimal':
if utils.lui_model_dir() not in sys.path:
sys.path.append(utils.lui_model_dir())
self._pkg = importlib.import_module(mod_name)
self._pkg_func = getattr(self._pkg, intensity)
self._pkg_inputs = getattr(self._pkg, 'inputs')()
self._inputs += [name if x == mod_name else x for x in self._pkg_inputs]
self._finputs = copy(self._inputs)
self._inputs += [name + '_' + intensity + '_ref']
def myfunc(df):
args = [df[arg] for arg in self._finputs]
return self._pkg_func(*args)
self._func = myfunc
if intensity == 'light':
self._inputs += [name + '_intense']
elif intensity == 'minimal':
self._inputs += [name + '_intense', name + '_light']
@property
def name(self):
return self._name + '_' + self.intensity
@property
def as_intense(self):
return self._name + '_intense'
@property
def as_light(self):
return self._name + '_light'
@property
def as_minimal(self):
return self._name + '_minimal'
@property
def intensity(self):
return self._intensity
@property
def syms(self):
return self._inputs
def eval(self, df):
if self.intensity == 'minimal':
res = (df[self._name] - df[self.as_intense] - df[self.as_light])
return res
res = self._func(df)
res[np.where(np.isnan(res))] = 1.0
res = np.clip(df[self.name + '_ref'] + res, 0, 1)
if self.intensity == 'light':
intense = df[self.as_intense] / (df[self._name] + 1e-10)
res = np.where(intense + res > 1, 1 - intense, res)
res *= df[self._name]
return res
def _predictify(sym, prefix):
newr = sym.replace(prefix, '')
newr = newr.replace(' vegetation', '')
newr = newr.replace(' forest', '_pri')
newr = newr.replace('Managed ', '')
newr = newr.replace(' secondary', '_secondary')
newr = re.sub(r'(Minimal|Light|Intense) use', "\\1", newr)
newr = newr.lower()
name = newr.split(' ')[0]
newr = newr.replace(' ', '_')
newr = newr.replace('pasture_light', 'pasture_minimal_and_light')
newr = newr.replace('rangelands_light', 'rangelands_light_and_intense')
assert name in lu.luh2.types(), 'unknown land use type %s' % sym
return newr
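# Illustrative examples (assuming 'cropland' and 'pasture' are among
# lu.luh2.types()): with an empty prefix, _predictify('Cropland Minimal use', '')
# yields 'cropland_minimal', and _predictify('Pasture Light use', '') yields
# 'pasture_minimal_and_light' via the special-case replacement above.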
def is_luh2(syms, prefix):
for sym in syms:
try:
newr = _predictify(sym, prefix)
except AssertionError as e:
return False
return True
def predictify(root, prefix):
if isinstance(root, str) and re.match(prefix, root):
newr = _predictify(root, prefix)
return newr
return root
|
ricardog/raster-project
|
projections/lui/luh2.py
|
Python
|
apache-2.0
| 3,892
|
# -*- coding: utf-8 -*-
# pylint: disable-msg=W0232
# pylint-version = 0.7.0
#
# Copyright 2004-2005 André Malo or his licensors, as applicable
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Text based news notifier (via NNTP)
"""
__author__ = "André Malo"
__docformat__ = "restructuredtext en"
__all__ = ['getNotifier']
def getNotifier(settings, groupset):
""" Returns an initialized notifier or nothing
:Parameters:
- `settings`: The svnmailer settings
- `groupset`: The groupset to process
:Types:
- `settings`: `svnmailer.settings._base.BaseSettings`
- `groupset`: `list`
:return: The list of notifiers (containing 0 or 1 member)
:rtype: ``list``
"""
from svnmailer.notifier import _textnews
cls = None
if settings.general.nntp_host:
cls = NNTPSubmitter
if cls:
return _textnews.getNotifier(cls, settings, groupset)
return []
class NNTPSubmitter(object):
""" Use NNTP to submit the notification as news article """
_settings = None
def sendNews(self, posting):
""" Sends the posting via nntp """
import cStringIO, nntplib
fp = cStringIO.StringIO()
try:
posting.dump(fp)
fp.seek(0)
general = self._settings.general
host, port = (general.nntp_host, nntplib.NNTP_PORT)
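            # A host value with exactly one colon is treated as "host:port"
            # (e.g. "news.example.org:1119"); values with no colon, or with
            # several colons such as raw IPv6 literals, keep the default port.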
if ':' in host and host.find(':') == host.rfind(':'):
host, port = host.split(':', 1)
conn = nntplib.NNTP(
host = host, port = int(port), readermode = True,
user = general.nntp_user, password = general.nntp_pass,
)
conn.post(fp)
conn.quit()
finally:
fp.close()
|
m-tmatma/svnmailer
|
src/lib/svnmailer/notifier/news.py
|
Python
|
apache-2.0
| 2,291
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes for storing ragged tensors and their values."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.client import session
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_ragged_conversion_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.ragged import ragged_config
from tensorflow.python.ops.ragged import ragged_tensor_value
from tensorflow.python.ops.ragged import ragged_util
from tensorflow.python.ops.ragged import segment_id_ops
from tensorflow.python.util.tf_export import tf_export
# pylint: disable=protected-access
_eval_using_default_session = ops._eval_using_default_session
# pylint: enable=protected-access
#===============================================================================
# RaggedTensor
#===============================================================================
@tf_export("RaggedTensor")
class RaggedTensor(composite_tensor.CompositeTensor):
"""Represents a ragged tensor.
A `RaggedTensor` is a tensor with one or more *ragged dimensions*, which are
dimensions whose slices may have different lengths. For example, the inner
(column) dimension of `rt=[[3, 1, 4, 1], [], [5, 9, 2], [6], []]` is ragged,
since the column slices (`rt[0, :]`, ..., `rt[4, :]`) have different lengths.
Dimensions whose slices all have the same length are called *uniform
dimensions*. The outermost dimension of a `RaggedTensor` is always uniform,
since it consists of a single slice (and so there is no possibility for
differing slice lengths).
The total number of dimensions in a `RaggedTensor` is called its *rank*,
and the number of ragged dimensions in a `RaggedTensor` is called its
*ragged-rank*. A `RaggedTensor`'s ragged-rank is fixed at graph creation
time: it can't depend on the runtime values of `Tensor`s, and can't vary
dynamically for different session runs.
### Potentially Ragged Tensors
Many ops support both `Tensor`s and `RaggedTensor`s. The term "potentially
ragged tensor" may be used to refer to a tensor that might be either a
`Tensor` or a `RaggedTensor`. The ragged-rank of a `Tensor` is zero.
### Documenting RaggedTensor Shapes
When documenting the shape of a RaggedTensor, ragged dimensions can be
indicated by enclosing them in parentheses. For example, the shape of
a 3-D `RaggedTensor` that stores the fixed-size word embedding for each
word in a sentence, for each sentence in a batch, could be written as
`[num_sentences, (num_words), embedding_size]`. The parentheses around
`(num_words)` indicate that dimension is ragged, and that the length
of each element list in that dimension may vary for each item.
### Component Tensors
Internally, a `RaggedTensor` consists of a concatenated list of values that
are partitioned into variable-length rows. In particular, each `RaggedTensor`
consists of:
* A `values` tensor, which concatenates the variable-length rows into a
flattened list. For example, the `values` tensor for
`[[3, 1, 4, 1], [], [5, 9, 2], [6], []]` is `[3, 1, 4, 1, 5, 9, 2, 6]`.
* A `row_splits` vector, which indicates how those flattened values are
divided into rows. In particular, the values for row `rt[i]` are stored
in the slice `rt.values[rt.row_splits[i]:rt.row_splits[i+1]]`.
Example:
```python
>>> print(tf.RaggedTensor.from_row_splits(
... values=[3, 1, 4, 1, 5, 9, 2, 6],
... row_splits=[0, 4, 4, 7, 8, 8]))
<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
```
### Alternative Row-Partitioning Schemes
In addition to `row_splits`, ragged tensors provide support for four other
row-partitioning schemes:
* `row_lengths`: a vector with shape `[nrows]`, which specifies the length
of each row.
* `value_rowids` and `nrows`: `value_rowids` is a vector with shape
`[nvals]`, corresponding one-to-one with `values`, which specifies
each value's row index. In particular, the row `rt[row]` consists of the
values `rt.values[j]` where `value_rowids[j]==row`. `nrows` is an
integer scalar that specifies the number of rows in the
`RaggedTensor`. (`nrows` is used to indicate trailing empty rows.)
* `row_starts`: a vector with shape `[nrows]`, which specifies the start
offset of each row. Equivalent to `row_splits[:-1]`.
* `row_limits`: a vector with shape `[nrows]`, which specifies the stop
offset of each row. Equivalent to `row_splits[1:]`.
Example: The following ragged tensors are equivalent, and all represent the
nested list `[[3, 1, 4, 1], [], [5, 9, 2], [6], []]`.
```python
>>> values = [3, 1, 4, 1, 5, 9, 2, 6]
>>> rt1 = RaggedTensor.from_row_splits(values, row_splits=[0, 4, 4, 7, 8, 8])
>>> rt2 = RaggedTensor.from_row_lengths(values, row_lengths=[4, 0, 3, 1, 0])
>>> rt3 = RaggedTensor.from_value_rowids(
... values, value_rowids=[0, 0, 0, 0, 2, 2, 2, 3], nrows=5)
>>> rt4 = RaggedTensor.from_row_starts(values, row_starts=[0, 4, 4, 7, 8])
>>> rt5 = RaggedTensor.from_row_limits(values, row_limits=[4, 4, 7, 8, 8])
```
### Multiple Ragged Dimensions
`RaggedTensor`s with multiple ragged dimensions can be defined by using
a nested `RaggedTensor` for the `values` tensor. Each nested `RaggedTensor`
adds a single ragged dimension.
```python
>>> inner_rt = RaggedTensor.from_row_splits( # =rt1 from above
... values=[3, 1, 4, 1, 5, 9, 2, 6], row_splits=[0, 4, 4, 7, 8, 8])
>>> outer_rt = RaggedTensor.from_row_splits(
... values=inner_rt, row_splits=[0, 3, 3, 5])
  >>> print(outer_rt.to_list())
  [[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]]
  >>> print(outer_rt.ragged_rank)
  2
```
The factory function `RaggedTensor.from_nested_row_splits` may be used to
construct a `RaggedTensor` with multiple ragged dimensions directly, by
providing a list of `row_splits` tensors:
```python
>>> RaggedTensor.from_nested_row_splits(
... flat_values=[3, 1, 4, 1, 5, 9, 2, 6],
... nested_row_splits=([0, 3, 3, 5], [0, 4, 4, 7, 8, 8])).to_list()
[[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]]
```
### Uniform Inner Dimensions
`RaggedTensor`s with uniform inner dimensions can be defined
by using a multidimensional `Tensor` for `values`.
```python
>>> rt = RaggedTensor.from_row_splits(values=tf.ones([5, 3]),
  ...                               row_splits=[0, 2, 5])
  >>> print(rt.to_list())
  [[[1, 1, 1], [1, 1, 1]],
   [[1, 1, 1], [1, 1, 1], [1, 1, 1]]]
  >>> print(rt.shape)
  (2, ?, 3)
```
### RaggedTensor Shape Restrictions
The shape of a RaggedTensor is currently restricted to have the following
form:
* A single uniform dimension
* Followed by one or more ragged dimensions
* Followed by zero or more uniform dimensions.
This restriction follows from the fact that each nested `RaggedTensor`
replaces the uniform outermost dimension of its `values` with a uniform
dimension followed by a ragged dimension.
"""
#=============================================================================
# Constructor (private)
#=============================================================================
def __init__(self,
values,
row_splits,
cached_row_lengths=None,
cached_value_rowids=None,
cached_nrows=None,
internal=False):
"""Creates a `RaggedTensor` with a specified partitioning for `values`.
This constructor is private -- please use one of the following ops to
build `RaggedTensor`s:
* `tf.RaggedTensor.from_row_lengths`
* `tf.RaggedTensor.from_value_rowids`
* `tf.RaggedTensor.from_row_splits`
* `tf.RaggedTensor.from_row_starts`
* `tf.RaggedTensor.from_row_limits`
* `tf.RaggedTensor.from_nested_row_splits`
* `tf.RaggedTensor.from_nested_row_lengths`
* `tf.RaggedTensor.from_nested_value_rowids`
Args:
values: A potentially ragged tensor of any dtype and shape `[nvals, ...]`.
row_splits: A 1-D integer tensor with shape `[nrows+1]`.
cached_row_lengths: A 1-D integer tensor with shape `[nrows]`
cached_value_rowids: A 1-D integer tensor with shape `[nvals]`.
cached_nrows: A 1-D integer scalar tensor.
internal: True if the constructor is being called by one of the factory
methods. If false, an exception will be raised.
Raises:
TypeError: If a row partitioning tensor has an inappropriate dtype.
TypeError: If exactly one row partitioning argument was not specified.
ValueError: If a row partitioning tensor has an inappropriate shape.
ValueError: If multiple partitioning arguments are specified.
      ValueError: If `nrows` is specified but `value_rowids` is not.
"""
if not internal:
raise ValueError("RaggedTensor constructor is private; please use one "
"of the factory methods instead (e.g., "
"RaggedTensor.from_row_lengths())")
is_tensor_spec = isinstance(row_splits, tensor_spec.TensorSpec)
if is_tensor_spec:
if not (isinstance(values, tensor_spec.TensorSpec) or
(isinstance(values, RaggedTensor) and
isinstance(values.row_splits, tensor_spec.TensorSpec))):
raise TypeError("Expected values to be a TensorSpec, got %r" % values)
else:
# Validate the arguments.
if not isinstance(row_splits, ops.Tensor):
raise TypeError("Row-partitioning argument must be a Tensor, got %r" %
row_splits)
if not isinstance(values, (RaggedTensor, ops.Tensor)):
raise TypeError("values must be a Tensor or RaggedTensor, got %r" %
values)
if row_splits.dtype not in (dtypes.int32, dtypes.int64):
raise ValueError("Row-partitioning argument must be int32 or int64")
# Validate shapes & dtypes.
row_splits.shape.assert_has_rank(1)
values.shape.with_rank_at_least(1)
if not is_tensor_spec:
row_splits.set_shape([None])
if isinstance(values, RaggedTensor):
assert row_splits.dtype == values.row_splits.dtype
self._values = values
self._row_splits = row_splits
# Store any cached tensors. These are used to avoid unnecessary
# round-trip conversions when a RaggedTensor is constructed from
# lengths or rowids, and we later want those lengths/rowids back.
for tensor in [cached_row_lengths, cached_value_rowids, cached_nrows]:
if tensor is not None:
if not isinstance(tensor, ops.Tensor):
raise TypeError("Cached value must be a Tensor or None.")
elif tensor.dtype not in (dtypes.int32, dtypes.int64):
raise TypeError("Cached value must be int32 or int64.")
self._cached_row_lengths = cached_row_lengths
self._cached_value_rowids = cached_value_rowids
self._cached_nrows = cached_nrows
#=============================================================================
# Factory Methods
#=============================================================================
@classmethod
def from_value_rowids(cls,
values,
value_rowids,
nrows=None,
name=None,
validate=True):
"""Creates a `RaggedTensor` with rows partitioned by `value_rowids`.
The returned `RaggedTensor` corresponds with the python list defined by:
```python
result = [[values[i] for i in range(len(values)) if value_rowids[i] == row]
for row in range(nrows)]
```
Args:
values: A potentially ragged tensor with shape `[nvals, ...]`.
value_rowids: A 1-D integer tensor with shape `[nvals]`, which corresponds
one-to-one with `values`, and specifies each value's row index. Must be
nonnegative, and must be sorted in ascending order.
nrows: An integer scalar specifying the number of rows. This should be
        specified if the `RaggedTensor` may contain trailing empty rows. Must
be greater than `value_rowids[-1]` (or zero if `value_rowids` is empty).
Defaults to `value_rowids[-1]` (or zero if `value_rowids` is empty).
name: A name prefix for the RaggedTensor (optional).
validate: If true, then use assertions to check that the arguments form
a valid `RaggedTensor`.
Returns:
A `RaggedTensor`. `result.rank = values.rank + 1`.
`result.ragged_rank = values.ragged_rank + 1`.
Raises:
ValueError: If `nrows` is incompatible with `value_rowids`.
#### Example:
```python
>>> print(tf.RaggedTensor.from_value_rowids(
... values=[3, 1, 4, 1, 5, 9, 2, 6],
... value_rowids=[0, 0, 0, 0, 2, 2, 2, 3],
... nrows=5))
<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
```
"""
if not isinstance(validate, bool):
raise TypeError("validate must have type bool")
with ops.name_scope(name, "RaggedFromValueRowIds",
[values, value_rowids, nrows]):
values, value_rowids = cls._convert_values_and_row_partition(
values, value_rowids, "value_rowids")
if nrows is None:
const_rowids = tensor_util.constant_value(value_rowids)
if const_rowids is None:
nrows = array_ops.concat([value_rowids[-1:], [-1]], axis=0)[0] + 1
const_nrows = None
else:
const_nrows = const_rowids[-1] + 1 if const_rowids.size > 0 else 0
nrows = ops.convert_to_tensor(const_nrows, value_rowids.dtype,
name="nrows")
else:
nrows = ops.convert_to_tensor(nrows, value_rowids.dtype, "nrows")
const_nrows = tensor_util.constant_value(nrows)
if const_nrows is not None:
if const_nrows < 0:
raise ValueError("Expected nrows >= 0; got %d" % const_nrows)
const_rowids = tensor_util.constant_value(value_rowids)
if const_rowids is not None and const_rowids.size > 0:
if not const_nrows >= const_rowids[-1] + 1:
raise ValueError(
"Expected nrows >= value_rowids[-1] + 1; got nrows=%d, "
"value_rowids[-1]=%d" % (const_nrows, const_rowids[-1]))
value_rowids.shape.assert_has_rank(1)
nrows.shape.assert_has_rank(0)
values.shape[:1].assert_is_compatible_with(value_rowids.shape)
if validate:
msg = "Arguments to from_value_rowids do not form a valid RaggedTensor"
nvals1 = _nrows(values)
nvals2 = _nrows(value_rowids)
checks = [
check_ops.assert_rank(value_rowids, 1, message=msg),
check_ops.assert_rank(nrows, 0, message=msg),
check_ops.assert_equal(nvals1, nvals2, message=msg),
check_ops.assert_non_negative(value_rowids[:1], message=msg),
_assert_monotonic_increasing(value_rowids, message=msg),
check_ops.assert_less(value_rowids[-1:], nrows, message=msg),
]
if not isinstance(values, RaggedTensor):
checks.append(check_ops.assert_rank_at_least(values, 1))
value_rowids = control_flow_ops.with_dependencies(checks, value_rowids)
# Convert value_rowids & nrows to row_splits.
# Note: we don't use segment_ids_to_row_splits() here because we want
# to save the intermediate value `row_lengths`, so we can cache it.
# TODO(b/116708836) Upgrade bincount to accept int64 so we can skip the
# cast.
value_rowids_int32 = math_ops.cast(value_rowids, dtypes.int32)
nrows_int32 = math_ops.cast(nrows, dtypes.int32)
row_lengths = math_ops.bincount(
value_rowids_int32,
minlength=nrows_int32,
maxlength=nrows_int32,
dtype=value_rowids.dtype)
row_splits = array_ops.concat([[0], math_ops.cumsum(row_lengths)], axis=0)
if const_nrows is not None:
row_lengths.set_shape([const_nrows])
row_splits.set_shape([const_nrows + 1])
return cls(
values,
row_splits,
cached_row_lengths=row_lengths,
cached_value_rowids=value_rowids,
cached_nrows=nrows,
internal=True)
@classmethod
def from_row_splits(cls, values, row_splits, name=None, validate=True):
"""Creates a `RaggedTensor` with rows partitioned by `row_splits`.
The returned `RaggedTensor` corresponds with the python list defined by:
```python
result = [values[row_splits[i]:row_splits[i + 1]]
for i in range(len(row_splits) - 1)]
```
Args:
values: A potentially ragged tensor with shape `[nvals, ...]`.
row_splits: A 1-D integer tensor with shape `[nrows+1]`. Must not be
empty, and must be sorted in ascending order. `row_splits[0]` must be
zero and `row_splits[-1]` must be `nvals`.
name: A name prefix for the RaggedTensor (optional).
validate: If true, then use assertions to check that the arguments form
a valid `RaggedTensor`.
Returns:
A `RaggedTensor`. `result.rank = values.rank + 1`.
`result.ragged_rank = values.ragged_rank + 1`.
Raises:
ValueError: If `row_splits` is an empty list.
#### Example:
```python
>>> print(tf.RaggedTensor.from_row_splits(
... values=[3, 1, 4, 1, 5, 9, 2, 6],
... row_splits=[0, 4, 4, 7, 8, 8]))
<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
```
"""
if not isinstance(validate, bool):
raise TypeError("validate must have type bool")
if isinstance(row_splits, (list, tuple)) and not row_splits:
raise ValueError("row_splits tensor may not be empty.")
if isinstance(row_splits, tensor_spec.TensorSpec):
return cls(values=values, row_splits=row_splits, internal=True)
with ops.name_scope(name, "RaggedFromRowSplits", [values, row_splits]):
values, row_splits = cls._convert_values_and_row_partition(
values, row_splits, "row_splits")
row_splits.shape.assert_has_rank(1)
if validate:
msg = "Arguments to from_row_splits do not form a valid RaggedTensor"
nvals = _nrows(values, row_splits.dtype)
checks = [
check_ops.assert_rank(row_splits, 1, message=msg),
_assert_zero(row_splits[0], message=msg),
_assert_monotonic_increasing(row_splits, message=msg),
check_ops.assert_equal(row_splits[-1], nvals, message=msg),
]
if not isinstance(values, RaggedTensor):
checks.append(check_ops.assert_rank_at_least(values, 1))
row_splits = control_flow_ops.with_dependencies(checks, row_splits)
return cls(values=values, row_splits=row_splits, internal=True)
@classmethod
def from_row_lengths(cls, values, row_lengths, name=None, validate=True):
"""Creates a `RaggedTensor` with rows partitioned by `row_lengths`.
The returned `RaggedTensor` corresponds with the python list defined by:
```python
result = [[values.pop(0) for i in range(length)]
for length in row_lengths]
```
Args:
values: A potentially ragged tensor with shape `[nvals, ...]`.
row_lengths: A 1-D integer tensor with shape `[nrows]`. Must be
nonnegative. `sum(row_lengths)` must be `nvals`.
name: A name prefix for the RaggedTensor (optional).
validate: If true, then use assertions to check that the arguments form
a valid `RaggedTensor`.
Returns:
A `RaggedTensor`. `result.rank = values.rank + 1`.
`result.ragged_rank = values.ragged_rank + 1`.
#### Example:
```python
>>> print(tf.RaggedTensor.from_row_lengths(
... values=[3, 1, 4, 1, 5, 9, 2, 6],
... row_lengths=[4, 0, 3, 1, 0]))
    <tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
```
"""
if not isinstance(validate, bool):
raise TypeError("validate must have type bool")
with ops.name_scope(name, "RaggedFromRowLengths", [values, row_lengths]):
values, row_lengths = cls._convert_values_and_row_partition(
values, row_lengths, "row_lengths")
row_lengths.shape.assert_has_rank(1)
if validate:
msg = "Arguments to from_row_lengths do not form a valid RaggedTensor"
nvals1 = math_ops.reduce_sum(row_lengths)
nvals2 = _nrows(values, row_lengths.dtype)
checks = [
check_ops.assert_rank(row_lengths, 1, message=msg),
check_ops.assert_non_negative(row_lengths, message=msg),
check_ops.assert_equal(nvals1, nvals2, message=msg)
]
if not isinstance(values, RaggedTensor):
checks.append(check_ops.assert_rank_at_least(values, 1))
row_lengths = control_flow_ops.with_dependencies(checks, row_lengths)
row_limits = math_ops.cumsum(row_lengths)
row_splits = array_ops.concat([[0], row_limits], axis=0)
return cls(
values=values,
row_splits=row_splits,
cached_row_lengths=row_lengths,
internal=True)
@classmethod
def from_row_starts(cls, values, row_starts, name=None, validate=True):
"""Creates a `RaggedTensor` with rows partitioned by `row_starts`.
Equivalent to: `from_row_splits(values, concat([row_starts, nvals]))`.
Args:
values: A potentially ragged tensor with shape `[nvals, ...]`.
row_starts: A 1-D integer tensor with shape `[nrows]`. Must be
nonnegative and sorted in ascending order. If `nrows>0`, then
`row_starts[0]` must be zero.
name: A name prefix for the RaggedTensor (optional).
validate: If true, then use assertions to check that the arguments form
a valid `RaggedTensor`.
Returns:
A `RaggedTensor`. `result.rank = values.rank + 1`.
`result.ragged_rank = values.ragged_rank + 1`.
#### Example:
```python
>>> print(tf.RaggedTensor.from_row_starts(
... values=[3, 1, 4, 1, 5, 9, 2, 6],
... row_starts=[0, 4, 4, 7, 8]))
<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
```
"""
if not isinstance(validate, bool):
raise TypeError("validate must have type bool")
with ops.name_scope(name, "RaggedFromRowStarts", [values, row_starts]):
values, row_starts = cls._convert_values_and_row_partition(
values, row_starts, "row_starts")
row_starts.shape.assert_has_rank(1)
nvals = _nrows(values, row_starts.dtype)
if validate:
msg = "Arguments to from_row_starts do not form a valid RaggedTensor"
checks = [
check_ops.assert_rank(row_starts, 1, message=msg),
_assert_zero(row_starts[:1], message=msg),
_assert_monotonic_increasing(row_starts, message=msg),
check_ops.assert_less_equal(row_starts[-1:], nvals, message=msg),
]
if not isinstance(values, RaggedTensor):
checks.append(check_ops.assert_rank_at_least(values, 1))
row_starts = control_flow_ops.with_dependencies(checks, row_starts)
row_splits = array_ops.concat([row_starts, [nvals]], axis=0)
return cls(values=values, row_splits=row_splits, internal=True)
@classmethod
def from_row_limits(cls, values, row_limits, name=None, validate=True):
"""Creates a `RaggedTensor` with rows partitioned by `row_limits`.
Equivalent to: `from_row_splits(values, concat([0, row_limits]))`.
Args:
values: A potentially ragged tensor with shape `[nvals, ...]`.
row_limits: A 1-D integer tensor with shape `[nrows]`. Must be sorted in
ascending order. If `nrows>0`, then `row_limits[-1]` must be `nvals`.
name: A name prefix for the RaggedTensor (optional).
validate: If true, then use assertions to check that the arguments form
a valid `RaggedTensor`.
Returns:
A `RaggedTensor`. `result.rank = values.rank + 1`.
`result.ragged_rank = values.ragged_rank + 1`.
#### Example:
```python
>>> print(tf.RaggedTensor.from_row_limits(
... values=[3, 1, 4, 1, 5, 9, 2, 6],
... row_limits=[4, 4, 7, 8, 8]))
<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
```
"""
if not isinstance(validate, bool):
raise TypeError("validate must have type bool")
with ops.name_scope(name, "RaggedFromRowLimits", [values, row_limits]):
values, row_limits = cls._convert_values_and_row_partition(
values, row_limits, "row_limits")
row_limits.shape.assert_has_rank(1)
if validate:
msg = "Arguments to from_row_limits do not form a valid RaggedTensor"
nvals = _nrows(values, row_limits.dtype)
checks = [
check_ops.assert_rank(row_limits, 1, message=msg),
check_ops.assert_non_negative(row_limits[:1], message=msg),
_assert_monotonic_increasing(row_limits, message=msg),
check_ops.assert_equal(row_limits[-1:], nvals, message=msg)
]
if not isinstance(values, RaggedTensor):
checks.append(check_ops.assert_rank_at_least(values, 1))
row_limits = control_flow_ops.with_dependencies(checks, row_limits)
zero = array_ops.zeros([1], row_limits.dtype)
row_splits = array_ops.concat([zero, row_limits], axis=0)
return cls(values=values, row_splits=row_splits, internal=True)
@classmethod
def from_nested_value_rowids(cls,
flat_values,
nested_value_rowids,
nested_nrows=None,
name=None,
validate=True):
"""Creates a `RaggedTensor` from a nested list of `value_rowids` tensors.
Equivalent to:
```python
result = flat_values
for (rowids, nrows) in reversed(zip(nested_value_rowids, nested_nrows)):
result = from_value_rowids(result, rowids, nrows)
```
Args:
flat_values: A potentially ragged tensor.
nested_value_rowids: A list of 1-D integer tensors. The `i`th tensor is
used as the `value_rowids` for the `i`th ragged dimension.
nested_nrows: A list of integer scalars. The `i`th scalar is used as the
`nrows` for the `i`th ragged dimension.
name: A name prefix for the RaggedTensor (optional).
validate: If true, then use assertions to check that the arguments form
a valid `RaggedTensor`.
Returns:
A `RaggedTensor` (or `flat_values` if `nested_value_rowids` is empty).
Raises:
ValueError: If `len(nested_values_rowids) != len(nested_nrows)`.
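    #### Example (illustrative sketch, mirroring the nested `row_splits`
    example in the class docstring):
    ```python
    >>> RaggedTensor.from_nested_value_rowids(
    ...     flat_values=[3, 1, 4, 1, 5, 9, 2, 6],
    ...     nested_value_rowids=([0, 0, 0, 2, 2], [0, 0, 0, 0, 2, 2, 2, 3]),
    ...     nested_nrows=[3, 5]).to_list()
    [[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]]
    ```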
"""
if not isinstance(validate, bool):
raise TypeError("validate must have type bool")
if isinstance(nested_value_rowids, ops.Tensor):
raise TypeError("nested_value_rowids must be a list of Tensors")
if nested_nrows is None:
nested_nrows = [None] * len(nested_value_rowids)
else:
if isinstance(nested_nrows, ops.Tensor):
raise TypeError("nested_nrows must be a list of Tensors")
if len(nested_nrows) != len(nested_value_rowids):
raise ValueError("nested_nrows must have the same length as "
"nested_value_rowids")
with ops.name_scope(
name, "RaggedFromNestedValueRowIds",
[flat_values] + list(nested_value_rowids) + list(nested_nrows)):
result = flat_values
for value_rowids, nrows in reversed(
list(zip(nested_value_rowids, nested_nrows))):
result = cls.from_value_rowids(result, value_rowids, nrows,
validate=validate)
return result
@classmethod
def from_nested_row_splits(cls,
flat_values,
nested_row_splits,
name=None,
validate=True):
"""Creates a `RaggedTensor` from a nested list of `row_splits` tensors.
Equivalent to:
```python
result = flat_values
for row_splits in reversed(nested_row_splits):
result = from_row_splits(result, row_splits)
```
Args:
flat_values: A potentially ragged tensor.
nested_row_splits: A list of 1-D integer tensors. The `i`th tensor is
used as the `row_splits` for the `i`th ragged dimension.
name: A name prefix for the RaggedTensor (optional).
validate: If true, then use assertions to check that the arguments form a
valid `RaggedTensor`.
Returns:
A `RaggedTensor` (or `flat_values` if `nested_row_splits` is empty).
"""
if not isinstance(validate, bool):
raise TypeError("validate must have type bool")
if isinstance(nested_row_splits, ops.Tensor):
raise TypeError("nested_row_splits must be a list of Tensors")
with ops.name_scope(name, "RaggedFromNestedRowSplits",
[flat_values] + list(nested_row_splits)):
result = flat_values
for splits in reversed(nested_row_splits):
result = cls.from_row_splits(result, splits, validate=validate)
return result
@classmethod
def from_nested_row_lengths(cls,
flat_values,
nested_row_lengths,
name=None,
validate=True):
"""Creates a `RaggedTensor` from a nested list of `row_lengths` tensors.
Equivalent to:
```python
result = flat_values
for row_lengths in reversed(nested_row_lengths):
result = from_row_lengths(result, row_lengths)
```
Args:
flat_values: A potentially ragged tensor.
nested_row_lengths: A list of 1-D integer tensors. The `i`th tensor is
used as the `row_lengths` for the `i`th ragged dimension.
name: A name prefix for the RaggedTensor (optional).
validate: If true, then use assertions to check that the arguments form
a valid `RaggedTensor`.
Returns:
A `RaggedTensor` (or `flat_values` if `nested_row_lengths` is empty).
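    #### Example (illustrative sketch):
    ```python
    >>> RaggedTensor.from_nested_row_lengths(
    ...     flat_values=[3, 1, 4, 1, 5, 9, 2, 6],
    ...     nested_row_lengths=([2, 0, 2, 1, 0], [3, 1, 2, 1, 1])).to_list()
    [[[3, 1, 4], [1]], [], [[5, 9], [2]], [[6]], []]
    ```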
"""
if not isinstance(validate, bool):
raise TypeError("validate must have type bool")
if isinstance(nested_row_lengths, ops.Tensor):
raise TypeError("nested_row_lengths must be a list of Tensors")
with ops.name_scope(name, "RaggedFromNestedRowlengths",
[flat_values] + list(nested_row_lengths)):
result = flat_values
for lengths in reversed(nested_row_lengths):
result = cls.from_row_lengths(result, lengths, validate=validate)
return result
@classmethod
def _convert_values_and_row_partition(cls, values, partition, name):
"""Converts `values` and `partition` to Tensors.
If `values` is a `RaggedTensor`, then converts `values` and `partition`
to have compatible row-partitioning dtypes. In particular, if any of the
row partitioning tensors are `int64`, then all of the other row
    partitioning tensors will be cast to `int64` (if auto_cast_partition_dtype()
is true) or an error will be raised (if auto_cast_partition_dtype() is
false).
Args:
values: The `values` for the `RaggedTensor` being constructed.
partition: A row-partitioning tensor for the `RaggedTensor` being
constructed. I.e., one of: row_splits, row_lengths, row_starts,
row_limits, value_rowids.
name: The name of the row-partitioning tensor.
Returns:
A tuple (values, partition).
"""
if isinstance(values, RaggedTensor):
if isinstance(partition, ops.Tensor):
if partition.dtype not in (dtypes.int32, dtypes.int64):
raise ValueError("%s must have dtype int32 or int64" % name)
if values.row_splits.dtype != partition.dtype:
if not ragged_config.auto_cast_partition_dtype():
raise ValueError("dtype mismatch: %s (%s) vs values.row_splits (%s)"
% (name, partition.dtype, values.row_splits.dtype))
partition = math_ops.cast(partition, dtypes.int64)
values = values.with_row_splits_dtype(dtypes.int64)
else:
partition = ops.convert_to_tensor(partition, values.row_splits.dtype,
name=name)
else:
values = ops.convert_to_tensor(values, name="values")
partition = ops.convert_to_tensor(
partition, preferred_dtype=dtypes.int64,
name=name)
if partition.dtype not in (dtypes.int32, dtypes.int64):
raise ValueError("%s must have dtype int32 or int64" % name)
return (values, partition)
#=============================================================================
# Accessors
#=============================================================================
@property
def dtype(self):
"""The `DType` of values in this tensor."""
return self._values.dtype
@property
def shape(self):
"""The statically known shape of this ragged tensor.
Returns:
A `TensorShape` containing the statically known shape of this ragged
tensor. Ragged dimensions have a size of `None`.
Examples:
```python
>>> ragged.constant([[0], [1, 2]]).shape
TensorShape([Dimension(2), Dimension(None)])
>>> ragged.constant([[[0, 1]], [[1, 2], [3, 4]]], ragged_rank=1).shape
    TensorShape([Dimension(2), Dimension(None), Dimension(2)])
```
"""
nrows = tensor_shape.dimension_at_index(self._row_splits.shape, 0) - 1
values_shape = self._values.shape
value_shape = values_shape[1:]
return tensor_shape.TensorShape([nrows, None]).concatenate(value_shape)
@property
def ragged_rank(self):
"""The number of ragged dimensions in this ragged tensor.
Returns:
A Python `int` indicating the number of ragged dimensions in this ragged
tensor. The outermost dimension is not considered ragged.
"""
values_is_ragged = isinstance(self._values, RaggedTensor)
return self._values.ragged_rank + 1 if values_is_ragged else 1
@property
def values(self):
"""The concatenated rows for this ragged tensor.
`rt.values` is a potentially ragged tensor formed by flattening the two
outermost dimensions of `rt` into a single dimension.
`rt.values.shape = [nvals] + rt.shape[2:]` (where `nvals` is the
number of items in the outer two dimensions of `rt`).
`rt.ragged_rank = self.ragged_rank - 1`
Returns:
A potentially ragged tensor.
#### Example:
```python
>>> rt = ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
    >>> print(rt.values)
tf.Tensor([3, 1, 4, 1, 5, 9, 2, 6])
```
"""
return self._values
@property
def row_splits(self):
"""The row-split indices for this ragged tensor's `values`.
`rt.row_splits` specifies where the values for each row begin and end in
`rt.values`. In particular, the values for row `rt[i]` are stored in
the slice `rt.values[rt.row_splits[i]:rt.row_splits[i+1]]`.
Returns:
A 1-D integer `Tensor` with shape `[self.nrows+1]`.
The returned tensor is non-empty, and is sorted in ascending order.
`self.row_splits[0]` is zero, and `self.row_splits[-1]` is equal to
`self.values.shape[0]`.
#### Example:
```python
>>> rt = ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
    >>> print(rt.row_splits)  # indices of row splits in rt.values
tf.Tensor([0, 4, 4, 7, 8, 8])
```
"""
return self._row_splits
@property
def flat_values(self):
"""The innermost `values` tensor for this ragged tensor.
Concretely, if `rt.values` is a `Tensor`, then `rt.flat_values` is
`rt.values`; otherwise, `rt.flat_values` is `rt.values.flat_values`.
Conceptually, `flat_values` is the tensor formed by flattening the
outermost dimension and all of the ragged dimensions into a single
dimension.
`rt.flat_values.shape = [nvals] + rt.shape[rt.ragged_rank + 1:]`
(where `nvals` is the number of items in the flattened dimensions).
Returns:
A `Tensor`.
#### Example:
```python
>>> rt = ragged.constant([[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]])
    >>> print(rt.flat_values)
tf.Tensor([3, 1, 4, 1, 5, 9, 2, 6])
```
"""
rt_values = self.values
while isinstance(rt_values, RaggedTensor):
rt_values = rt_values.values
return rt_values
@property
def nested_row_splits(self):
"""A tuple containing the row_splits for all ragged dimensions.
`rt.nested_row_splits` is a tuple containing the `row_splits` tensors for
all ragged dimensions in `rt`, ordered from outermost to innermost. In
particular, `rt.nested_row_splits = (rt.row_splits,) + value_splits` where:
* `value_splits = ()` if `rt.values` is a `Tensor`.
* `value_splits = rt.values.nested_row_splits` otherwise.
Returns:
A `tuple` of 1-D integer `Tensor`s.
#### Example:
```python
>>> rt = ragged.constant([[[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]]])
    >>> for i, splits in enumerate(rt.nested_row_splits):
... print('Splits for dimension %d: %s' % (i+1, splits))
Splits for dimension 1: [0, 1]
Splits for dimension 2: [0, 3, 3, 5]
Splits for dimension 3: [0, 4, 4, 7, 8, 8]
```
"""
rt_nested_splits = [self.row_splits]
rt_values = self.values
while isinstance(rt_values, RaggedTensor):
rt_nested_splits.append(rt_values.row_splits)
rt_values = rt_values.values
return tuple(rt_nested_splits)
def value_rowids(self, name=None):
"""Returns the row indices for the `values` in this ragged tensor.
`rt.value_rowids()` corresponds one-to-one with the outermost dimension of
`rt.values`, and specifies the row containing each value. In particular,
the row `rt[row]` consists of the values `rt.values[j]` where
`rt.value_rowids()[j] == row`.
Args:
name: A name prefix for the returned tensor (optional).
Returns:
A 1-D integer `Tensor` with shape `self.values.shape[:1]`.
The returned tensor is nonnegative, and is sorted in ascending order.
#### Example:
```python
>>> rt = ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
>>> rt.values
tf.Tensor([3, 1, 4, 1, 5, 9, 2, 6])
>>> rt.value_rowids()
tf.Tensor([0, 0, 0, 0, 2, 2, 2, 3]) # corresponds 1:1 with rt.values
```
"""
if self._cached_value_rowids is not None:
return self._cached_value_rowids
with ops.name_scope(name, "RaggedValueRowIds", [self]):
return segment_id_ops.row_splits_to_segment_ids(self.row_splits)
def nrows(self, out_type=None, name=None):
"""Returns the number of rows in this ragged tensor.
I.e., the size of the outermost dimension of the tensor.
Args:
out_type: `dtype` for the returned tensor. Defaults to
`self.row_splits.dtype`.
name: A name prefix for the returned tensor (optional).
Returns:
A scalar `Tensor` with dtype `out_type`.
#### Example:
```python
>>> rt = ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
>>> rt.nrows() # rt has 5 rows.
5
```
"""
if out_type is None:
out_type = self._row_splits.dtype
else:
out_type = dtypes.as_dtype(out_type)
if self._cached_nrows is not None:
return math_ops.cast(self._cached_nrows, out_type)
with ops.name_scope(name, "RaggedNRows", [self]):
return array_ops.shape(self.row_splits, out_type=out_type)[0] - 1
def row_starts(self, name=None):
"""Returns the start indices for rows in this ragged tensor.
These indices specify where the values for each row begin in
`self.values`. `rt.row_starts()` is equal to `rt.row_splits[:-1]`.
Args:
name: A name prefix for the returned tensor (optional).
Returns:
A 1-D integer Tensor with shape `[nrows]`.
The returned tensor is nonnegative, and is sorted in ascending order.
#### Example:
```python
>>> rt = ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
>>> rt.values
tf.Tensor([3, 1, 4, 1, 5, 9, 2, 6])
>>> rt.row_starts() # indices of row starts in rt.values
tf.Tensor([0, 4, 4, 7, 8])
```
"""
with ops.name_scope(name, "RaggedRowStarts", [self]):
return self.row_splits[:-1]
def row_limits(self, name=None):
"""Returns the limit indices for rows in this ragged tensor.
These indices specify where the values for each row end in
    `self.values`.  `rt.row_limits()` is equal to `rt.row_splits[1:]`.
Args:
name: A name prefix for the returned tensor (optional).
Returns:
A 1-D integer Tensor with shape `[nrows]`.
The returned tensor is nonnegative, and is sorted in ascending order.
#### Example:
```python
>>> rt = ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
>>> rt.values
tf.Tensor([3, 1, 4, 1, 5, 9, 2, 6])
>>> rt.row_limits() # indices of row limits in rt.values
tf.Tensor([4, 4, 7, 8, 8])
```
"""
with ops.name_scope(name, "RaggedRowLimits", [self]):
return self.row_splits[1:]
def row_lengths(self, axis=1, name=None):
"""Returns the lengths of the rows in this ragged tensor.
`rt.row_lengths()[i]` indicates the number of values in the
`i`th row of `rt`.
Args:
axis: An integer constant indicating the axis whose row lengths should be
returned.
name: A name prefix for the returned tensor (optional).
Returns:
A potentially ragged integer Tensor with shape `self.shape[:axis]`.
Raises:
ValueError: If `axis` is out of bounds.
#### Example:
```python
>>> rt = ragged.constant([[[3, 1, 4], [1]], [], [[5, 9], [2]], [[6]], []])
    >>> rt.row_lengths()  # lengths of rows in rt
tf.Tensor([2, 0, 2, 1, 0])
>>> rt.row_lengths(axis=2) # lengths of axis=2 rows.
<tf.RaggedTensor [[3, 1], [], [2, 1], [1], []]>
```
"""
if self._cached_row_lengths is not None:
return self._cached_row_lengths
with ops.name_scope(name, "RaggedRowLengths", [self]):
axis = ragged_util.get_positive_axis(axis, self.shape.ndims)
if axis == 0:
return self.nrows()
elif axis == 1:
splits = self.row_splits
return splits[1:] - splits[:-1]
elif isinstance(self.values, RaggedTensor):
return self.with_values(self.values.row_lengths(axis - 1))
else:
shape = array_ops.shape(self.values, out_type=self._row_splits.dtype)
return self.with_values(
array_ops.ones(shape[:axis - 1], self._row_splits.dtype) *
shape[axis - 1])
def nested_row_lengths(self, name=None):
"""Returns a tuple containing the row_lengths for all ragged dimensions.
    `rt.nested_row_lengths()` is a tuple containing the `row_lengths` tensors for
all ragged dimensions in `rt`, ordered from outermost to innermost.
Args:
name: A name prefix for the returned tensors (optional).
Returns:
A `tuple` of 1-D integer `Tensors`. The length of the tuple is equal to
`self.ragged_rank`.
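    #### Example (illustrative sketch):
    ```python
    >>> rt = ragged.constant([[[3, 1, 4], [1]], [], [[5, 9], [2]], [[6]], []])
    >>> rt.nested_row_lengths()
    (tf.Tensor([2, 0, 2, 1, 0]), tf.Tensor([3, 1, 2, 1, 1]))
    ```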
"""
with ops.name_scope(name, "RaggedNestedRowLengths", [self]):
rt_nested_row_lengths = []
rt = self
while isinstance(rt, RaggedTensor):
rt_nested_row_lengths.append(rt.row_lengths())
rt = rt.values
return tuple(rt_nested_row_lengths)
def bounding_shape(self, axis=None, name=None, out_type=None):
"""Returns the tight bounding box shape for this `RaggedTensor`.
Args:
axis: An integer scalar or vector indicating which axes to return the
bounding box for. If not specified, then the full bounding box is
returned.
name: A name prefix for the returned tensor (optional).
out_type: `dtype` for the returned tensor. Defaults to
`self.row_splits.dtype`.
Returns:
An integer `Tensor` (`dtype=self.row_splits.dtype`). If `axis` is not
specified, then `output` is a vector with
`output.shape=[self.shape.ndims]`. If `axis` is a scalar, then the
`output` is a scalar. If `axis` is a vector, then `output` is a vector,
where `output[i]` is the bounding size for dimension `axis[i]`.
#### Example:
```python
>>> rt = ragged.constant([[1, 2, 3, 4], [5], [], [6, 7, 8, 9], [10]])
>>> rt.bounding_shape()
[5, 4]
```
"""
if out_type is None:
out_type = self._row_splits.dtype
else:
out_type = dtypes.as_dtype(out_type)
with ops.name_scope(name, "RaggedBoundingBox", [self, axis]):
nested_splits = self.nested_row_splits
rt_flat_values = self.flat_values
# Optimized special cases for when axis=0 or axis=1:
if isinstance(axis, int):
if axis == 0:
return array_ops.shape(nested_splits[0], out_type=out_type)[0] - 1
elif axis == 1:
return math_ops.maximum(math_ops.reduce_max(self.row_lengths()), 0)
splits_shape = array_ops.shape(self.row_splits, out_type=out_type)
flat_values_shape = array_ops.shape(rt_flat_values, out_type=out_type)
ragged_dimensions = array_ops.stack([splits_shape[0] - 1] + [
math_ops.maximum(math_ops.reduce_max(splits[1:] - splits[:-1]), 0)
for splits in nested_splits
])
inner_dimensions = flat_values_shape[1:]
bbox = array_ops.concat([ragged_dimensions, inner_dimensions], axis=0)
return bbox if axis is None else array_ops.gather(bbox, axis)
#=============================================================================
# Transformation
#=============================================================================
  def with_values(self, new_values):
    """Returns a copy of `self` with `values` replaced by `new_values`.
Preserves cached row-partitioning tensors such as `self.cached_nrows` and
`self.cached_value_rowids` if they have values.
Args:
new_values: Potentially ragged tensor to use as the `values` for the
returned `RaggedTensor`. Must have `rank > 0`, and must have the same
number of rows as `self.values`.
Returns:
A `RaggedTensor`. `result.rank = 1 + new_values.rank`.
`result.ragged_rank = 1 + new_values.ragged_rank`
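    #### Example (illustrative sketch):
    ```python
    >>> rt = ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
    >>> print(rt.with_values(rt.values * 2))
    <tf.RaggedTensor [[6, 2, 8, 2], [], [10, 18, 4], [12], []]>
    ```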
"""
new_values.shape.with_rank_at_least(1)
self.values.shape[:1].assert_is_compatible_with(new_values.shape[:1])
if (isinstance(new_values, RaggedTensor) and
self._row_splits.dtype != new_values.row_splits.dtype):
if not ragged_config.auto_cast_partition_dtype():
raise ValueError("self and new_values have mismatched row_splits "
"dtypes; use RaggedTensor.with_row_splits_dtype() to "
"convert them to compatible dtypes.")
new_values = new_values.with_row_splits_dtype(dtypes.int64)
return self.with_row_splits_dtype(dtypes.int64).with_values(new_values)
return RaggedTensor(
new_values,
self._row_splits,
self._cached_row_lengths,
self._cached_value_rowids,
self._cached_nrows,
internal=True)
  def with_flat_values(self, new_values):
    """Returns a copy of `self` with `flat_values` replaced by `new_values`.
Preserves cached row-partitioning tensors such as `self.cached_nrows` and
`self.cached_value_rowids` if they have values.
Args:
new_values: Potentially ragged tensor that should replace
`self.flat_values`. Must have `rank > 0`, and must have the same
number of rows as `self.flat_values`.
Returns:
A `RaggedTensor`.
`result.rank = self.ragged_rank + new_values.rank`.
`result.ragged_rank = self.ragged_rank + new_values.ragged_rank`.
"""
if isinstance(self._values, ops.Tensor):
return self.with_values(new_values)
else:
return self.with_values(self.values.with_flat_values(new_values))
def with_row_splits_dtype(self, dtype):
"""Returns a copy of this RaggedTensor with the given `row_splits` dtype.
For RaggedTensors with multiple ragged dimensions, the `row_splits` for all
nested `RaggedTensor` objects are cast to the given dtype.
Args:
dtype: The dtype for `row_splits`. One of `tf.int32` or `tf.int64`.
Returns:
A copy of this RaggedTensor, with the `row_splits` cast to the given
type.
"""
dtype = dtypes.as_dtype(dtype)
if dtype not in (dtypes.int32, dtypes.int64):
raise ValueError("dtype must be int32 or int64")
if self._row_splits.dtype == dtype:
return self
row_splits = math_ops.cast(self._row_splits, dtype)
values = self._values
if isinstance(values, RaggedTensor):
values = values.with_row_splits_dtype(dtype)
cached_row_lengths = self._cached_row_lengths
if cached_row_lengths is not None:
cached_row_lengths = math_ops.cast(cached_row_lengths, dtype)
cached_value_rowids = self._cached_value_rowids
if cached_value_rowids is not None:
cached_value_rowids = math_ops.cast(cached_value_rowids, dtype)
cached_nrows = self._cached_nrows
    if cached_nrows is not None:
      cached_nrows = math_ops.cast(cached_nrows, dtype)
return RaggedTensor(values, row_splits, cached_row_lengths,
cached_value_rowids, cached_nrows, internal=True)
#=============================================================================
# Tensor Type Conversions
#=============================================================================
@classmethod
def from_tensor(cls,
tensor,
lengths=None,
padding=None,
ragged_rank=1,
name=None,
row_splits_dtype=dtypes.int64):
"""Converts a `tf.Tensor` into a `RaggedTensor`.
The set of absent/default values may be specified using a vector of lengths
or a padding value (but not both). If `lengths` is specified, then the
output tensor will satisfy `output[row] = tensor[row][:lengths[row]]`. If
'lengths' is a list of lists or tuple of lists, those lists will be used
as nested row lengths. If `padding` is specified, then any row *suffix*
consisting entirely of `padding` will be excluded from the returned
`RaggedTensor`. If neither `lengths` nor `padding` is specified, then the
returned `RaggedTensor` will have no absent/default values.
Examples:
```python
>>> dt = tf.constant([[5, 7, 0], [0, 3, 0], [6, 0, 0]])
>>> tf.RaggedTensor.from_tensor(dt)
<tf.RaggedTensor [[5, 7, 0], [0, 3, 0], [6, 0, 0]]>
>>> tf.RaggedTensor.from_tensor(dt, lengths=[1, 0, 3])
<tf.RaggedTensor [[5], [], [6, 0, 0]]>
>>> tf.RaggedTensor.from_tensor(dt, padding=0)
<tf.RaggedTensor [[5, 7], [0, 3], [6]]>
>>> dt = tf.constant([[[5, 0], [7, 0], [0, 0]],
    ...                   [[0, 0], [3, 0], [0, 0]],
    ...                   [[6, 0], [0, 0], [0, 0]]])
>>> tf.RaggedTensor.from_tensor(dt, lengths=([2, 0, 3], [1, 1, 2, 0, 1]))
<tf.RaggedTensor [[[5], [7]], [], [[6, 0], [], [0]]]>
```
Args:
tensor: The `Tensor` to convert. Must have rank `ragged_rank + 1` or
higher.
lengths: An optional set of row lengths, specified using a 1-D integer
`Tensor` whose length is equal to `tensor.shape[0]` (the number of rows
in `tensor`). If specified, then `output[row]` will contain
`tensor[row][:lengths[row]]`. Negative lengths are treated as zero. You
may optionally pass a list or tuple of lengths to this argument, which
will be used as nested row lengths to construct a ragged tensor with
multiple ragged dimensions.
padding: An optional padding value. If specified, then any row suffix
consisting entirely of `padding` will be excluded from the returned
RaggedTensor. `padding` is a `Tensor` with the same dtype as `tensor`
and with `shape=tensor.shape[ragged_rank + 1:]`.
ragged_rank: Integer specifying the ragged rank for the returned
`RaggedTensor`. Must be greater than zero.
name: A name prefix for the returned tensors (optional).
row_splits_dtype: `dtype` for the returned `RaggedTensor`'s `row_splits`
tensor. One of `tf.int32` or `tf.int64`.
Returns:
A `RaggedTensor` with the specified `ragged_rank`. The shape of the
returned ragged tensor is compatible with the shape of `tensor`.
Raises:
ValueError: If both `lengths` and `padding` are specified.
"""
row_splits_dtype = dtypes.as_dtype(row_splits_dtype)
if lengths is not None and padding is not None:
raise ValueError("Specify lengths or padding, but not both")
if not isinstance(ragged_rank, int):
raise TypeError("ragged_rank expected int, got %r" % ragged_rank)
if ragged_rank <= 0:
raise ValueError(
"ragged_rank must be greater than 0; got %s" % ragged_rank)
with ops.name_scope(name, "RaggedFromTensor", [tensor, lengths, padding]):
tensor = ops.convert_to_tensor(tensor, name="tensor")
tensor.shape.with_rank_at_least(ragged_rank + 1)
input_shape = array_ops.shape(tensor, out_type=row_splits_dtype)
ncols = input_shape[1]
# Handle ragged_rank>1 via recursion:
# If the output should have multiple ragged dimensions, then first
# flatten the tensor to eliminate all but the last ragged dimension,
# and recursively convert that flattened tensor. Then add on the splits
# for the dimensions that we flattened out.
if ragged_rank > 1:
# Flatten `tensor` to eliminate all but the last ragged dimension.
new_shape = array_ops.concat([
constant_op.constant([-1], row_splits_dtype),
input_shape[ragged_rank:]
],
axis=0)
flattened = array_ops.reshape(tensor, new_shape)
# Recursively convert the flattened tensor.
values = cls.from_tensor(flattened, lengths, padding,
row_splits_dtype=row_splits_dtype)
# The total number of elements in each dimension. E.g., if
# input_shape=[3, 4, 5, 6], then dim[2] has 3*4*5 elements in total.
dim_size = math_ops.cumprod(input_shape)
# Construct splits tensors for the dimensions that were flattened.
new_splits = [
math_ops.range(0, dim_size[dim - 1] + 1) * input_shape[dim]
for dim in range(1, ragged_rank)
]
return cls.from_nested_row_splits(values, new_splits, validate=False)
# If padding was specified, then use it to find row lengths.
if padding is not None:
padding = ops.convert_to_tensor(
padding, name="padding", dtype=tensor.dtype)
padding.shape.assert_is_compatible_with(tensor.shape[2:])
# Find places where the padding is equal to the tensor. (This will
# broadcast `padding` across the outermost 2 dimensions of `tensor`,
# so `has_default_value.shape = tensor.shape`.)
has_default_value = math_ops.equal(padding, tensor)
# If the padding isn't a scalar, then require that all values in the
# padding match each item in the tensor. After this block of code,
# `has_default.shape = tensor.shape[:2]`. (Unfortunately, we can't just
        # use reduce_all for both cases, because when you pass an empty `axis`
# list to reduce_all, it reduces all axes; but we want it to reduce no
# axes -- i.e., to be a no-op.)
tensor_rank = array_ops.rank(tensor)
reduce_axis = math_ops.range(2, tensor_rank)
has_default = control_flow_ops.cond(
tensor_rank > 2,
lambda: math_ops.reduce_all(has_default_value, axis=reduce_axis),
lambda: has_default_value)
has_default.set_shape(tensor_shape.TensorShape([None, None]))
has_default.set_shape(tensor.shape[:2])
# Use has_default to find the length of each row: for each
# non-default item in a row, calculate the length that the row needs to
# have to include that item; and then take the max of those values
# (across each row).
has_nondefault = math_ops.logical_not(has_default)
has_nondefault = math_ops.cast(has_nondefault, row_splits_dtype)
length_for_nondefault_value = (
has_nondefault * array_ops.expand_dims(
math_ops.range(1, ncols + 1), 0))
lengths = math_ops.reduce_max(length_for_nondefault_value, axis=1)
if lengths is not None:
if isinstance(lengths,
(list, tuple)) and len(lengths) and not isinstance(
lengths[0], (int, float)):
# In this case, we've been given nested row lengths. Rather than
# reconstructing the tensor mask directly, we can recreate it as
# a boolean RaggedTensor, then densify that and use that as the
# mask to clear out the unused data in the passed tensor.
tensor.shape.with_rank_at_least(len(lengths) + 1)
num_tokens = math_ops.reduce_sum(lengths[-1])
ones_mask = array_ops.ones([num_tokens], dtype=dtypes.bool)
ragged_mask = cls.from_nested_row_lengths(
ones_mask, lengths, validate=False)
dense_ragged_mask = ragged_mask.to_tensor(default_value=False)
masked_data = array_ops.boolean_mask(tensor, dense_ragged_mask)
return cls.from_nested_row_lengths(
masked_data, lengths, validate=False)
else:
# If we have lengths (either directly supplied, or computed from
# paddings), then use those to construct splits; and then use masking
# to get the corresponding values.
lengths = ragged_util.convert_to_int_tensor(lengths, "lengths",
row_splits_dtype)
lengths.shape.assert_has_rank(1)
lengths = math_ops.minimum(lengths, ncols)
lengths = math_ops.maximum(lengths, 0)
limits = math_ops.cumsum(lengths)
splits = array_ops.concat(
[array_ops.zeros([1], row_splits_dtype), limits], axis=0)
mask = array_ops.sequence_mask(lengths, maxlen=ncols)
values = array_ops.boolean_mask(tensor, mask)
return cls.from_row_splits(values, splits, validate=False)
# If neither padding nor lengths were specified, then create a splits
# vector that contains no default values, and reshape the input tensor
# to form the values for the RaggedTensor.
nrows = input_shape[0]
nvals = nrows * ncols
splits = math_ops.range(nrows + 1) * ncols
values_shape = array_ops.concat([[nvals], input_shape[2:]], axis=0)
values = array_ops.reshape(tensor, values_shape)
return cls.from_row_splits(values, splits, validate=False)
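  # A minimal sketch of the padding path above (illustrative; `.to_list()`
  # assumes eager execution):
  #   dt = [[5, 7, 0], [0, 3, 0], [6, 0, 0]]
  #   RaggedTensor.from_tensor(dt, padding=0).to_list()
  #   # -> [[5, 7], [0, 3], [6]]
  #   # Trailing padding values are dropped, but padding values that precede a
  #   # non-padding value in the same row (the leading 0 in row 1) are kept.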
def to_tensor(self, default_value=None, name=None):
"""Converts this `RaggedTensor` into a `tf.Tensor`.
Example:
```python
>>> rt = ragged.constant([[9, 8, 7], [], [6, 5], [4]])
    >>> print(rt.to_tensor())
[[9 8 7]
[0 0 0]
[6 5 0]
[4 0 0]]
```
Args:
default_value: Value to set for indices not specified in `self`. Defaults
to zero. `default_value` must be broadcastable to
`self.shape[self.ragged_rank + 1:]`.
name: A name prefix for the returned tensors (optional).
Returns:
A `Tensor` with shape `ragged.bounding_shape(self)` and the
values specified by the non-empty values in `self`. Empty values are
assigned `default_value`.
"""
with ops.name_scope(name, "RaggedToTensor", [self, default_value]):
if default_value is not None:
default_value = ops.convert_to_tensor(
default_value, name="default_value", dtype=self.dtype)
# If ragged_rank > 1, then recursively convert the ragged values into a
# `Tensor` before we proceed.
values = self.values
if is_ragged(values):
values = values.to_tensor(default_value)
# Tile the default value, if necessary.
if default_value is not None:
if values.shape.ndims is not None:
default_value.shape.with_rank_at_most(values.shape.ndims - 1)
if (values.shape.ndims is None or default_value.shape.ndims is None or
values.shape.ndims != default_value.shape.ndims + 1):
value_shape = array_ops.shape(values)[1:]
default_value = array_ops.broadcast_to(default_value, value_shape)
default_value.shape.assert_is_compatible_with(values.shape[1:])
# Get the expected dense shape ([nrows, ncols] + value_shape).
rt_row_lengths = [self.row_splits[1:] - self.row_splits[:-1]]
nrows = array_ops.shape(self.row_splits,
out_type=self._row_splits.dtype)[0] - 1
ncols = math_ops.maximum(math_ops.reduce_max(rt_row_lengths), 0)
values_shape = array_ops.shape(values, out_type=self._row_splits.dtype)
value_shape = values_shape[1:]
nvals = values_shape[0]
# Build a default value if none was supplied.
if default_value is None:
default_value = array_ops.zeros(value_shape, dtype=values.dtype)
default_value.shape.assert_is_compatible_with(values.shape[1:])
default_value.set_shape(values.shape[1:])
# Get the row start indices, and expand to shape=[nrows, 1].
starts = array_ops.expand_dims(self.row_splits[:-1], 1)
# Get the row limit indices, and expand to shape=[nrows, 1].
limits = array_ops.expand_dims(self.row_splits[1:], 1)
# Get the column indices, and expand to shape=[1, ncols].
columns = array_ops.expand_dims(math_ops.range(0, ncols), 0)
# Build a list containing the values plus the default value. We will use
# tf.gather to collect values from this list for the `Tensor` (using
# nvals as the index for the default value).
values_and_default = array_ops.concat(
[values, array_ops.stack([default_value])], axis=0)
# Construct a matrix "indices" pointing into values_and_default. I.e.,
      # output[r, c] = values_and_default[indices[r, c]].
nondefault_index = starts + columns
has_value = nondefault_index < limits
default_index = array_ops.fill(array_ops.stack([nrows, ncols]), nvals)
indices = array_ops.where(has_value, nondefault_index, default_index)
# Gather the results into a `Tensor`.
return array_ops.gather(values_and_default, indices)
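  # Worked example of the gather-based algorithm above for
  # rt = [[9, 8, 7], [], [6, 5], [4]] (row_splits = [0, 3, 3, 5, 6]):
  #   starts  = [[0], [3], [3], [5]]     limits = [[3], [3], [5], [6]]
  #   columns = [[0, 1, 2]]              nvals  = 6
  #   nondefault_index = starts + columns
  #                    = [[0, 1, 2], [3, 4, 5], [3, 4, 5], [5, 6, 7]]
  #   has_value = nondefault_index < limits
  #   indices   = [[0, 1, 2], [6, 6, 6], [3, 4, 6], [5, 6, 6]]
  #   values_and_default = [9, 8, 7, 6, 5, 4, d]        (d = default_value)
  #   gather(values_and_default, indices)
  #     -> [[9, 8, 7], [d, d, d], [6, 5, d], [4, d, d]]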
@classmethod
def from_sparse(cls, st_input, name=None, row_splits_dtype=dtypes.int64):
"""Converts a 2D `tf.SparseTensor` to a `RaggedTensor`.
Each row of the `output` `RaggedTensor` will contain the explicit values
    from the same row in `st_input`. `st_input` must be ragged-right. If it
    is not ragged-right, then an error will be generated.
Example:
```python
    >>> st = SparseTensor(indices=[[0, 0], [0, 1], [0, 2], [1, 0], [3, 0]],
... values=[1, 2, 3, 4, 5],
... dense_shape=[4, 3])
    >>> RaggedTensor.from_sparse(st).eval().tolist()
[[1, 2, 3], [4], [], [5]]
```
Currently, only two-dimensional `SparseTensors` are supported.
Args:
st_input: The sparse tensor to convert. Must have rank 2.
name: A name prefix for the returned tensors (optional).
row_splits_dtype: `dtype` for the returned `RaggedTensor`'s `row_splits`
tensor. One of `tf.int32` or `tf.int64`.
Returns:
A `RaggedTensor` with the same values as `st_input`.
`output.ragged_rank = rank(st_input) - 1`.
`output.shape = [st_input.dense_shape[0], None]`.
Raises:
ValueError: If the number of dimensions in `st_input` is not known
statically, or is not two.
"""
row_splits_dtype = dtypes.as_dtype(row_splits_dtype)
if not sparse_tensor.is_sparse(st_input):
raise TypeError("Expected SparseTensor, got %s" % type(st_input).__name__)
with ops.name_scope(name, "RaggedFromSparse", [st_input]):
st_input = sparse_tensor.convert_to_tensor_or_sparse_tensor(
st_input, name="st_input")
if st_input.dense_shape.shape.ndims is None:
static_rank_from_dense_shape = None
else:
static_rank_from_dense_shape = st_input.dense_shape.shape.dims[0].value
if st_input.indices.shape.ndims is None:
static_rank_from_indices = None
else:
static_rank_from_indices = st_input.indices.shape.dims[1].value
if static_rank_from_dense_shape != 2 and static_rank_from_indices != 2:
raise ValueError("rank(st_input) must be 2")
with ops.control_dependencies(
_assert_sparse_indices_are_ragged_right(st_input.indices)):
# Treat sparse row indices as segment ids to generate a splits tensor
        # that we can pair with the sparse tensor values. (Ignore sparse column
# indices.)
segment_ids = math_ops.cast(st_input.indices[:, 0], row_splits_dtype)
num_segments = math_ops.cast(st_input.dense_shape[0], row_splits_dtype)
return cls.from_value_rowids(
st_input.values, segment_ids, num_segments, validate=False)
def to_sparse(self, name=None):
"""Converts this `RaggedTensor` into a `tf.SparseTensor`.
Example:
```python
>>> rt = ragged.constant([[1, 2, 3], [4], [], [5, 6]])
>>> rt.to_sparse().eval()
SparseTensorValue(indices=[[0, 0], [0, 1], [0, 2], [1, 0], [3, 0], [3, 1]],
values=[1, 2, 3, 4, 5, 6],
dense_shape=[4, 3])
```
Args:
name: A name prefix for the returned tensors (optional).
Returns:
A SparseTensor with the same values as `self`.
"""
with ops.name_scope(name, "RaggedToSparse", [self]):
result = gen_ragged_conversion_ops.ragged_tensor_to_sparse(
self.nested_row_splits, self.flat_values, name=name)
return sparse_tensor.SparseTensor(result.sparse_indices,
result.sparse_values,
result.sparse_dense_shape)
@classmethod
def _from_variant(cls,
variant,
dtype,
output_ragged_rank,
input_ragged_rank=None,
name=None):
"""Converts a `variant` Tensor into a `RaggedTensor`.
The input `variant` could be a scalar, meaning it encodes a single
`RaggedTensor` with ragged_rank `output_ragged_rank`. Alternatively it could
have an arbitrary rank, in which case each element is decoded into a
`RaggedTensor` with ragged_rank `input_ragged_rank` and these are then
stacked according to the input shape to output a single `RaggedTensor`
with ragged_rank `output_ragged_rank`. If `input_ragged_rank` is not
provided, it is inferred dynamically as `output_ragged_rank` -
`rank(variant)`. If `input_ragged_rank` is provided, the following must be
true: `output_ragged_rank` = `input_ragged_rank` + `rank(variant)`.
Example:
```python
>>> rt = ragged.constant([[0], [1, 2]])
>>> et = rt._to_variant()
>>> stacked_et = ragged.stack([et, et])
>>> ragged.RaggedTensor._from_variant( # scalar input.
et, dtype=tf.int32, output_ragged_rank=1).eval().tolist()
[[0], [1, 2]]
>>> ragged.RaggedTensor._from_variant( # batched input.
stacked_et, dtype=tf.int32, output_ragged_rank=2).eval().tolist()
[[[0], [1, 2]], [[0], [1, 2]]]
```
Args:
variant: A `variant` Tensor representing an encoded (possibly
nested-batched) `RaggedTensor`.
dtype: The dtype of the encoded `RaggedTensor`.
output_ragged_rank: The expected ragged rank of the output `RaggedTensor`.
input_ragged_rank: The ragged rank of each encoded `RaggedTensor`. This
is optional and inferred dynamically if not provided.
name: A name prefix for the returned tensors (optional).
Returns:
A `RaggedTensor` of dtype `dtype` and ragged rank `output_ragged_rank`.
Raises:
ValueError: If the input rank is known, `input_ragged_rank` is provided
and `output_ragged_rank` = `input_ragged_rank` + `rank(variant)` does
not hold.
"""
variant = ops.convert_to_tensor(
variant, name="variant", dtype=dtypes.variant)
if (variant.shape.ndims is not None and input_ragged_rank is not None and
output_ragged_rank != input_ragged_rank + variant.shape.ndims):
      raise ValueError(
          "output_ragged_rank must be equal to input_ragged_rank + "
"variant.shape.ndims, found variant.shape.ndims: %d, "
"input_ragged_rank: %d, output_ragged_rank: %d" %
(variant.shape.ndims, input_ragged_rank, output_ragged_rank))
input_ragged_rank = -1 if input_ragged_rank is None else input_ragged_rank
with ops.name_scope(
name, "RaggedFromVariant",
[variant, dtype, input_ragged_rank, output_ragged_rank]):
result = gen_ragged_conversion_ops.ragged_tensor_from_variant(
variant, input_ragged_rank, output_ragged_rank, dtype, dtypes.int64,
name)
return cls.from_nested_row_splits(
result.output_dense_values,
result.output_nested_splits,
validate=False)
def _to_variant(self, batched_input=False, name=None):
"""Converts this `RaggedTensor` into a `variant` Tensor.
If `batched_input` is `True`, then the `RaggedTensor` is unbatched along the
zero-th dimension, each component `RaggedTensor` is encoded into a scalar
`variant` Tensor, and these are stacked to return a 1-D `variant` Tensor.
If `batched_input` is `False`, then the `RaggedTensor` is encoded as is and
a scalar `variant` Tensor is returned.
Example:
>>> rt = ragged.constant([[[0]], [[1]], [[2]]])
>>> rt._to_variant().shape.as_list()
[]
>>> rt._to_variant(batched_input=True).shape.as_list()
[3]
Args:
batched_input: If `True`, the `RaggedTensor` is unbatched and converted to
a `variant` vector. Set to `False` by default.
name: A name prefix for the returned tensors (optional).
Returns:
A `variant` Tensor that encodes this `RaggedTensor`.
"""
with ops.name_scope(name, "RaggedToVariant", [self, batched_input]):
return gen_ragged_conversion_ops.ragged_tensor_to_variant(
self.nested_row_splits, self.flat_values, batched_input, name)
#=============================================================================
# String Encoding
#=============================================================================
def __str__(self):
if self._is_eager():
return "<tf.RaggedTensor %s>" % self.to_list()
else:
return self.__repr__()
def __repr__(self):
return "tf.RaggedTensor(values=%s, row_splits=%s)" % (self._values,
self._row_splits)
#=============================================================================
# Eager Execution Mode
#=============================================================================
def to_list(self):
"""Returns a nested Python `list` with the values for this `RaggedTensor`.
    Requires that this `RaggedTensor` was constructed in eager execution mode.
Returns:
A nested Python `list`.
"""
if self._is_eager():
return self._eager_value().to_list()
else:
raise ValueError("RaggedTensor.to_list() is only supported in eager "
"mode; in graph mode, evaluate the RaggedTensor first "
"and then use RaggedTensorValue.to_list().")
def _eager_value(self):
"""Returns a RaggedTensorValue for self. Requires self._is_eager()=true."""
value = self.flat_values.numpy()
for row_splits in reversed(self.nested_row_splits):
value = ragged_tensor_value.RaggedTensorValue(value, row_splits.numpy())
return value
def _is_eager(self):
"""Returns True if values & row_splits Tensors are all `EagerTensor`s."""
rt = self
while isinstance(rt, RaggedTensor):
if not isinstance(rt.row_splits, ops.EagerTensor):
return False
rt = rt.values
return isinstance(rt, ops.EagerTensor)
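  # Eager-mode sketch, following the `ragged.constant` convention used in the
  # docstring examples above:
  #   ragged.constant([[1, 2], [3]]).to_list()   # -> [[1, 2], [3]]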
#=============================================================================
# Indexing & Slicing
#=============================================================================
def __getitem__(self, key):
"""Returns the specified piece of this RaggedTensor."""
# See ragged_getitem.py for the documentation and implementation of this
# method.
#
# Note: the imports in ragged/__init__.py ensure that this method always
# gets overridden before it is called.
#=============================================================================
# Name Scope
#=============================================================================
# This private function is used by ops.name_scope to ensure that all of the
# input tensors for the scope belong to the same graph. Defining this means
# that you may include `RaggedTensor` objects in the name_scope `values`
# list.
def _as_graph_element(self):
"""Convert `self` to a graph element."""
values = self.values
while isinstance(values, RaggedTensor):
values = values.values
return values
#=============================================================================
# Composite Tensor
#=============================================================================
def _to_components(self):
return (self.flat_values,) + self.nested_row_splits
@classmethod
def _from_components(cls, components, metadata):
return cls.from_nested_row_splits(
components[0], components[1:], validate=False)
def _shape_invariant_to_components(self, shape=None):
ragged_rank = self.ragged_rank
flat_values = self.flat_values
if shape is None:
# Default shape invariant
value_shape = flat_values.shape[1:]
values_shape = tensor_shape.TensorShape([None]).concatenate(value_shape)
return ((values_shape, self._row_splits.shape) +
tuple(tensor_shape.TensorShape([None])
for i in range(1, ragged_rank)))
else:
# Explicitly specified shape invariant
if shape.ndims is not None and shape.ndims <= ragged_rank:
raise ValueError("Shape invariant %s does not have sufficient rank "
"for a RaggedTensor with %d ragged dimensions." %
(shape, self.ragged_rank))
if any(tensor_shape.dimension_value(shape[dim]) is not None
for dim in range(1, self.ragged_rank + 1)):
        raise ValueError("Shape invariant dimension size must be None for "
                         "ragged dimensions.")
nrows = tensor_shape.dimension_value(shape[0])
value_shape = shape[self.ragged_rank + 1:]
values_shape = tensor_shape.TensorShape([None]).concatenate(value_shape)
if nrows is None:
outer_splits_shape = tensor_shape.TensorShape([None])
else:
outer_splits_shape = tensor_shape.TensorShape([nrows + 1])
return ((values_shape, outer_splits_shape) +
tuple(tensor_shape.TensorShape([None])
for i in range(1, ragged_rank)))
@property
def _is_graph_tensor(self):
return hasattr(self._row_splits, "graph")
def consumers(self):
return self._consumers()
def is_ragged(value):
"""Returns true if `value` is a ragged tensor or ragged tensor value."""
return isinstance(value,
(RaggedTensor, ragged_tensor_value.RaggedTensorValue))
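# For example, is_ragged(ragged.constant([[1], [2, 3]])) is True, while
# is_ragged(constant_op.constant([1, 2, 3])) is False.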
def match_row_splits_dtypes(*tensors, **kwargs):
"""Return a copy of `tensors` with row_splits all having the same dtype.
Args:
*tensors: A list of Tensors or RaggedTensors.
**kwargs: If 'return_dtype=True', then return a tuple (dtype, tensors),
where `dtype` is the data type used by row-splits, and `tensors` is the
converted list of `Tensors` and `RaggedTensors`.
Returns:
The converted list of `Tensors` and `RaggedTensors`.
"""
return_dtype = kwargs.pop("return_dtype", False)
if kwargs:
raise ValueError("Unexpected keyword args %r" % kwargs)
has_int32 = False
has_int64 = False
for tensor in tensors:
if isinstance(tensor, RaggedTensor):
if tensor.row_splits.dtype == dtypes.int32:
has_int32 = True
else:
has_int64 = True
if has_int32 and has_int64:
if not ragged_config.auto_cast_partition_dtype():
raise ValueError("Input RaggedTensors have mismatched row_splits dtypes; "
"use RaggedTensor.with_row_splits_dtype() to convert "
"them to compatible dtypes.")
dtype = dtypes.int64
tensors = tuple(t.with_row_splits_dtype(dtypes.int64)
if isinstance(t, RaggedTensor) else t for t in tensors)
elif has_int32:
dtype = dtypes.int32
else:
dtype = dtypes.int64
if return_dtype:
return (dtype, tensors)
else:
return tensors
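# Usage sketch for the helper above (`a` and `b` are placeholder
# RaggedTensors, one with int32 and one with int64 row_splits):
#   match_row_splits_dtypes(a, b)    # raises unless auto-cast is enabled
#   dtype, (a2, b2) = match_row_splits_dtypes(a, b, return_dtype=True)
#   # With ragged_config.auto_cast_partition_dtype() enabled, both results
#   # carry int64 row_splits and `dtype` is dtypes.int64.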
#===============================================================================
# Convert value -> tensor
#===============================================================================
def convert_to_tensor_or_ragged_tensor(value,
dtype=None,
preferred_dtype=None,
name=None):
"""Converts value to a `RaggedTensor` or `Tensor`.
* If `value` is a `RaggedTensor`, then return it as-is.
* If `value` is a `RaggedTensorValue`, return a corresponding constant
`RaggedTensor`.
* Otherwise, use `convert_to_tensor` to convert `value` to a `Tensor`.
Args:
value: A `RaggedTensor`, a `RaggedTensorValue`, or an object whose type has
a registered `Tensor` conversion function.
dtype: Optional element type for the returned tensor. If missing the type
is inferred from the type of `value`.
preferred_dtype: Optional element type for the returned tensor, used when
dtype is None. This argument has no effect if `value` is already a
tensor, or when conversion is not possible.
name: Optional name to use if a new `Tensor` is created.
Returns:
A `Tensor` or `RaggedTensor`.
"""
if isinstance(value, RaggedTensor):
if dtype and not dtype.is_compatible_with(value.dtype):
raise ValueError("Tensor conversion requested dtype %s for "
"RaggedTensor with dtype %s: %r" %
(dtype.name, value.dtype.name, value))
return value
elif isinstance(value, ragged_tensor_value.RaggedTensorValue):
with ops.name_scope(name, "ConvertToTensorOrRaggedTensor", []):
flat_values = ops.convert_to_tensor(
value=value.flat_values,
dtype=dtype,
preferred_dtype=preferred_dtype,
name="flat_values")
return RaggedTensor.from_nested_row_splits(
flat_values, value.nested_row_splits, validate=False)
else:
return ops.convert_to_tensor(
value=value, dtype=dtype, preferred_dtype=preferred_dtype, name=name)
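# Minimal sketch of the three paths above (inputs are illustrative):
#   rt = ragged.constant([[1], [2, 3]])
#   convert_to_tensor_or_ragged_tensor(rt) is rt          # returned as-is
#   convert_to_tensor_or_ragged_tensor([[1, 2], [3, 4]])  # rectangular list
#                                                         # -> dense Tensor
#   # A non-rectangular list such as [[1], [2, 3]] falls through to
#   # ops.convert_to_tensor and fails, since it cannot form a dense Tensor.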
#===============================================================================
# Register RaggedTensor for use with session.run.
#===============================================================================
def _ragged_tensor_value_from_components(components):
components = list(components)
value = components.pop()
while components:
value = ragged_tensor_value.RaggedTensorValue(value, components.pop())
return value
def _ragged_tensor_session_fetch(rt):
components = rt.nested_row_splits + (rt.flat_values,)
return (components, _ragged_tensor_value_from_components)
def _ragged_tensor_session_feed(feed_key, feed_val):
key_components = feed_key.nested_row_splits + (feed_key.flat_values,)
val_components = feed_val.nested_row_splits + (feed_val.flat_values,)
return zip(key_components, val_components)
def _ragged_tensor_session_feed_for_partial_run(feed_key):
return feed_key.nested_row_splits + (feed_key.flat_values,)
session.register_session_run_conversion_functions(
RaggedTensor, _ragged_tensor_session_fetch, _ragged_tensor_session_feed,
_ragged_tensor_session_feed_for_partial_run)
#===============================================================================
# RaggedTensorType
#===============================================================================
class RaggedTensorType(object):
"""Encoding of a static type for a `RaggedTensor`.
Use this type to express/declare that an output must have the type of
`RaggedTensor`.
"""
def __init__(self, dtype, ragged_rank, row_splits_dtype=dtypes.int64):
"""Initializes a RaggedTensorType object.
Args:
dtype: data type of the `RaggedTensor`'s inner values.
ragged_rank: ragged_rank of the declared `RaggedTensor`.
row_splits_dtype: data type for the `RaggedTensor`'s row splits.
One of: `tf.int32` or `tf.int64`.
"""
row_splits_dtype = dtypes.as_dtype(row_splits_dtype)
self._dtype = dtype
self._ragged_rank = ragged_rank
self._row_splits_dtype = row_splits_dtype
dtype = property(lambda self: self._dtype)
ragged_rank = property(lambda self: self._ragged_rank)
row_splits_dtype = property(lambda self: self._row_splits_dtype)
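# Example: a declaration for int32 values with one ragged dimension (the
# object only records the declared types; it performs no validation):
#   rt_type = RaggedTensorType(dtype=dtypes.int32, ragged_rank=1)
#   (rt_type.dtype, rt_type.ragged_rank, rt_type.row_splits_dtype)
#   # -> (tf.int32, 1, tf.int64)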
#===============================================================================
# Helper Functions
#===============================================================================
def _assert_sparse_indices_are_ragged_right(indices):
"""Checks that the given SparseTensor.indices tensor is ragged-right.
Example: `indices = [[0, 0], [0, 1], [2, 0], [3, 1]]` is not ragged right
because the entry `[3, 1]` skips a cell.
Args:
indices: The SparseTensor indices to check.
Returns:
A list of control dependency op tensors.
"""
index_prefix = indices[:, :-1]
index_suffix = indices[:, -1]
# Check whether each index is starting a new row in the innermost dimension
# (prefix[i] != prefix[i-1]) or continuing a row (prefix[i] == prefix[i-1]).
# (Note: this skips the first index; we will check that separately below.)
index_prefix_changed = math_ops.reduce_any(
math_ops.not_equal(index_prefix[1:], index_prefix[:-1]), axis=1)
# Check two cases:
# * For indices that start a new row: index_suffix[i] must be zero.
# * For indices that continue a row: index_suffix[i] must be equal to
# index_suffix[i-1]+1.
index_ok = array_ops.where(
index_prefix_changed, math_ops.equal(index_suffix[1:], 0),
math_ops.equal(index_suffix[1:], index_suffix[:-1] + 1))
# Also check that the very first index didn't skip any cells. The first
# index starts a new row (by definition), so its suffix should be zero.
sparse_indices_are_ragged_right = math_ops.logical_and(
math_ops.reduce_all(math_ops.equal(index_suffix[:1], 0)),
math_ops.reduce_all(index_ok))
message = [
"SparseTensor is not right-ragged", "SparseTensor.indices =", indices
]
return [control_flow_ops.Assert(sparse_indices_are_ragged_right, message)]
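# Quick illustration of the check above:
#   [[0, 0], [0, 1], [1, 0], [2, 0], [2, 1]]  -> ragged-right (each row's
#                                                column indices count up from 0)
#   [[0, 0], [0, 2]]                          -> not ragged-right (row 0
#                                                skips column 1)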
@ops.RegisterGradient("RaggedTensorToSparse")
def _ragged_tensor_to_sparse_gradient(op, unused_sparse_indices_grad,
sparse_values_grad,
unused_sparse_shape_grad):
"""Gradient for RaggedTensorToSparse."""
op_inputs_nested_row_splits = op.inputs[:-1]
op_inputs_flat_values = op.inputs[-1]
# No gradient for the RaggedTensor's nested_row_splits.
nested_row_splits_gradient = [None] * len(op_inputs_nested_row_splits)
# Gradient for the RaggedTensor's flat_values is formed by reshaping
# the gradient for the SparseTensor's values.
flat_values_shape = array_ops.shape(op_inputs_flat_values)
flat_values_gradient = array_ops.reshape(sparse_values_grad,
flat_values_shape)
return nested_row_splits_gradient + [flat_values_gradient]
def _assert_monotonic_increasing(tensor, message=None):
return check_ops.assert_non_negative(
tensor[1:] - tensor[:-1], message=message)
def _assert_zero(tensor, message=None):
return check_ops.assert_equal(
tensor, constant_op.constant(0, dtype=tensor.dtype), message=message)
def _nrows(tensor, out_type=dtypes.int32):
if isinstance(tensor, RaggedTensor):
return tensor.nrows(out_type=out_type)
else:
return array_ops.shape(tensor, out_type=out_type)[0]
|
ghchinoy/tensorflow
|
tensorflow/python/ops/ragged/ragged_tensor.py
|
Python
|
apache-2.0
| 86,181
|
from datetime import datetime
from django.utils.timezone import utc
import mock
import pytest
from awx.main.models import (JobEvent, ProjectUpdateEvent, AdHocCommandEvent,
InventoryUpdateEvent, SystemJobEvent)
@pytest.mark.parametrize('job_identifier, cls', [
['job_id', JobEvent],
['project_update_id', ProjectUpdateEvent],
['ad_hoc_command_id', AdHocCommandEvent],
['inventory_update_id', InventoryUpdateEvent],
['system_job_id', SystemJobEvent],
])
@pytest.mark.parametrize('created', [
datetime(2018, 1, 1).isoformat(), datetime(2018, 1, 1)
])
def test_event_parse_created(job_identifier, cls, created):
with mock.patch.object(cls, 'objects') as manager:
cls.create_from_data(**{
job_identifier: 123,
'created': created
})
expected_created = datetime(2018, 1, 1).replace(tzinfo=utc)
manager.create.assert_called_with(**{
job_identifier: 123,
'created': expected_created
})
@pytest.mark.parametrize('job_identifier, cls', [
['job_id', JobEvent],
['project_update_id', ProjectUpdateEvent],
['ad_hoc_command_id', AdHocCommandEvent],
['inventory_update_id', InventoryUpdateEvent],
['system_job_id', SystemJobEvent],
])
def test_playbook_event_strip_invalid_keys(job_identifier, cls):
with mock.patch.object(cls, 'objects') as manager:
cls.create_from_data(**{
job_identifier: 123,
'extra_key': 'extra_value'
})
manager.create.assert_called_with(**{job_identifier: 123})
@pytest.mark.parametrize('field', [
'play', 'role', 'task', 'playbook'
])
def test_really_long_event_fields(field):
with mock.patch.object(JobEvent, 'objects') as manager:
JobEvent.create_from_data(**{
'job_id': 123,
'event_data': {field: 'X' * 4096}
})
manager.create.assert_called_with(**{
'job_id': 123,
'event_data': {field: 'X' * 1021 + '...'}
})
|
wwitzel3/awx
|
awx/main/tests/unit/models/test_events.py
|
Python
|
apache-2.0
| 2,034
|
from eight_mile.utils import get_version
import tensorflow as tf
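# Select the trainer implementation at import time: the graph-mode (feed-dict)
# trainers when eager execution is disabled, otherwise the eager and
# distributed trainers.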
if not tf.executing_eagerly():
from baseline.tf.lm.training.feed import *
else:
from baseline.tf.lm.training.eager import *
from baseline.tf.lm.training.distributed import *
|
dpressel/baseline
|
baseline/tf/lm/training/__init__.py
|
Python
|
apache-2.0
| 251
|
from django import forms
from models import SignUp
class SignUpForm(forms.ModelForm):
class Meta:
model = SignUp
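# Note: with neither `fields` nor `exclude` declared in Meta, the form exposes
# every SignUp model field. Django >= 1.8 raises ImproperlyConfigured for this;
# on newer versions add e.g. `fields = '__all__'`.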
|
sonnykr/blog
|
SignUps/forms.py
|
Python
|
apache-2.0
| 126
|
""" Cisco_IOS_XR_ipv4_acl_oper
This module contains a collection of YANG definitions
for Cisco IOS\-XR ipv4\-acl package operational data.
This module contains definitions
for the following management objects\:
ipv4\-acl\-and\-prefix\-list\: Root class of IPv4 Oper schema tree
Copyright (c) 2013\-2015 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
from ydk.models.cisco_ios_xr.Cisco_IOS_XR_common_acl_datatypes import AclUsageAppIdEnumEnum
class AclAce1Enum(Enum):
"""
AclAce1Enum
ACE Types
.. data:: NORMAL = 0
This is Normal ACE
.. data:: REMARK = 1
This is Remark ACE
.. data:: ABF = 2
This is ABF ACE
"""
NORMAL = 0
REMARK = 1
ABF = 2
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta
return meta._meta_table['AclAce1Enum']
class AclAce1Enum(Enum):
"""
AclAce1Enum
ACE Types
.. data:: NORMAL = 0
This is Normal ACE
.. data:: REMARK = 1
This is Remark ACE
.. data:: ABF = 2
This is ABF ACE
"""
NORMAL = 0
REMARK = 1
ABF = 2
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta
return meta._meta_table['AclAce1Enum']
class AclActionEnum(Enum):
"""
AclActionEnum
Acl action
.. data:: DENY = 0
Deny
.. data:: PERMIT = 1
Permit
.. data:: ENCRYPT = 2
Encrypt
.. data:: BYPASS = 3
Bypass
.. data:: FALLTHROUGH = 4
Fallthrough
.. data:: INVALID = 5
Invalid
"""
DENY = 0
PERMIT = 1
ENCRYPT = 2
BYPASS = 3
FALLTHROUGH = 4
INVALID = 5
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta
return meta._meta_table['AclActionEnum']
class AclLogEnum(Enum):
"""
AclLogEnum
Acl log
.. data:: LOG_NONE = 0
Log None
.. data:: LOG = 1
Log Regular
.. data:: LOG_INPUT = 2
Log Input
"""
LOG_NONE = 0
LOG = 1
LOG_INPUT = 2
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta
return meta._meta_table['AclLogEnum']
class AclPortOperatorEnum(Enum):
"""
AclPortOperatorEnum
Acl port operator
.. data:: NONE = 0
None
.. data:: EQ = 1
Equal
.. data:: GT = 2
Greater than
.. data:: LT = 3
Less than
.. data:: NEQ = 4
Not Equal
.. data:: RANGE = 5
Range
.. data:: ONEBYTE = 8
One Byte
.. data:: TWOBYTES = 9
Two Bytes
"""
NONE = 0
EQ = 1
GT = 2
LT = 3
NEQ = 4
RANGE = 5
ONEBYTE = 8
TWOBYTES = 9
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta
return meta._meta_table['AclPortOperatorEnum']
class AclPortOperatorEnum(Enum):
"""
AclPortOperatorEnum
Acl port operator
.. data:: NONE = 0
None
.. data:: EQ = 1
Equal
.. data:: GT = 2
Greater than
.. data:: LT = 3
Less than
.. data:: NEQ = 4
Not Equal
.. data:: RANGE = 5
Range
.. data:: ONEBYTE = 8
One Byte
.. data:: TWOBYTES = 9
Two Bytes
"""
NONE = 0
EQ = 1
GT = 2
LT = 3
NEQ = 4
RANGE = 5
ONEBYTE = 8
TWOBYTES = 9
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta
return meta._meta_table['AclPortOperatorEnum']
class AclPortOperatorEnum(Enum):
"""
AclPortOperatorEnum
Acl port operator
.. data:: NONE = 0
None
.. data:: EQ = 1
Equal
.. data:: GT = 2
Greater than
.. data:: LT = 3
Less than
.. data:: NEQ = 4
Not Equal
.. data:: RANGE = 5
Range
.. data:: ONEBYTE = 8
One Byte
.. data:: TWOBYTES = 9
Two Bytes
"""
NONE = 0
EQ = 1
GT = 2
LT = 3
NEQ = 4
RANGE = 5
ONEBYTE = 8
TWOBYTES = 9
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta
return meta._meta_table['AclPortOperatorEnum']
class AclTcpflagsOperatorEnum(Enum):
"""
AclTcpflagsOperatorEnum
Acl tcpflags operator
.. data:: MATCH_NONE = 0
Match None
.. data:: MATCH_ALL = 1
Match All
.. data:: MATCH_ANY_OLD = 2
Match any old
.. data:: MATCH_ANY = 3
Match any
"""
MATCH_NONE = 0
MATCH_ALL = 1
MATCH_ANY_OLD = 2
MATCH_ANY = 3
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta
return meta._meta_table['AclTcpflagsOperatorEnum']
class BagAclNhEnum(Enum):
"""
BagAclNhEnum
Bag acl nh
.. data:: NEXTHOP_NONE = 0
Next Hop None
.. data:: NEXTHOP_DEFAULT = 1
Nexthop Default
.. data:: NEXTHOP = 2
Nexthop
"""
NEXTHOP_NONE = 0
NEXTHOP_DEFAULT = 1
NEXTHOP = 2
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta
return meta._meta_table['BagAclNhEnum']
class BagAclNhStatusEnum(Enum):
"""
BagAclNhStatusEnum
Bag acl nh status
.. data:: NOT_PRESENT = 0
State Not Present
.. data:: UNKNOWN = 1
State Unknown
.. data:: DOWN = 2
State DOWN
.. data:: UP = 3
State UP
"""
NOT_PRESENT = 0
UNKNOWN = 1
DOWN = 2
UP = 3
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta
return meta._meta_table['BagAclNhStatusEnum']
class Ipv4AclAndPrefixList(object):
"""
Root class of IPv4 Oper schema tree
.. attribute:: access_list_manager
Access list manager containing access lists and prefix lists
**type**\: :py:class:`AccessListManager <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_oper.Ipv4AclAndPrefixList.AccessListManager>`
.. attribute:: oor
Out Of Resources, Limits to the resources allocatable
**type**\: :py:class:`Oor <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_oper.Ipv4AclAndPrefixList.Oor>`
"""
_prefix = 'ipv4-acl-oper'
_revision = '2015-11-09'
def __init__(self):
self.access_list_manager = Ipv4AclAndPrefixList.AccessListManager()
self.access_list_manager.parent = self
self.oor = Ipv4AclAndPrefixList.Oor()
self.oor.parent = self
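    # Illustrative read sketch (assumes a reachable NETCONF-enabled IOS-XR
    # device and the ydk runtime services; address and credentials are
    # placeholders):
    #   from ydk.providers import NetconfServiceProvider
    #   from ydk.services import CRUDService
    #   provider = NetconfServiceProvider(address='192.0.2.1', port=830,
    #                                     username='admin', password='admin',
    #                                     protocol='ssh')
    #   acl_oper = CRUDService().read(provider, Ipv4AclAndPrefixList())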
class AccessListManager(object):
"""
Access list manager containing access lists and
prefix lists
.. attribute:: accesses
        Access list class displaying Usage and Entries
**type**\: :py:class:`Accesses <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_oper.Ipv4AclAndPrefixList.AccessListManager.Accesses>`
.. attribute:: prefixes
Table of prefix lists
**type**\: :py:class:`Prefixes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_oper.Ipv4AclAndPrefixList.AccessListManager.Prefixes>`
.. attribute:: usages
Table of Usage statistics of access lists at different nodes
**type**\: :py:class:`Usages <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_oper.Ipv4AclAndPrefixList.AccessListManager.Usages>`
"""
_prefix = 'ipv4-acl-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.accesses = Ipv4AclAndPrefixList.AccessListManager.Accesses()
self.accesses.parent = self
self.prefixes = Ipv4AclAndPrefixList.AccessListManager.Prefixes()
self.prefixes.parent = self
self.usages = Ipv4AclAndPrefixList.AccessListManager.Usages()
self.usages.parent = self
class Prefixes(object):
"""
Table of prefix lists
.. attribute:: prefix
Name of the prefix list
**type**\: list of :py:class:`Prefix <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_oper.Ipv4AclAndPrefixList.AccessListManager.Prefixes.Prefix>`
"""
_prefix = 'ipv4-acl-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.prefix = YList()
self.prefix.parent = self
self.prefix.name = 'prefix'
class Prefix(object):
"""
Name of the prefix list
.. attribute:: prefix_list_name <key>
Name of the prefix list
**type**\: str
.. attribute:: prefix_list_sequences
Table of all the SequenceNumbers per prefix list
**type**\: :py:class:`PrefixListSequences <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_oper.Ipv4AclAndPrefixList.AccessListManager.Prefixes.Prefix.PrefixListSequences>`
"""
_prefix = 'ipv4-acl-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.prefix_list_name = None
self.prefix_list_sequences = Ipv4AclAndPrefixList.AccessListManager.Prefixes.Prefix.PrefixListSequences()
self.prefix_list_sequences.parent = self
class PrefixListSequences(object):
"""
Table of all the SequenceNumbers per prefix
list
.. attribute:: prefix_list_sequence
Sequence Number of a prefix list entry
**type**\: list of :py:class:`PrefixListSequence <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_oper.Ipv4AclAndPrefixList.AccessListManager.Prefixes.Prefix.PrefixListSequences.PrefixListSequence>`
"""
_prefix = 'ipv4-acl-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.prefix_list_sequence = YList()
self.prefix_list_sequence.parent = self
self.prefix_list_sequence.name = 'prefix_list_sequence'
class PrefixListSequence(object):
"""
Sequence Number of a prefix list entry
.. attribute:: sequence_number <key>
Sequence Number of the prefix list entry
**type**\: int
**range:** 1..2147483646
.. attribute:: acl_name
ACL Name
**type**\: str
.. attribute:: grant
Grant value permit/deny
**type**\: :py:class:`AclActionEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_oper.AclActionEnum>`
.. attribute:: hits
Number of hits
**type**\: int
**range:** 0..4294967295
.. attribute:: item_type
ACE type (prefix, remark)
**type**\: :py:class:`AclAce1Enum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_oper.AclAce1Enum>`
.. attribute:: maximum_length
Maximum length
**type**\: int
**range:** 0..4294967295
.. attribute:: minimum_length
Min length
**type**\: int
**range:** 0..4294967295
.. attribute:: operator_
Port Operator
**type**\: :py:class:`AclPortOperatorEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_oper.AclPortOperatorEnum>`
.. attribute:: prefix
Prefix
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: prefix_length
Prefix length
**type**\: int
**range:** 0..4294967295
.. attribute:: remark
Remark String
**type**\: str
.. attribute:: sequence
ACLE sequence number
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'ipv4-acl-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.sequence_number = None
self.acl_name = None
self.grant = None
self.hits = None
self.item_type = None
self.maximum_length = None
self.minimum_length = None
self.operator_ = None
self.prefix = None
self.prefix_length = None
self.remark = None
self.sequence = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
if self.sequence_number is None:
raise YPYModelError('Key property sequence_number is None')
return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-oper:prefix-list-sequence[Cisco-IOS-XR-ipv4-acl-oper:sequence-number = ' + str(self.sequence_number) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.sequence_number is not None:
return True
if self.acl_name is not None:
return True
if self.grant is not None:
return True
if self.hits is not None:
return True
if self.item_type is not None:
return True
if self.maximum_length is not None:
return True
if self.minimum_length is not None:
return True
if self.operator_ is not None:
return True
if self.prefix is not None:
return True
if self.prefix_length is not None:
return True
if self.remark is not None:
return True
if self.sequence is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta
return meta._meta_table['Ipv4AclAndPrefixList.AccessListManager.Prefixes.Prefix.PrefixListSequences.PrefixListSequence']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-oper:prefix-list-sequences'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.prefix_list_sequence is not None:
for child_ref in self.prefix_list_sequence:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta
return meta._meta_table['Ipv4AclAndPrefixList.AccessListManager.Prefixes.Prefix.PrefixListSequences']['meta_info']
@property
def _common_path(self):
if self.prefix_list_name is None:
raise YPYModelError('Key property prefix_list_name is None')
return '/Cisco-IOS-XR-ipv4-acl-oper:ipv4-acl-and-prefix-list/Cisco-IOS-XR-ipv4-acl-oper:access-list-manager/Cisco-IOS-XR-ipv4-acl-oper:prefixes/Cisco-IOS-XR-ipv4-acl-oper:prefix[Cisco-IOS-XR-ipv4-acl-oper:prefix-list-name = ' + str(self.prefix_list_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.prefix_list_name is not None:
return True
if self.prefix_list_sequences is not None and self.prefix_list_sequences._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta
return meta._meta_table['Ipv4AclAndPrefixList.AccessListManager.Prefixes.Prefix']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-ipv4-acl-oper:ipv4-acl-and-prefix-list/Cisco-IOS-XR-ipv4-acl-oper:access-list-manager/Cisco-IOS-XR-ipv4-acl-oper:prefixes'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.prefix is not None:
for child_ref in self.prefix:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta
return meta._meta_table['Ipv4AclAndPrefixList.AccessListManager.Prefixes']['meta_info']
class Accesses(object):
"""
        Access list class displaying Usage and Entries
.. attribute:: access
Name of the Access List
**type**\: list of :py:class:`Access <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_oper.Ipv4AclAndPrefixList.AccessListManager.Accesses.Access>`
"""
_prefix = 'ipv4-acl-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.access = YList()
self.access.parent = self
self.access.name = 'access'
class Access(object):
"""
Name of the Access List
.. attribute:: access_list_name <key>
Name of the Access List
**type**\: str
.. attribute:: access_list_sequences
Table of all the SequenceNumbers per access list
**type**\: :py:class:`AccessListSequences <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_oper.Ipv4AclAndPrefixList.AccessListManager.Accesses.Access.AccessListSequences>`
"""
_prefix = 'ipv4-acl-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.access_list_name = None
self.access_list_sequences = Ipv4AclAndPrefixList.AccessListManager.Accesses.Access.AccessListSequences()
self.access_list_sequences.parent = self
class AccessListSequences(object):
"""
Table of all the SequenceNumbers per access
list
.. attribute:: access_list_sequence
Sequence Number of an access list entry
**type**\: list of :py:class:`AccessListSequence <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_oper.Ipv4AclAndPrefixList.AccessListManager.Accesses.Access.AccessListSequences.AccessListSequence>`
"""
_prefix = 'ipv4-acl-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.access_list_sequence = YList()
self.access_list_sequence.parent = self
self.access_list_sequence.name = 'access_list_sequence'
class AccessListSequence(object):
"""
Sequence Number of an access list entry
.. attribute:: sequence_number <key>
ACLEntry Sequence Number
**type**\: int
**range:** 1..2147483646
.. attribute:: acl_name
ACL Name
**type**\: str
.. attribute:: capture
Capture option, TRUE if enabled
**type**\: bool
.. attribute:: counter_name
Counter name
**type**\: str
.. attribute:: destination_address
Destination address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: destination_address_mask
Destination mask
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: destination_operator
Destination operator
**type**\: :py:class:`AclPortOperatorEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_oper.AclPortOperatorEnum>`
.. attribute:: destination_port1
Destination port 1
**type**\: int
**range:** 0..65535
.. attribute:: destination_port2
Destination port 2
**type**\: int
**range:** 0..65535
.. attribute:: destination_port_group
Destination port object\-group
**type**\: str
.. attribute:: destination_prefix_group
Destination prefix object\-group
**type**\: str
.. attribute:: dscp
DSCP or DSCP range start
**type**\: int
**range:** 0..255
.. attribute:: dscp2
DSCP Range End
**type**\: int
**range:** 0..255
.. attribute:: dscp_operator
DSCP Operator
**type**\: int
**range:** 0..255
.. attribute:: dscp_present
DSCP present
**type**\: bool
.. attribute:: dynamic
Is dynamic ACE
**type**\: bool
.. attribute:: fragments
Fragments
**type**\: int
**range:** 0..255
.. attribute:: grant
Permit/deny
**type**\: :py:class:`AclActionEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_oper.AclActionEnum>`
.. attribute:: hits
Number of hits
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: hw_next_hop_info
HW Next hop info
**type**\: :py:class:`HwNextHopInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_oper.Ipv4AclAndPrefixList.AccessListManager.Accesses.Access.AccessListSequences.AccessListSequence.HwNextHopInfo>`
.. attribute:: is_icmp_off
True if ICMP off
**type**\: bool
.. attribute:: item_type
ACE type (acl, remark)
**type**\: :py:class:`AclAce1Enum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_oper.AclAce1Enum>`
.. attribute:: log_option
Log option
**type**\: :py:class:`AclLogEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_oper.AclLogEnum>`
.. attribute:: next_hop_info
Next hop info
**type**\: list of :py:class:`NextHopInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_oper.Ipv4AclAndPrefixList.AccessListManager.Accesses.Access.AccessListSequences.AccessListSequence.NextHopInfo>`
.. attribute:: next_hop_type
Next hop type
**type**\: :py:class:`BagAclNhEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_oper.BagAclNhEnum>`
.. attribute:: no_stats
No stats
**type**\: bool
.. attribute:: port_length1
Port length 1
**type**\: int
**range:** 0..65535
.. attribute:: port_length2
Port length 2
**type**\: int
**range:** 0..65535
.. attribute:: port_length_operator
Port length operator
**type**\: :py:class:`AclPortOperatorEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_oper.AclPortOperatorEnum>`
.. attribute:: precedence
Precedence
**type**\: int
**range:** 0..255
.. attribute:: precedence_present
Precedence present
**type**\: bool
.. attribute:: protocol
IPv4 protocol type
**type**\: int
**range:** 0..65535
.. attribute:: remark
Remark String
**type**\: str
.. attribute:: sequence
ACLE sequence number
**type**\: int
**range:** 0..4294967295
.. attribute:: sorce_operator
Deprecated by Source operator
**type**\: :py:class:`AclPortOperatorEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_oper.AclPortOperatorEnum>`
.. attribute:: sorce_port1
Deprecated by SourcePort1
**type**\: int
**range:** 0..65535
.. attribute:: sorce_port2
Deprecated by SourcePort2
**type**\: int
**range:** 0..65535
.. attribute:: source_address
Source address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: source_address_mask
Source mask
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: source_operator
Source operator
**type**\: :py:class:`AclPortOperatorEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_oper.AclPortOperatorEnum>`
.. attribute:: source_port1
Source port 1
**type**\: int
**range:** 0..65535
.. attribute:: source_port2
Source port 2
**type**\: int
**range:** 0..65535
.. attribute:: source_port_group
Source port object\-group
**type**\: str
.. attribute:: source_prefix_group
Source prefix object\-group
**type**\: str
.. attribute:: tcp_flags
TCP flags
**type**\: int
**range:** 0..255
.. attribute:: tcp_flags_mask
TCP flags mask
**type**\: int
**range:** 0..255
.. attribute:: tcp_flags_operator
TCP flags operator
**type**\: :py:class:`AclTcpflagsOperatorEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_oper.AclTcpflagsOperatorEnum>`
.. attribute:: ttl1
TTL 1
**type**\: int
**range:** 0..65535
.. attribute:: ttl2
TTL 2
**type**\: int
**range:** 0..65535
.. attribute:: ttl_operator
TTL operator
**type**\: :py:class:`AclPortOperatorEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_oper.AclPortOperatorEnum>`
"""
_prefix = 'ipv4-acl-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.sequence_number = None
self.acl_name = None
self.capture = None
self.counter_name = None
self.destination_address = None
self.destination_address_mask = None
self.destination_operator = None
self.destination_port1 = None
self.destination_port2 = None
self.destination_port_group = None
self.destination_prefix_group = None
self.dscp = None
self.dscp2 = None
self.dscp_operator = None
self.dscp_present = None
self.dynamic = None
self.fragments = None
self.grant = None
self.hits = None
self.hw_next_hop_info = Ipv4AclAndPrefixList.AccessListManager.Accesses.Access.AccessListSequences.AccessListSequence.HwNextHopInfo()
self.hw_next_hop_info.parent = self
self.is_icmp_off = None
self.item_type = None
self.log_option = None
self.next_hop_info = YList()
self.next_hop_info.parent = self
self.next_hop_info.name = 'next_hop_info'
self.next_hop_type = None
self.no_stats = None
self.port_length1 = None
self.port_length2 = None
self.port_length_operator = None
self.precedence = None
self.precedence_present = None
self.protocol = None
self.remark = None
self.sequence = None
self.sorce_operator = None
self.sorce_port1 = None
self.sorce_port2 = None
self.source_address = None
self.source_address_mask = None
self.source_operator = None
self.source_port1 = None
self.source_port2 = None
self.source_port_group = None
self.source_prefix_group = None
self.tcp_flags = None
self.tcp_flags_mask = None
self.tcp_flags_operator = None
self.ttl1 = None
self.ttl2 = None
self.ttl_operator = None
class HwNextHopInfo(object):
"""
HW Next hop info
.. attribute:: next_hop
The Next Hop
**type**\: int
**range:** 0..4294967295
.. attribute:: type
the next\-hop type
**type**\: :py:class:`BagAclNhEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_oper.BagAclNhEnum>`
.. attribute:: vrf_name
VRF name
**type**\: str
**range:** 0..32
"""
_prefix = 'ipv4-acl-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.next_hop = None
self.type = None
self.vrf_name = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-oper:hw-next-hop-info'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.next_hop is not None:
return True
if self.type is not None:
return True
if self.vrf_name is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta
return meta._meta_table['Ipv4AclAndPrefixList.AccessListManager.Accesses.Access.AccessListSequences.AccessListSequence.HwNextHopInfo']['meta_info']
class NextHopInfo(object):
"""
Next hop info
.. attribute:: is_acl_next_hop_exist
The nexthop exist
**type**\: bool
.. attribute:: next_hop
The next hop
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: status
The next hop status
**type**\: :py:class:`BagAclNhStatusEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_oper.BagAclNhStatusEnum>`
.. attribute:: track_name
Track name
**type**\: str
"""
_prefix = 'ipv4-acl-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.is_acl_next_hop_exist = None
self.next_hop = None
self.status = None
self.track_name = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-oper:next-hop-info'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.is_acl_next_hop_exist is not None:
return True
if self.next_hop is not None:
return True
if self.status is not None:
return True
if self.track_name is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta
return meta._meta_table['Ipv4AclAndPrefixList.AccessListManager.Accesses.Access.AccessListSequences.AccessListSequence.NextHopInfo']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
if self.sequence_number is None:
raise YPYModelError('Key property sequence_number is None')
return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-oper:access-list-sequence[Cisco-IOS-XR-ipv4-acl-oper:sequence-number = ' + str(self.sequence_number) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.sequence_number is not None:
return True
if self.acl_name is not None:
return True
if self.capture is not None:
return True
if self.counter_name is not None:
return True
if self.destination_address is not None:
return True
if self.destination_address_mask is not None:
return True
if self.destination_operator is not None:
return True
if self.destination_port1 is not None:
return True
if self.destination_port2 is not None:
return True
if self.destination_port_group is not None:
return True
if self.destination_prefix_group is not None:
return True
if self.dscp is not None:
return True
if self.dscp2 is not None:
return True
if self.dscp_operator is not None:
return True
if self.dscp_present is not None:
return True
if self.dynamic is not None:
return True
if self.fragments is not None:
return True
if self.grant is not None:
return True
if self.hits is not None:
return True
if self.hw_next_hop_info is not None and self.hw_next_hop_info._has_data():
return True
if self.is_icmp_off is not None:
return True
if self.item_type is not None:
return True
if self.log_option is not None:
return True
if self.next_hop_info is not None:
for child_ref in self.next_hop_info:
if child_ref._has_data():
return True
if self.next_hop_type is not None:
return True
if self.no_stats is not None:
return True
if self.port_length1 is not None:
return True
if self.port_length2 is not None:
return True
if self.port_length_operator is not None:
return True
if self.precedence is not None:
return True
if self.precedence_present is not None:
return True
if self.protocol is not None:
return True
if self.remark is not None:
return True
if self.sequence is not None:
return True
if self.sorce_operator is not None:
return True
if self.sorce_port1 is not None:
return True
if self.sorce_port2 is not None:
return True
if self.source_address is not None:
return True
if self.source_address_mask is not None:
return True
if self.source_operator is not None:
return True
if self.source_port1 is not None:
return True
if self.source_port2 is not None:
return True
if self.source_port_group is not None:
return True
if self.source_prefix_group is not None:
return True
if self.tcp_flags is not None:
return True
if self.tcp_flags_mask is not None:
return True
if self.tcp_flags_operator is not None:
return True
if self.ttl1 is not None:
return True
if self.ttl2 is not None:
return True
if self.ttl_operator is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta
return meta._meta_table['Ipv4AclAndPrefixList.AccessListManager.Accesses.Access.AccessListSequences.AccessListSequence']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-oper:access-list-sequences'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.access_list_sequence is not None:
for child_ref in self.access_list_sequence:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta
return meta._meta_table['Ipv4AclAndPrefixList.AccessListManager.Accesses.Access.AccessListSequences']['meta_info']
@property
def _common_path(self):
if self.access_list_name is None:
raise YPYModelError('Key property access_list_name is None')
return '/Cisco-IOS-XR-ipv4-acl-oper:ipv4-acl-and-prefix-list/Cisco-IOS-XR-ipv4-acl-oper:access-list-manager/Cisco-IOS-XR-ipv4-acl-oper:accesses/Cisco-IOS-XR-ipv4-acl-oper:access[Cisco-IOS-XR-ipv4-acl-oper:access-list-name = ' + str(self.access_list_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.access_list_name is not None:
return True
if self.access_list_sequences is not None and self.access_list_sequences._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta
return meta._meta_table['Ipv4AclAndPrefixList.AccessListManager.Accesses.Access']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-ipv4-acl-oper:ipv4-acl-and-prefix-list/Cisco-IOS-XR-ipv4-acl-oper:access-list-manager/Cisco-IOS-XR-ipv4-acl-oper:accesses'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.access is not None:
for child_ref in self.access:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta
return meta._meta_table['Ipv4AclAndPrefixList.AccessListManager.Accesses']['meta_info']
class Usages(object):
"""
Table of Usage statistics of access lists at
different nodes
.. attribute:: usage
Usage statistics of an access list at a node
**type**\: list of :py:class:`Usage <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_oper.Ipv4AclAndPrefixList.AccessListManager.Usages.Usage>`
"""
_prefix = 'ipv4-acl-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.usage = YList()
self.usage.parent = self
self.usage.name = 'usage'
class Usage(object):
"""
Usage statistics of an access list at a node
.. attribute:: access_list_name
Name of the access list
**type**\: str
.. attribute:: application_id
Application ID
**type**\: :py:class:`AclUsageAppIdEnumEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_common_acl_datatypes.AclUsageAppIdEnumEnum>`
.. attribute:: node_name
Node where access list is applied
**type**\: str
**pattern:** ([a\-zA\-Z0\-9\_]\*\\d+/){1,2}([a\-zA\-Z0\-9\_]\*\\d+)
.. attribute:: usage_details
Usage Statistics Details
**type**\: str
**mandatory**\: True
"""
_prefix = 'ipv4-acl-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.access_list_name = None
self.application_id = None
self.node_name = None
self.usage_details = None
@property
def _common_path(self):
return '/Cisco-IOS-XR-ipv4-acl-oper:ipv4-acl-and-prefix-list/Cisco-IOS-XR-ipv4-acl-oper:access-list-manager/Cisco-IOS-XR-ipv4-acl-oper:usages/Cisco-IOS-XR-ipv4-acl-oper:usage'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.access_list_name is not None:
return True
if self.application_id is not None:
return True
if self.node_name is not None:
return True
if self.usage_details is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta
return meta._meta_table['Ipv4AclAndPrefixList.AccessListManager.Usages.Usage']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-ipv4-acl-oper:ipv4-acl-and-prefix-list/Cisco-IOS-XR-ipv4-acl-oper:access-list-manager/Cisco-IOS-XR-ipv4-acl-oper:usages'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.usage is not None:
for child_ref in self.usage:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta
return meta._meta_table['Ipv4AclAndPrefixList.AccessListManager.Usages']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-ipv4-acl-oper:ipv4-acl-and-prefix-list/Cisco-IOS-XR-ipv4-acl-oper:access-list-manager'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.accesses is not None and self.accesses._has_data():
return True
if self.prefixes is not None and self.prefixes._has_data():
return True
if self.usages is not None and self.usages._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta
return meta._meta_table['Ipv4AclAndPrefixList.AccessListManager']['meta_info']
class Oor(object):
"""
Out Of Resources, Limits to the resources
allocatable
.. attribute:: access_list_summary
Resource limits pertaining to access lists only
**type**\: :py:class:`AccessListSummary <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_oper.Ipv4AclAndPrefixList.Oor.AccessListSummary>`
.. attribute:: details
Details of the Overall Out Of Resources Limits
**type**\: :py:class:`Details <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_oper.Ipv4AclAndPrefixList.Oor.Details>`
.. attribute:: oor_accesses
Resource occupation details for access lists
**type**\: :py:class:`OorAccesses <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_oper.Ipv4AclAndPrefixList.Oor.OorAccesses>`
.. attribute:: oor_prefixes
Resource occupation details for prefix lists
**type**\: :py:class:`OorPrefixes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_oper.Ipv4AclAndPrefixList.Oor.OorPrefixes>`
.. attribute:: prefix_list_summary
Summary of the prefix Lists resource utilization
**type**\: :py:class:`PrefixListSummary <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_oper.Ipv4AclAndPrefixList.Oor.PrefixListSummary>`
"""
_prefix = 'ipv4-acl-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.access_list_summary = Ipv4AclAndPrefixList.Oor.AccessListSummary()
self.access_list_summary.parent = self
self.details = Ipv4AclAndPrefixList.Oor.Details()
self.details.parent = self
self.oor_accesses = Ipv4AclAndPrefixList.Oor.OorAccesses()
self.oor_accesses.parent = self
self.oor_prefixes = Ipv4AclAndPrefixList.Oor.OorPrefixes()
self.oor_prefixes.parent = self
self.prefix_list_summary = Ipv4AclAndPrefixList.Oor.PrefixListSummary()
self.prefix_list_summary.parent = self
class Details(object):
"""
Details of the Overall Out Of Resources Limits
.. attribute:: current_configured_ac_es
Current configured aces
**type**\: int
**range:** 0..4294967295
.. attribute:: current_configured_ac_ls
Current configured acls
**type**\: int
**range:** 0..4294967295
.. attribute:: current_max_configurable_ac_es
Current max configurable aces
**type**\: int
**range:** 0..4294967295
.. attribute:: current_max_configurable_ac_ls
Current max configurable acls
**type**\: int
**range:** 0..4294967295
.. attribute:: default_max_ac_es
default max configurable aces
**type**\: int
**range:** 0..4294967295
.. attribute:: default_max_ac_ls
default max configurable acls
**type**\: int
**range:** 0..4294967295
.. attribute:: max_configurable_ac_es
max configurable aces
**type**\: int
**range:** 0..4294967295
.. attribute:: max_configurable_ac_ls
max configurable acls
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'ipv4-acl-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.current_configured_ac_es = None
self.current_configured_ac_ls = None
self.current_max_configurable_ac_es = None
self.current_max_configurable_ac_ls = None
self.default_max_ac_es = None
self.default_max_ac_ls = None
self.max_configurable_ac_es = None
self.max_configurable_ac_ls = None
@property
def _common_path(self):
return '/Cisco-IOS-XR-ipv4-acl-oper:ipv4-acl-and-prefix-list/Cisco-IOS-XR-ipv4-acl-oper:oor/Cisco-IOS-XR-ipv4-acl-oper:details'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.current_configured_ac_es is not None:
return True
if self.current_configured_ac_ls is not None:
return True
if self.current_max_configurable_ac_es is not None:
return True
if self.current_max_configurable_ac_ls is not None:
return True
if self.default_max_ac_es is not None:
return True
if self.default_max_ac_ls is not None:
return True
if self.max_configurable_ac_es is not None:
return True
if self.max_configurable_ac_ls is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta
return meta._meta_table['Ipv4AclAndPrefixList.Oor.Details']['meta_info']
class OorPrefixes(object):
"""
Resource occupation details for prefix lists
.. attribute:: oor_prefix
Resource occupation details for a particular prefix list
**type**\: list of :py:class:`OorPrefix <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_oper.Ipv4AclAndPrefixList.Oor.OorPrefixes.OorPrefix>`
"""
_prefix = 'ipv4-acl-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.oor_prefix = YList()
self.oor_prefix.parent = self
self.oor_prefix.name = 'oor_prefix'
class OorPrefix(object):
"""
Resource occupation details for a particular
prefix list
.. attribute:: prefix_list_name <key>
Name of a prefix list
**type**\: str
.. attribute:: current_configured_ac_es
Current configured aces
**type**\: int
**range:** 0..4294967295
.. attribute:: current_configured_ac_ls
Current configured acls
**type**\: int
**range:** 0..4294967295
.. attribute:: current_max_configurable_ac_es
Current max configurable aces
**type**\: int
**range:** 0..4294967295
.. attribute:: current_max_configurable_ac_ls
Current max configurable acls
**type**\: int
**range:** 0..4294967295
.. attribute:: default_max_ac_es
default max configurable aces
**type**\: int
**range:** 0..4294967295
.. attribute:: default_max_ac_ls
default max configurable acls
**type**\: int
**range:** 0..4294967295
.. attribute:: max_configurable_ac_es
max configurable aces
**type**\: int
**range:** 0..4294967295
.. attribute:: max_configurable_ac_ls
max configurable acls
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'ipv4-acl-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.prefix_list_name = None
self.current_configured_ac_es = None
self.current_configured_ac_ls = None
self.current_max_configurable_ac_es = None
self.current_max_configurable_ac_ls = None
self.default_max_ac_es = None
self.default_max_ac_ls = None
self.max_configurable_ac_es = None
self.max_configurable_ac_ls = None
@property
def _common_path(self):
if self.prefix_list_name is None:
raise YPYModelError('Key property prefix_list_name is None')
return '/Cisco-IOS-XR-ipv4-acl-oper:ipv4-acl-and-prefix-list/Cisco-IOS-XR-ipv4-acl-oper:oor/Cisco-IOS-XR-ipv4-acl-oper:oor-prefixes/Cisco-IOS-XR-ipv4-acl-oper:oor-prefix[Cisco-IOS-XR-ipv4-acl-oper:prefix-list-name = ' + str(self.prefix_list_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.prefix_list_name is not None:
return True
if self.current_configured_ac_es is not None:
return True
if self.current_configured_ac_ls is not None:
return True
if self.current_max_configurable_ac_es is not None:
return True
if self.current_max_configurable_ac_ls is not None:
return True
if self.default_max_ac_es is not None:
return True
if self.default_max_ac_ls is not None:
return True
if self.max_configurable_ac_es is not None:
return True
if self.max_configurable_ac_ls is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta
return meta._meta_table['Ipv4AclAndPrefixList.Oor.OorPrefixes.OorPrefix']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-ipv4-acl-oper:ipv4-acl-and-prefix-list/Cisco-IOS-XR-ipv4-acl-oper:oor/Cisco-IOS-XR-ipv4-acl-oper:oor-prefixes'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.oor_prefix is not None:
for child_ref in self.oor_prefix:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta
return meta._meta_table['Ipv4AclAndPrefixList.Oor.OorPrefixes']['meta_info']
class OorAccesses(object):
"""
Resource occupation details for access lists
.. attribute:: oor_access
Resource occupation details for a particular access list
**type**\: list of :py:class:`OorAccess <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_oper.Ipv4AclAndPrefixList.Oor.OorAccesses.OorAccess>`
"""
_prefix = 'ipv4-acl-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.oor_access = YList()
self.oor_access.parent = self
self.oor_access.name = 'oor_access'
class OorAccess(object):
"""
Resource occupation details for a particular
access list
.. attribute:: access_list_name <key>
Name of the Access List
**type**\: str
.. attribute:: current_configured_ac_es
Current configured aces
**type**\: int
**range:** 0..4294967295
.. attribute:: current_configured_ac_ls
Current configured acls
**type**\: int
**range:** 0..4294967295
.. attribute:: current_max_configurable_ac_es
Current max configurable aces
**type**\: int
**range:** 0..4294967295
.. attribute:: current_max_configurable_ac_ls
Current max configurable acls
**type**\: int
**range:** 0..4294967295
.. attribute:: default_max_ac_es
default max configurable aces
**type**\: int
**range:** 0..4294967295
.. attribute:: default_max_ac_ls
default max configurable acls
**type**\: int
**range:** 0..4294967295
.. attribute:: max_configurable_ac_es
max configurable aces
**type**\: int
**range:** 0..4294967295
.. attribute:: max_configurable_ac_ls
max configurable acls
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'ipv4-acl-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.access_list_name = None
self.current_configured_ac_es = None
self.current_configured_ac_ls = None
self.current_max_configurable_ac_es = None
self.current_max_configurable_ac_ls = None
self.default_max_ac_es = None
self.default_max_ac_ls = None
self.max_configurable_ac_es = None
self.max_configurable_ac_ls = None
@property
def _common_path(self):
if self.access_list_name is None:
raise YPYModelError('Key property access_list_name is None')
return '/Cisco-IOS-XR-ipv4-acl-oper:ipv4-acl-and-prefix-list/Cisco-IOS-XR-ipv4-acl-oper:oor/Cisco-IOS-XR-ipv4-acl-oper:oor-accesses/Cisco-IOS-XR-ipv4-acl-oper:oor-access[Cisco-IOS-XR-ipv4-acl-oper:access-list-name = ' + str(self.access_list_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.access_list_name is not None:
return True
if self.current_configured_ac_es is not None:
return True
if self.current_configured_ac_ls is not None:
return True
if self.current_max_configurable_ac_es is not None:
return True
if self.current_max_configurable_ac_ls is not None:
return True
if self.default_max_ac_es is not None:
return True
if self.default_max_ac_ls is not None:
return True
if self.max_configurable_ac_es is not None:
return True
if self.max_configurable_ac_ls is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta
return meta._meta_table['Ipv4AclAndPrefixList.Oor.OorAccesses.OorAccess']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-ipv4-acl-oper:ipv4-acl-and-prefix-list/Cisco-IOS-XR-ipv4-acl-oper:oor/Cisco-IOS-XR-ipv4-acl-oper:oor-accesses'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.oor_access is not None:
for child_ref in self.oor_access:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta
return meta._meta_table['Ipv4AclAndPrefixList.Oor.OorAccesses']['meta_info']
class AccessListSummary(object):
"""
Resource limits pertaining to access lists only
.. attribute:: details
Details containing the resource limits of the access lists
**type**\: :py:class:`Details <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_oper.Ipv4AclAndPrefixList.Oor.AccessListSummary.Details>`
"""
_prefix = 'ipv4-acl-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.details = Ipv4AclAndPrefixList.Oor.AccessListSummary.Details()
self.details.parent = self
class Details(object):
"""
Details containing the resource limits of the
access lists
.. attribute:: current_configured_ac_es
Current configured aces
**type**\: int
**range:** 0..4294967295
.. attribute:: current_configured_ac_ls
Current configured acls
**type**\: int
**range:** 0..4294967295
.. attribute:: current_max_configurable_ac_es
Current max configurable aces
**type**\: int
**range:** 0..4294967295
.. attribute:: current_max_configurable_ac_ls
Current max configurable acls
**type**\: int
**range:** 0..4294967295
.. attribute:: default_max_ac_es
default max configurable aces
**type**\: int
**range:** 0..4294967295
.. attribute:: default_max_ac_ls
default max configurable acls
**type**\: int
**range:** 0..4294967295
.. attribute:: max_configurable_ac_es
max configurable aces
**type**\: int
**range:** 0..4294967295
.. attribute:: max_configurable_ac_ls
max configurable acls
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'ipv4-acl-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.current_configured_ac_es = None
self.current_configured_ac_ls = None
self.current_max_configurable_ac_es = None
self.current_max_configurable_ac_ls = None
self.default_max_ac_es = None
self.default_max_ac_ls = None
self.max_configurable_ac_es = None
self.max_configurable_ac_ls = None
@property
def _common_path(self):
return '/Cisco-IOS-XR-ipv4-acl-oper:ipv4-acl-and-prefix-list/Cisco-IOS-XR-ipv4-acl-oper:oor/Cisco-IOS-XR-ipv4-acl-oper:access-list-summary/Cisco-IOS-XR-ipv4-acl-oper:details'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.current_configured_ac_es is not None:
return True
if self.current_configured_ac_ls is not None:
return True
if self.current_max_configurable_ac_es is not None:
return True
if self.current_max_configurable_ac_ls is not None:
return True
if self.default_max_ac_es is not None:
return True
if self.default_max_ac_ls is not None:
return True
if self.max_configurable_ac_es is not None:
return True
if self.max_configurable_ac_ls is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta
return meta._meta_table['Ipv4AclAndPrefixList.Oor.AccessListSummary.Details']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-ipv4-acl-oper:ipv4-acl-and-prefix-list/Cisco-IOS-XR-ipv4-acl-oper:oor/Cisco-IOS-XR-ipv4-acl-oper:access-list-summary'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.details is not None and self.details._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta
return meta._meta_table['Ipv4AclAndPrefixList.Oor.AccessListSummary']['meta_info']
class PrefixListSummary(object):
"""
Summary of the prefix Lists resource
utilization
.. attribute:: details
Summary Detail of the prefix list Resource Utilisation
**type**\: :py:class:`Details <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_oper.Ipv4AclAndPrefixList.Oor.PrefixListSummary.Details>`
"""
_prefix = 'ipv4-acl-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.details = Ipv4AclAndPrefixList.Oor.PrefixListSummary.Details()
self.details.parent = self
class Details(object):
"""
Summary Detail of the prefix list Resource
Utilisation
.. attribute:: current_configured_ac_es
Current configured aces
**type**\: int
**range:** 0..4294967295
.. attribute:: current_configured_ac_ls
Current configured acls
**type**\: int
**range:** 0..4294967295
.. attribute:: current_max_configurable_ac_es
Current max configurable aces
**type**\: int
**range:** 0..4294967295
.. attribute:: current_max_configurable_ac_ls
Current max configurable acls
**type**\: int
**range:** 0..4294967295
.. attribute:: default_max_ac_es
default max configurable aces
**type**\: int
**range:** 0..4294967295
.. attribute:: default_max_ac_ls
default max configurable acls
**type**\: int
**range:** 0..4294967295
.. attribute:: max_configurable_ac_es
max configurable aces
**type**\: int
**range:** 0..4294967295
.. attribute:: max_configurable_ac_ls
max configurable acls
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'ipv4-acl-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.current_configured_ac_es = None
self.current_configured_ac_ls = None
self.current_max_configurable_ac_es = None
self.current_max_configurable_ac_ls = None
self.default_max_ac_es = None
self.default_max_ac_ls = None
self.max_configurable_ac_es = None
self.max_configurable_ac_ls = None
@property
def _common_path(self):
return '/Cisco-IOS-XR-ipv4-acl-oper:ipv4-acl-and-prefix-list/Cisco-IOS-XR-ipv4-acl-oper:oor/Cisco-IOS-XR-ipv4-acl-oper:prefix-list-summary/Cisco-IOS-XR-ipv4-acl-oper:details'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.current_configured_ac_es is not None:
return True
if self.current_configured_ac_ls is not None:
return True
if self.current_max_configurable_ac_es is not None:
return True
if self.current_max_configurable_ac_ls is not None:
return True
if self.default_max_ac_es is not None:
return True
if self.default_max_ac_ls is not None:
return True
if self.max_configurable_ac_es is not None:
return True
if self.max_configurable_ac_ls is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta
return meta._meta_table['Ipv4AclAndPrefixList.Oor.PrefixListSummary.Details']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-ipv4-acl-oper:ipv4-acl-and-prefix-list/Cisco-IOS-XR-ipv4-acl-oper:oor/Cisco-IOS-XR-ipv4-acl-oper:prefix-list-summary'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.details is not None and self.details._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta
return meta._meta_table['Ipv4AclAndPrefixList.Oor.PrefixListSummary']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-ipv4-acl-oper:ipv4-acl-and-prefix-list/Cisco-IOS-XR-ipv4-acl-oper:oor'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.access_list_summary is not None and self.access_list_summary._has_data():
return True
if self.details is not None and self.details._has_data():
return True
if self.oor_accesses is not None and self.oor_accesses._has_data():
return True
if self.oor_prefixes is not None and self.oor_prefixes._has_data():
return True
if self.prefix_list_summary is not None and self.prefix_list_summary._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta
return meta._meta_table['Ipv4AclAndPrefixList.Oor']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-ipv4-acl-oper:ipv4-acl-and-prefix-list'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.access_list_manager is not None and self.access_list_manager._has_data():
return True
if self.oor is not None and self.oor._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta
return meta._meta_table['Ipv4AclAndPrefixList']['meta_info']
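# Minimal illustration of how the generated classes build their keyed instance
# paths. This sketch only exercises code defined above and does not contact a
# device; retrieving live operational data would additionally require ydk's
# NetconfServiceProvider and CRUDService, which are outside this module.
if __name__ == '__main__':
    oor_access = Ipv4AclAndPrefixList.Oor.OorAccesses.OorAccess()
    oor_access.access_list_name = 'DEMO-ACL'  # hypothetical ACL name
    # Prints the keyed XPath for this list entry, ending in
    # ...oor-access[Cisco-IOS-XR-ipv4-acl-oper:access-list-name = DEMO-ACL]
    print(oor_access._common_path)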
|
abhikeshav/ydk-py
|
cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_ipv4_acl_oper.py
|
Python
|
apache-2.0
| 94,935
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Perturb a `LinearOperator` with a rank `K` update."""
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.ops.linalg import linear_operator_diag
from tensorflow.python.ops.linalg import linear_operator_identity
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"LinearOperatorLowRankUpdate",
]
@tf_export("linalg.LinearOperatorLowRankUpdate")
@linear_operator.make_composite_tensor
class LinearOperatorLowRankUpdate(linear_operator.LinearOperator):
"""Perturb a `LinearOperator` with a rank `K` update.
This operator acts like a [batch] matrix `A` with shape
`[B1,...,Bb, M, N]` for some `b >= 0`. The first `b` indices index a
batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is
an `M x N` matrix.
`LinearOperatorLowRankUpdate` represents `A = L + U D V^H`, where
```
L, is a LinearOperator representing [batch] M x N matrices
U, is a [batch] M x K matrix. Typically K << M.
D, is a [batch] K x K matrix.
V, is a [batch] N x K matrix. Typically K << N.
V^H is the Hermitian transpose (adjoint) of V.
```
If `M = N`, determinants and solves are done using the matrix determinant
lemma and Woodbury identities, and thus require L and D to be non-singular.
Solves and determinants will be attempted unless the "is_non_singular"
property of L and D is False.
In the event that L and D are positive-definite, and U = V, solves and
determinants can be done using a Cholesky factorization.
```python
# Create a 3 x 3 diagonal linear operator.
diag_operator = LinearOperatorDiag(
      diag=[1., 2., 3.], is_non_singular=True, is_self_adjoint=True,
is_positive_definite=True)
# Perturb with a rank 2 perturbation
operator = LinearOperatorLowRankUpdate(
operator=diag_operator,
u=[[1., 2.], [-1., 3.], [0., 0.]],
diag_update=[11., 12.],
v=[[1., 2.], [-1., 3.], [10., 10.]])
operator.shape
==> [3, 3]
operator.log_abs_determinant()
==> scalar Tensor
x = ... Shape [3, 4] Tensor
operator.matmul(x)
==> Shape [3, 4] Tensor
```
### Shape compatibility
  This operator acts on [batch] matrices with compatible shape.
`x` is a batch matrix with compatible shape for `matmul` and `solve` if
```
operator.shape = [B1,...,Bb] + [M, N], with b >= 0
x.shape = [B1,...,Bb] + [N, R], with R >= 0.
```
### Performance
Suppose `operator` is a `LinearOperatorLowRankUpdate` of shape `[M, N]`,
made from a rank `K` update of `base_operator` which performs `.matmul(x)` on
`x` having `x.shape = [N, R]` with `O(L_matmul*N*R)` complexity (and similarly
  for `solve`, `determinant`). Then, if `x.shape = [N, R]`,
* `operator.matmul(x)` is `O(L_matmul*N*R + K*N*R)`
and if `M = N`,
* `operator.solve(x)` is `O(L_matmul*N*R + N*K*R + K^2*R + K^3)`
* `operator.determinant()` is `O(L_determinant + L_solve*N*K + K^2*N + K^3)`
If instead `operator` and `x` have shape `[B1,...,Bb, M, N]` and
`[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`.
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular`, `self_adjoint`, `positive_definite`,
`diag_update_positive` and `square`. These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
base_operator,
u,
diag_update=None,
v=None,
is_diag_update_positive=None,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=None,
name="LinearOperatorLowRankUpdate"):
"""Initialize a `LinearOperatorLowRankUpdate`.
This creates a `LinearOperator` of the form `A = L + U D V^H`, with
`L` a `LinearOperator`, `U, V` both [batch] matrices, and `D` a [batch]
diagonal matrix.
If `L` is non-singular, solves and determinants are available.
Solves/determinants both involve a solve/determinant of a `K x K` system.
In the event that L and D are self-adjoint positive-definite, and U = V,
this can be done using a Cholesky factorization. The user should set the
`is_X` matrix property hints, which will trigger the appropriate code path.
Args:
base_operator: Shape `[B1,...,Bb, M, N]`.
u: Shape `[B1,...,Bb, M, K]` `Tensor` of same `dtype` as `base_operator`.
This is `U` above.
diag_update: Optional shape `[B1,...,Bb, K]` `Tensor` with same `dtype`
as `base_operator`. This is the diagonal of `D` above.
Defaults to `D` being the identity operator.
v: Optional `Tensor` of same `dtype` as `u` and shape `[B1,...,Bb, N, K]`
Defaults to `v = u`, in which case the perturbation is symmetric.
If `M != N`, then `v` must be set since the perturbation is not square.
is_diag_update_positive: Python `bool`.
If `True`, expect `diag_update > 0`.
is_non_singular: Expect that this operator is non-singular.
Default is `None`, unless `is_positive_definite` is auto-set to be
`True` (see below).
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose. Default is `None`, unless `base_operator` is self-adjoint
and `v = None` (meaning `u=v`), in which case this defaults to `True`.
is_positive_definite: Expect that this operator is positive definite.
        Default is `None`, unless `base_operator` is positive-definite,
        `v = None` (meaning `u=v`), and `is_diag_update_positive` is `True`,
        in which case this defaults to `True`.
Note that we say an operator is positive definite when the quadratic
form `x^H A x` has positive real part for all nonzero `x`.
is_square: Expect that this operator acts like square [batch] matrices.
name: A name for this `LinearOperator`.
Raises:
ValueError: If `is_X` flags are set in an inconsistent way.
"""
parameters = dict(
base_operator=base_operator,
u=u,
diag_update=diag_update,
v=v,
is_diag_update_positive=is_diag_update_positive,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
name=name
)
dtype = base_operator.dtype
if diag_update is not None:
if is_diag_update_positive and dtype.is_complex:
logging.warn("Note: setting is_diag_update_positive with a complex "
"dtype means that diagonal is real and positive.")
if diag_update is None:
if is_diag_update_positive is False:
raise ValueError(
"Default diagonal is the identity, which is positive. However, "
"user set 'is_diag_update_positive' to False.")
is_diag_update_positive = True
# In this case, we can use a Cholesky decomposition to help us solve/det.
self._use_cholesky = (
base_operator.is_positive_definite and base_operator.is_self_adjoint
and is_diag_update_positive
and v is None)
# Possibly auto-set some characteristic flags from None to True.
# If the Flags were set (by the user) incorrectly to False, then raise.
if base_operator.is_self_adjoint and v is None and not dtype.is_complex:
if is_self_adjoint is False:
raise ValueError(
"A = L + UDU^H, with L self-adjoint and D real diagonal. Since"
" UDU^H is self-adjoint, this must be a self-adjoint operator.")
is_self_adjoint = True
    # The condition for using a Cholesky is sufficient for SPD, and we know of
    # no weaker choice of these hints that leads to SPD.  Therefore, the
    # following line reads "if hints indicate SPD..."
if self._use_cholesky:
if (
is_positive_definite is False
or is_self_adjoint is False
or is_non_singular is False):
raise ValueError(
"Arguments imply this is self-adjoint positive-definite operator.")
is_positive_definite = True
is_self_adjoint = True
values = base_operator.graph_parents + [u, diag_update, v]
with ops.name_scope(name, values=values):
# Create U and V.
self._u = linear_operator_util.convert_nonref_to_tensor(u, name="u")
if v is None:
self._v = self._u
else:
self._v = linear_operator_util.convert_nonref_to_tensor(v, name="v")
if diag_update is None:
self._diag_update = None
else:
self._diag_update = linear_operator_util.convert_nonref_to_tensor(
diag_update, name="diag_update")
# Create base_operator L.
self._base_operator = base_operator
graph_parents = base_operator.graph_parents + [
self.u, self._diag_update, self.v]
graph_parents = [p for p in graph_parents if p is not None]
super(LinearOperatorLowRankUpdate, self).__init__(
dtype=self._base_operator.dtype,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
parameters=parameters,
name=name)
self._set_graph_parents(graph_parents)
# Create the diagonal operator D.
self._set_diag_operators(diag_update, is_diag_update_positive)
self._is_diag_update_positive = is_diag_update_positive
self._check_shapes()
def _check_shapes(self):
"""Static check that shapes are compatible."""
# Broadcast shape also checks that u and v are compatible.
uv_shape = array_ops.broadcast_static_shape(
self.u.shape, self.v.shape)
batch_shape = array_ops.broadcast_static_shape(
self.base_operator.batch_shape, uv_shape[:-2])
tensor_shape.Dimension(
self.base_operator.domain_dimension).assert_is_compatible_with(
uv_shape[-2])
if self._diag_update is not None:
tensor_shape.dimension_at_index(uv_shape, -1).assert_is_compatible_with(
self._diag_update.shape[-1])
array_ops.broadcast_static_shape(
batch_shape, self._diag_update.shape[:-1])
def _set_diag_operators(self, diag_update, is_diag_update_positive):
"""Set attributes self._diag_update and self._diag_operator."""
if diag_update is not None:
self._diag_operator = linear_operator_diag.LinearOperatorDiag(
self._diag_update, is_positive_definite=is_diag_update_positive)
else:
if tensor_shape.dimension_value(self.u.shape[-1]) is not None:
r = tensor_shape.dimension_value(self.u.shape[-1])
else:
r = array_ops.shape(self.u)[-1]
self._diag_operator = linear_operator_identity.LinearOperatorIdentity(
num_rows=r, dtype=self.dtype)
@property
def u(self):
"""If this operator is `A = L + U D V^H`, this is the `U`."""
return self._u
@property
def v(self):
"""If this operator is `A = L + U D V^H`, this is the `V`."""
return self._v
@property
def is_diag_update_positive(self):
"""If this operator is `A = L + U D V^H`, this hints `D > 0` elementwise."""
return self._is_diag_update_positive
@property
def diag_update(self):
"""If this operator is `A = L + U D V^H`, this is the diagonal of `D`."""
return self._diag_update
@property
def diag_operator(self):
"""If this operator is `A = L + U D V^H`, this is `D`."""
return self._diag_operator
@property
def base_operator(self):
"""If this operator is `A = L + U D V^H`, this is the `L`."""
return self._base_operator
def _assert_self_adjoint(self):
# Recall this operator is:
# A = L + UDV^H.
# So in one case self-adjoint depends only on L
if self.u is self.v and self.diag_update is None:
return self.base_operator.assert_self_adjoint()
# In all other cases, sufficient conditions for self-adjoint can be found
# efficiently. However, those conditions are not necessary conditions.
return super(LinearOperatorLowRankUpdate, self).assert_self_adjoint()
def _shape(self):
batch_shape = array_ops.broadcast_static_shape(
self.base_operator.batch_shape,
self.diag_operator.batch_shape)
batch_shape = array_ops.broadcast_static_shape(
batch_shape,
self.u.shape[:-2])
batch_shape = array_ops.broadcast_static_shape(
batch_shape,
self.v.shape[:-2])
return batch_shape.concatenate(self.base_operator.shape[-2:])
def _shape_tensor(self):
batch_shape = array_ops.broadcast_dynamic_shape(
self.base_operator.batch_shape_tensor(),
self.diag_operator.batch_shape_tensor())
batch_shape = array_ops.broadcast_dynamic_shape(
batch_shape,
array_ops.shape(self.u)[:-2])
batch_shape = array_ops.broadcast_dynamic_shape(
batch_shape,
array_ops.shape(self.v)[:-2])
return array_ops.concat(
[batch_shape, self.base_operator.shape_tensor()[-2:]], axis=0)
def _get_uv_as_tensors(self):
"""Get (self.u, self.v) as tensors (in case they were refs)."""
u = ops.convert_to_tensor_v2_with_dispatch(self.u)
if self.v is self.u:
v = u
else:
v = ops.convert_to_tensor_v2_with_dispatch(self.v)
return u, v
def _matmul(self, x, adjoint=False, adjoint_arg=False):
u, v = self._get_uv_as_tensors()
l = self.base_operator
d = self.diag_operator
leading_term = l.matmul(x, adjoint=adjoint, adjoint_arg=adjoint_arg)
if adjoint:
uh_x = math_ops.matmul(u, x, adjoint_a=True, adjoint_b=adjoint_arg)
d_uh_x = d.matmul(uh_x, adjoint=adjoint)
v_d_uh_x = math_ops.matmul(v, d_uh_x)
return leading_term + v_d_uh_x
else:
vh_x = math_ops.matmul(v, x, adjoint_a=True, adjoint_b=adjoint_arg)
d_vh_x = d.matmul(vh_x, adjoint=adjoint)
u_d_vh_x = math_ops.matmul(u, d_vh_x)
return leading_term + u_d_vh_x
def _determinant(self):
if self.is_positive_definite:
return math_ops.exp(self.log_abs_determinant())
# The matrix determinant lemma gives
# https://en.wikipedia.org/wiki/Matrix_determinant_lemma
# det(L + UDV^H) = det(D^{-1} + V^H L^{-1} U) det(D) det(L)
# = det(C) det(D) det(L)
# where C is sometimes known as the capacitance matrix,
# C := D^{-1} + V^H L^{-1} U
u, v = self._get_uv_as_tensors()
det_c = linalg_ops.matrix_determinant(self._make_capacitance(u=u, v=v))
det_d = self.diag_operator.determinant()
det_l = self.base_operator.determinant()
return det_c * det_d * det_l
def _diag_part(self):
    # [U D V^H]_{ii} = sum_{jk} U_{ij} D_{jk} conj(V_{ik})
    #                = sum_{j}  U_{ij} D_{jj} conj(V_{ij})
u, v = self._get_uv_as_tensors()
product = u * math_ops.conj(v)
if self.diag_update is not None:
product *= array_ops.expand_dims(self.diag_update, axis=-2)
return (
math_ops.reduce_sum(product, axis=-1) + self.base_operator.diag_part())
def _log_abs_determinant(self):
u, v = self._get_uv_as_tensors()
# Recall
# det(L + UDV^H) = det(D^{-1} + V^H L^{-1} U) det(D) det(L)
# = det(C) det(D) det(L)
log_abs_det_d = self.diag_operator.log_abs_determinant()
log_abs_det_l = self.base_operator.log_abs_determinant()
if self._use_cholesky:
chol_cap_diag = array_ops.matrix_diag_part(
linalg_ops.cholesky(self._make_capacitance(u=u, v=v)))
log_abs_det_c = 2 * math_ops.reduce_sum(
math_ops.log(chol_cap_diag), axis=[-1])
else:
det_c = linalg_ops.matrix_determinant(self._make_capacitance(u=u, v=v))
log_abs_det_c = math_ops.log(math_ops.abs(det_c))
if self.dtype.is_complex:
log_abs_det_c = math_ops.cast(log_abs_det_c, dtype=self.dtype)
return log_abs_det_c + log_abs_det_d + log_abs_det_l
def _solve(self, rhs, adjoint=False, adjoint_arg=False):
if self.base_operator.is_non_singular is False:
raise ValueError(
"Solve not implemented unless this is a perturbation of a "
"non-singular LinearOperator.")
# The Woodbury formula gives:
# https://en.wikipedia.org/wiki/Woodbury_matrix_identity
# (L + UDV^H)^{-1}
# = L^{-1} - L^{-1} U (D^{-1} + V^H L^{-1} U)^{-1} V^H L^{-1}
# = L^{-1} - L^{-1} U C^{-1} V^H L^{-1}
# where C is the capacitance matrix, C := D^{-1} + V^H L^{-1} U
# Note also that, with ^{-H} being the inverse of the adjoint,
# (L + UDV^H)^{-H}
# = L^{-H} - L^{-H} V C^{-H} U^H L^{-H}
l = self.base_operator
if adjoint:
# If adjoint, U and V have flipped roles in the operator.
v, u = self._get_uv_as_tensors()
# Capacitance should still be computed with u=self.u and v=self.v, which
# after the "flip" on the line above means u=v, v=u. I.e. no need to
# "flip" in the capacitance call, since the call to
# matrix_solve_with_broadcast below is done with the `adjoint` argument,
# and this takes care of things.
capacitance = self._make_capacitance(u=v, v=u)
else:
u, v = self._get_uv_as_tensors()
capacitance = self._make_capacitance(u=u, v=v)
# L^{-1} rhs
linv_rhs = l.solve(rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)
# V^H L^{-1} rhs
vh_linv_rhs = math_ops.matmul(v, linv_rhs, adjoint_a=True)
# C^{-1} V^H L^{-1} rhs
if self._use_cholesky:
capinv_vh_linv_rhs = linalg_ops.cholesky_solve(
linalg_ops.cholesky(capacitance), vh_linv_rhs)
else:
capinv_vh_linv_rhs = linear_operator_util.matrix_solve_with_broadcast(
capacitance, vh_linv_rhs, adjoint=adjoint)
    # U C^{-1} V^H L^{-1} rhs
u_capinv_vh_linv_rhs = math_ops.matmul(u, capinv_vh_linv_rhs)
# L^{-1} U C^{-1} V^H L^{-1} rhs
linv_u_capinv_vh_linv_rhs = l.solve(u_capinv_vh_linv_rhs, adjoint=adjoint)
# L^{-1} - L^{-1} U C^{-1} V^H L^{-1}
return linv_rhs - linv_u_capinv_vh_linv_rhs
def _make_capacitance(self, u, v):
# C := D^{-1} + V^H L^{-1} U
# which is sometimes known as the "capacitance" matrix.
# L^{-1} U
linv_u = self.base_operator.solve(u)
# V^H L^{-1} U
vh_linv_u = math_ops.matmul(v, linv_u, adjoint_a=True)
    # D^{-1} + V^H L^{-1} U
capacitance = self._diag_operator.inverse().add_to_tensor(vh_linv_u)
return capacitance
@property
def _composite_tensor_fields(self):
return ("base_operator", "u", "diag_update", "v", "is_diag_update_positive")
|
Intel-Corporation/tensorflow
|
tensorflow/python/ops/linalg/linear_operator_low_rank_update.py
|
Python
|
apache-2.0
| 19,994
|
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, Iterable, Optional
from kubernetes import client as k8s_client
from polyaxon.auxiliaries import V1PolyaxonInitContainer, V1PolyaxonSidecarContainer
from polyaxon.exceptions import PolyaxonCompilerError
from polyaxon.polyflow import V1CompiledOperation
from polyaxon.polypod.compiler.converters import CORE_CONVERTERS
from polyaxon.schemas.types import V1ConnectionType, V1K8sResourceType
def convert(
namespace: str,
owner_name: str,
project_name: str,
run_name: str,
run_uuid: str,
run_path: str,
compiled_operation: V1CompiledOperation,
artifacts_store: Optional[V1ConnectionType],
connection_by_names: Optional[Dict[str, V1ConnectionType]],
secrets: Optional[Iterable[V1K8sResourceType]],
config_maps: Optional[Iterable[V1K8sResourceType]],
polyaxon_sidecar: V1PolyaxonSidecarContainer = None,
polyaxon_init: V1PolyaxonInitContainer = None,
default_sa: str = None,
converters: Dict[str, Any] = CORE_CONVERTERS,
internal_auth: bool = False,
default_auth: bool = False,
) -> Dict:
if compiled_operation.has_pipeline:
raise PolyaxonCompilerError(
"Converter Error. "
"Specification with matrix/dag/schedule section is not supported in this function."
)
run_kind = compiled_operation.get_run_kind()
if run_kind not in converters:
raise PolyaxonCompilerError(
"Converter Error. "
"Specification with run kind: {} is not supported in this deployment version.".format(
run_kind
)
)
converter = converters[run_kind](
owner_name=owner_name,
project_name=project_name,
run_name=run_name,
run_uuid=run_uuid,
namespace=namespace,
polyaxon_init=polyaxon_init,
polyaxon_sidecar=polyaxon_sidecar,
internal_auth=internal_auth,
run_path=run_path,
)
if converter:
resource = converter.get_resource(
compiled_operation=compiled_operation,
artifacts_store=artifacts_store,
connection_by_names=connection_by_names,
secrets=secrets,
config_maps=config_maps,
default_sa=default_sa,
default_auth=default_auth,
)
api = k8s_client.ApiClient()
return api.sanitize_for_serialization(resource)
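# Hedged usage sketch: wiring `convert` into a caller. The V1CompiledOperation
# construction is intentionally omitted (it depends on the operation spec), and
# the owner/project/run identifiers below are placeholders, not real values.
def convert_for_demo_run(compiled_operation: V1CompiledOperation) -> Dict:
    return convert(
        namespace="polyaxon",
        owner_name="acme",
        project_name="demo-project",
        run_name="demo-run",
        run_uuid="uuid-1234",
        run_path="acme/demo-project/runs/uuid-1234",
        compiled_operation=compiled_operation,
        artifacts_store=None,
        connection_by_names={},
        secrets=None,
        config_maps=None,
    )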
|
polyaxon/polyaxon
|
core/polyaxon/polypod/compiler/converter.py
|
Python
|
apache-2.0
| 2,997
|
from core.models.identity import Identity
from rest_framework import serializers
from .quota_serializer import QuotaSerializer
from .allocation_serializer import AllocationSerializer
class IdentitySerializer(serializers.ModelSerializer):
created_by = serializers.ReadOnlyField(source='creator_name')
credentials = serializers.ReadOnlyField(source='get_credentials')
id = serializers.ReadOnlyField(source='uuid')
provider_id = serializers.ReadOnlyField(source='provider_uuid')
quota = QuotaSerializer(source='get_quota')
allocation = AllocationSerializer(source='get_allocation')
class Meta:
model = Identity
fields = (
'id',
'created_by',
'provider_id',
'credentials',
'quota',
'allocation')
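# Hedged usage sketch: standard DRF read-path serialization. Assumes `identity`
# is a persisted Identity instance whose get_quota/get_allocation/
# get_credentials helpers (defined on the model, not in this file) are
# available.
def serialize_identity(identity):
    return IdentitySerializer(identity).data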
|
CCI-MOC/GUI-Backend
|
api/v1/serializers/identity_serializer.py
|
Python
|
apache-2.0
| 811
|
# coding=utf-8
# Copyright 2022 The Deeplab2 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to generate test data for cityscapes."""
import collections
import json
import os
from absl import app
from absl import flags
from absl import logging
import numpy as np
from PIL import Image
import tensorflow as tf
# resources dependency
from deeplab2.data import data_utils
from deeplab2.data import dataset
flags.DEFINE_string(
'panoptic_annotation_path',
'deeplab2/data/testdata/'
'dummy_prediction.png',
'Path to annotated test image with cityscapes encoding.')
flags.DEFINE_string(
'panoptic_gt_output_path',
'deeplab2/data/testdata/'
'dummy_gt_for_vps.png',
'Path to annotated test image with Video Panoptic Segmentation encoding.')
flags.DEFINE_string(
'output_cityscapes_root',
'deeplab2/data/testdata/',
'Path to output root directory.')
FLAGS = flags.FLAGS
# Cityscapes label, using `TrainId`.
_CITYSCAPES_IGNORE = 255
# Each valid (not ignored) label below is a tuple of (TrainId, EvalId)
_CITYSCAPES_CAR = (13, 26)
_CITYSCAPES_TREE = (8, 21)
_CITYSCAPES_SKY = (10, 23)
_CITYSCAPES_BUILDING = (2, 11)
_CITYSCAPES_ROAD = (0, 7)
_IS_CROWD = 'is_crowd'
_NOT_CROWD = 'not_crowd'
_CLASS_HAS_INSTANCES_LIST = dataset.CITYSCAPES_PANOPTIC_INFORMATION.class_has_instances_list
_PANOPTIC_LABEL_DIVISOR = dataset.CITYSCAPES_PANOPTIC_INFORMATION.panoptic_label_divisor
_FILENAME_PREFIX = 'dummy_000000_000000'
def create_test_data(annotation_path):
"""Creates cityscapes panoptic annotation, vps annotation and segment info.
Our Video Panoptic Segmentation (VPS) encoding uses ID == semantic trainID *
1000 + instance ID (starting at 1) with instance ID == 0 marking
crowd regions.
Args:
annotation_path: The path to the annotation to be loaded.
Returns:
A tuple of cityscape annotation, vps annotation and segment infos.
"""
# Convert panoptic labels to cityscapes label format.
# Dictionary mapping converted panoptic annotation to its corresponding
# Cityscapes label. Here the key is encoded by converting each RGB pixel
# value to 1 * R + 256 * G + 256 * 256 * B.
panoptic_label_to_cityscapes_label = {
0: (_CITYSCAPES_IGNORE, _NOT_CROWD),
31110: (_CITYSCAPES_CAR, _NOT_CROWD),
31354: (_CITYSCAPES_CAR, _IS_CROWD),
35173: (_CITYSCAPES_CAR, _NOT_CROWD),
488314: (_CITYSCAPES_CAR, _IS_CROWD),
549788: (_CITYSCAPES_CAR, _IS_CROWD),
1079689: (_CITYSCAPES_CAR, _IS_CROWD),
1341301: (_CITYSCAPES_CAR, _NOT_CROWD),
1544590: (_CITYSCAPES_CAR, _NOT_CROWD),
1926498: (_CITYSCAPES_CAR, _NOT_CROWD),
4218944: (_CITYSCAPES_TREE, _NOT_CROWD),
4251840: (_CITYSCAPES_SKY, _NOT_CROWD),
6959003: (_CITYSCAPES_BUILDING, _NOT_CROWD),
# To be merged with the building segment above.
8396960: (_CITYSCAPES_BUILDING, _NOT_CROWD),
8413312: (_CITYSCAPES_ROAD, _NOT_CROWD),
}
with tf.io.gfile.GFile(annotation_path, 'rb') as f:
panoptic = data_utils.read_image(f.read())
# Input panoptic annotation is RGB color coded, here we convert each pixel
# to a unique number to avoid comparing 3-tuples.
panoptic = np.dot(panoptic, [1, 256, 256 * 256])
# Creates cityscapes panoptic map. Cityscapes use ID == semantic EvalId for
# `stuff` segments and `thing` segments with `iscrowd` label, and
# ID == semantic EvalId * 1000 + instance ID (starting from 0) for other
# `thing` segments.
cityscapes_panoptic = np.zeros_like(panoptic, dtype=np.int32)
# Creates Video Panoptic Segmentation (VPS) map. We use ID == semantic
# trainID * 1000 + instance ID (starting at 1) with instance ID == 0 marking
# crowd regions.
vps_panoptic = np.zeros_like(panoptic, dtype=np.int32)
num_instances_per_class = collections.defaultdict(int)
unique_labels = np.unique(panoptic)
# Dictionary that maps segment id to segment info.
segments_info = {}
for label in unique_labels:
cityscapes_label, is_crowd = panoptic_label_to_cityscapes_label[label]
selected_pixels = panoptic == label
if cityscapes_label == _CITYSCAPES_IGNORE:
vps_panoptic[selected_pixels] = (
_CITYSCAPES_IGNORE * _PANOPTIC_LABEL_DIVISOR)
continue
train_id, eval_id = tuple(cityscapes_label)
cityscapes_id = eval_id
vps_id = train_id * _PANOPTIC_LABEL_DIVISOR
if train_id in _CLASS_HAS_INSTANCES_LIST:
# `thing` class.
if is_crowd != _IS_CROWD:
cityscapes_id = (
eval_id * _PANOPTIC_LABEL_DIVISOR +
num_instances_per_class[train_id])
# First instance should have ID 1.
vps_id += num_instances_per_class[train_id] + 1
num_instances_per_class[train_id] += 1
cityscapes_panoptic[selected_pixels] = cityscapes_id
vps_panoptic[selected_pixels] = vps_id
pixel_area = int(np.sum(selected_pixels))
if cityscapes_id in segments_info:
logging.info('Merging segments with label %d into segment %d', label,
cityscapes_id)
segments_info[cityscapes_id]['area'] += pixel_area
else:
segments_info[cityscapes_id] = {
'area': pixel_area,
'category_id': train_id,
'id': cityscapes_id,
'iscrowd': 1 if is_crowd == _IS_CROWD else 0,
}
cityscapes_panoptic = np.dstack([
cityscapes_panoptic % 256, cityscapes_panoptic // 256,
cityscapes_panoptic // 256 // 256
])
vps_panoptic = np.dstack(
[vps_panoptic % 256, vps_panoptic // 256, vps_panoptic // 256 // 256])
return (cityscapes_panoptic.astype(np.uint8), vps_panoptic.astype(np.uint8),
list(segments_info.values()))
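# Hedged worked example of the two encodings built above (illustrative only).
# For a car pixel (trainId 13, evalId 26) with the panoptic label divisor of
# 1000 described in the docstring: the first non-crowd instance gets
# Cityscapes ID 26 * 1000 + 0 = 26000 and VPS ID 13 * 1000 + 1 = 13001, while
# a crowd region keeps Cityscapes ID 26 and gets VPS ID 13 * 1000 + 0 = 13000.
def _example_id_encodings(panoptic_label_divisor=_PANOPTIC_LABEL_DIVISOR):
  """Returns ((cityscapes_id, vps_id) first instance, (cityscapes_id, vps_id) crowd)."""
  train_id, eval_id = _CITYSCAPES_CAR
  first_instance = (eval_id * panoptic_label_divisor + 0,
                    train_id * panoptic_label_divisor + 1)
  crowd_region = (eval_id,
                  train_id * panoptic_label_divisor + 0)
  return first_instance, crowd_region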
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
data_path = FLAGS.panoptic_annotation_path # OSS: removed internal filename loading.
panoptic_map, vps_map, segments_info = create_test_data(data_path)
panoptic_map_filename = _FILENAME_PREFIX + '_gtFine_panoptic.png'
panoptic_map_path = os.path.join(FLAGS.output_cityscapes_root, 'gtFine',
'cityscapes_panoptic_dummy_trainId',
panoptic_map_filename)
gt_output_path = FLAGS.panoptic_gt_output_path # OSS: removed internal filename loading.
with tf.io.gfile.GFile(gt_output_path, 'wb') as f:
Image.fromarray(vps_map).save(f, format='png')
panoptic_map_path = panoptic_map_path # OSS: removed internal filename loading.
with tf.io.gfile.GFile(panoptic_map_path, 'wb') as f:
Image.fromarray(panoptic_map).save(f, format='png')
json_annotation = {
'annotations': [{
'file_name': _FILENAME_PREFIX + '_gtFine_panoptic.png',
'image_id': _FILENAME_PREFIX,
'segments_info': segments_info
}]
}
json_annotation_path = os.path.join(FLAGS.output_cityscapes_root, 'gtFine',
'cityscapes_panoptic_dummy_trainId.json')
json_annotation_path = json_annotation_path # OSS: removed internal filename loading.
with tf.io.gfile.GFile(json_annotation_path, 'w') as f:
json.dump(json_annotation, f, indent=2)
if __name__ == '__main__':
app.run(main)
|
google-research/deeplab2
|
data/testdata/create_test_data.py
|
Python
|
apache-2.0
| 7,692
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2017 EMBL - European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import pytest
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
from model_bakery import baker
from django.urls import reverse
from rest_framework import status
def create_publications(count):
entries = []
for pk in range(1, count+1):
entries.append(
baker.prepare(
"emgapi.Publication",
pk=pk,
pubmed_id=pk,
pub_title="Publication findme",
pub_abstract="abcdefghijklmnoprstuvwxyz"
)
)
for pk in range(count+1, 2*count+1):
entries.append(
baker.prepare(
"emgapi.Publication",
pk=pk,
pubmed_id=pk,
pub_title="Publication hide",
pub_abstract="abcdefghijklmnoprstuvwxyz"
)
)
return entries
def create_studies(count):
entries = []
for pk in range(1, count+1):
_biome = baker.make('emgapi.Biome', pk=pk)
entries.append(
baker.prepare(
"emgapi.Study",
pk=pk,
biome=_biome,
study_name="Study findme",
study_abstract="abcdefghijklmnoprstuvwxyz",
is_public=1
)
)
for pk in range(count+1, 2*count+1):
_biome = baker.make('emgapi.Biome', pk=pk)
entries.append(
baker.prepare(
"emgapi.Study",
pk=pk,
biome=_biome,
study_name="Study hide",
study_abstract="abcdefghijklmnoprstuvwxyz",
is_public=1
)
)
return entries
def create_samples(count):
entries = []
for pk in range(1, count+1):
_biome = baker.make('emgapi.Biome', pk=pk)
_study = baker.make('emgapi.Study', pk=pk, biome=_biome, is_public=1)
entries.append(
baker.prepare(
"emgapi.Sample",
pk=pk,
biome=_biome,
studies=[_study],
sample_name="Sample findme",
is_public=1
)
)
for pk in range(count+1, 2*count+1):
_biome = baker.make('emgapi.Biome', pk=pk)
_study = baker.make('emgapi.Study', pk=pk, biome=_biome, is_public=1)
entries.append(
baker.prepare(
"emgapi.Sample",
pk=pk,
biome=_biome,
studies=[_study],
sample_name="Sample hideme",
is_public=1
)
)
return entries
class TestFullTextIndexAPI(object):
@pytest.mark.parametrize(
'_model, _dashed, _view, search_term, search_attr, counts',
[
('Study', 'studies', 'emgapi_v1:studies',
'findme', 'study-name', 5),
('Sample', 'samples', 'emgapi_v1:samples',
'findme', 'sample-name', 5),
('Publication', 'publications', 'emgapi_v1:publications',
'findme', 'pub-title', 5),
]
)
@pytest.mark.django_db
def test_search(self, live_server, client,
_model, _dashed, _view,
search_term, search_attr, counts):
view_name = _view.split(":")[1]
klass = getattr(importlib.import_module("emgapi.models"), _model)
entries = globals()["create_%s" % view_name](counts)
klass.objects.bulk_create(entries)
assert len(klass.objects.all()) == 2*counts
view_name = "%s-list" % _view
qs = urlencode({'search': search_term})
url = "%s%s?%s" % (live_server.url, reverse(view_name), qs)
response = client.get(url)
assert response.status_code == status.HTTP_200_OK
rsp = response.json()
# Meta
assert rsp['meta']['pagination']['page'] == 1
assert rsp['meta']['pagination']['pages'] == 1
assert rsp['meta']['pagination']['count'] == counts
# Data
assert len(rsp['data']) == counts
for d in rsp['data']:
assert d['type'] == _dashed
assert d['attributes'][search_attr] == "%s findme" % _model
assert not d['attributes'][search_attr] == "%s hideme" % _model
|
EBI-Metagenomics/emgapi
|
tests/api/test_fulltextindex.py
|
Python
|
apache-2.0
| 4,961
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
# AUTO GENERATED TRANSFER VARIABLE CLASS. DO NOT MODIFY
#
################################################################################
from federatedml.transfer_variable.base_transfer_variable import BaseTransferVariables
# noinspection PyAttributeOutsideInit
class HeteroBoostingTransferVariable(BaseTransferVariables):
def __init__(self, flowid=0):
super().__init__(flowid)
self.booster_dim = self._create_variable(name='booster_dim', src=['guest'], dst=['host'])
self.stop_flag = self._create_variable(name='stop_flag', src=['guest'], dst=['host'])
self.predict_start_round = self._create_variable(name='predict_start_round', src=['guest'], dst=['host'])
|
FederatedAI/FATE
|
python/federatedml/transfer_variable/transfer_class/hetero_boosting_transfer_variable.py
|
Python
|
apache-2.0
| 1,448
|
#__BEGIN_LICENSE__
# Copyright (c) 2015, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All rights reserved.
#
# The xGDS platform is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#__END_LICENSE__
import pytz
import re
import datetime
from django.utils import timezone
import os
import traceback
import m3u8
from django.conf import settings
from geocamUtil.loader import LazyGetModelByName, getClassByName
from xgds_core.views import getDelay
from xgds_video.recordingUtil import getFudgeForSource
TIME_ZONE = pytz.timezone(settings.XGDS_VIDEO_TIME_ZONE['code'])
SEGMENT_MODEL = LazyGetModelByName(settings.XGDS_VIDEO_SEGMENT_MODEL)
def getDelaySeconds(flightName):
delay = getDelay()
# the below is already subtracted when we use the delay seconds.
# delay -= settings.XGDS_VIDEO_BUFFER_FUDGE_FACTOR
return delay
def getShortTimeString(dateTime):
return dateTime.strftime("%H:%M:%S")
def convertUtcToLocal(time):
if time:
time = time.replace(tzinfo=pytz.utc)
return time.astimezone(TIME_ZONE)
else:
return ""
# def pythonDatetimeToJSON(pyDateTime):
# if pyDateTime:
# return {"year": pyDateTime.year, "month": pyDateTime.month, "day": pyDateTime.day,
# "hour": pyDateTime.hour, "min": pyDateTime.minute, "seconds": pyDateTime.second}
# else:
# return ""
def setSegmentEndTimes(segments, episode, source):
"""
    If neither the episode end time nor the last segment's end time is available (we are live),
    set the segment end time to an endTime value inferred from the segment's index file.
    Operates on the list of segments for the given episode and source.
"""
if not episode:
        print "CANNOT set segment end times for empty episode " + str(episode)
return
# for sourceShortName, segments in sourceSegmentsDict.iteritems():
flightName = episode.shortName + '_' + source.shortName
# segments = sourceSegmentsDict[source.shortName]
segments = sorted(segments, key=lambda segment: segment.segNumber)
# if last segment has no endTime
if (segments[-1].endTime is None) and (episode.endTime is None):
segment = segments[-1] # last segment
#GET_INDEX_FILE_METHOD = getClassByName(settings.XGDS_VIDEO_INDEX_FILE_METHOD)
#indexFilePath = GET_INDEX_FILE_METHOD(flightName, source.shortName, segment.segNumber)
indexFilePath = '%s/%s' % (getSegmentPath(flightName, source.shortName, segment.segNumber), segment.indexFileName)
path = settings.DATA_ROOT + indexFilePath
segmentDuration = getTotalDuration(path)
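        # e.g. (hypothetical times) a segment that started at 10:00:00 with a
        # 300 s playlist gets an inferred endTime of 10:05:00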
segment.endTime = segment.startTime + datetime.timedelta(seconds=segmentDuration)
segment.save()
def find_between(s, first, last):
"""
Helper that finds the substring between first and last strings.
"""
try:
start = s.index(first) + len(first)
end = s.index(last, start)
return s[start:end]
except ValueError:
return ""
def getTotalDuration(path):
#TODO use the m3u8 library to get the duration
"""
Given path to the index file of a segment, returns the total duration of the
segment
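    A typical (hypothetical) playlist entry looks like "#EXTINF:10.0," so
    find_between(line, ":", ",") yields "10.0" and 10 seconds are added.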
"""
try:
indexFile = open(path)
except IOError:
print "path not found for segments " + path
return 0
totalDuration = 0
for line in indexFile:
if line.startswith("#EXTINF"):
timeValue = find_between(line, ":", ",")
totalDuration += int(float(timeValue))
indexFile.close()
return totalDuration
def findEndMarker(item):
if re.match("#EXT-X-ENDLIST", item):
return True
def getSegmentPath(flightName, sourceName, number):
if sourceName:
return '%s_%s/Video/Recordings/Segment%03d/' % (flightName, sourceName, int(number))
else:
return '%s/Video/Recordings/Segment%03d/' % (flightName, int(number))
def getIndexFilePath(flightName, sourceShortName, segmentNumber):
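    # e.g. (hypothetical names) flightName 'A17_fisheye' with sourceShortName
    # 'fisheye' resolves to episode_shortName 'A17'; if no source is given,
    # the flight name itself is split into episode and source parts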
if '_' in flightName:
splits = flightName.split('_')
else:
splits = [flightName, sourceShortName]
if sourceShortName:
if flightName.endswith(sourceShortName):
episode_shortName = splits[0]
else:
episode_shortName = flightName
else:
episode_shortName = splits[0]
sourceShortName = splits[1]
try:
segments = SEGMENT_MODEL.get().objects.filter(episode__shortName=episode_shortName,
segNumber=int(segmentNumber),
source__shortName=sourceShortName)
# should only be one
indexFileName = segments[0].indexFileName
index_file_path = os.path.join(getSegmentPath(flightName, sourceShortName, segmentNumber), indexFileName)
# print('GOT INDEX FILE PATH %s' % index_file_path)
return index_file_path, segments[0]
except:
raise Exception('Segments not found for %s: %s: %s' % (episode_shortName, sourceShortName, segmentNumber))
def getNumChunksFromEndForDelay(delayTime, indexPath):
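    # Walks the playlist backwards, counting how many chunks from the end are
    # needed to cover delayTime seconds; e.g. (hypothetical durations) with
    # 10 s chunks and a 25 s delay it returns 3 chunks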
index = None
segCount = 0
totalTime = 0
valid = False
try:
index = m3u8.load(indexPath)
segList = index.segments
for s in reversed(segList):
totalTime += s.duration
segCount += 1
if totalTime >= delayTime:
break
valid = True
except:
traceback.print_exc()
return segCount, index, valid
def getSegmentNumber(segmentObj):
segFileName = os.path.basename(segmentObj.uri)
name, ext = os.path.splitext(segFileName)
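    # Hypothetical examples of the filename formats handled below:
    #   "camera1_00_0042.ts" -> underscore split -> 42
    #   "segment-17.ts"      -> dash split       -> 17
    #   "fileSequence7.ts"   -> regex fallback   -> 7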
try:
baseName, otherNumber, segNum = name.split("_")
except:
try:
baseName, segNum = name.split("-")
except:
pattern = '(?:[a-z]*[A-Z]*)*([0-9]*)'
match = re.search(pattern, name)
if match:
return int(match.group(1))
return int(segNum)
def getIndexFileContents(flightName=None, sourceShortName=None, segmentNumber=None, forceEndlist=False):
    """ Truncates the last n chunks from the m3u8 playlist, re-adding the end tag and metadata as needed.
    This fakes our delay
"""
# Look up path to index file
GET_INDEX_FILE_METHOD = getClassByName(settings.XGDS_VIDEO_INDEX_FILE_METHOD)
indexFileSuffix, segment = GET_INDEX_FILE_METHOD(flightName, sourceShortName, segmentNumber)
indexFilePath = os.path.join(settings.DATA_ROOT, indexFileSuffix)
segmentDirectoryUrl = settings.DATA_URL + os.path.dirname(indexFileSuffix)
valid = False
try:
videoDelayInSecs = getClassByName(settings.XGDS_VIDEO_DELAY_AMOUNT_METHOD)(flightName)
if videoDelayInSecs > 0:
calculatedDelay = videoDelayInSecs
#if the segment is ended then this may want to be different
if segment.endTime:
# 1. calculate secondsAgo = nowTime - segment.endTime
secondsAgo = (timezone.now() - segment.endTime).total_seconds()
# 2. if secondsAgo < delay, calculatedDelay = videoDelayInSecs - secondsAgo
calculatedDelay = max(videoDelayInSecs - secondsAgo, 0)
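                    # e.g. (hypothetical numbers) with a 60 s delay and a
                    # segment that ended 45 s ago, calculatedDelay becomes 15 s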
if calculatedDelay > 0:
(videoDelayInChunks, m3u8_index, valid) = getNumChunksFromEndForDelay(calculatedDelay - getFudgeForSource(sourceShortName), indexFilePath)
if videoDelayInChunks > 0:
m3u8_index.is_endlist = False
else:
# I *think* we should only get here if we have signal loss during a live feed and are playing out the last
# bit of playlist in which case we *should* add an end tag.
try:
if segment.endTime:
m3u8_index = m3u8.load(indexFilePath)
if segment.episode.endTime:
m3u8_index.is_endlist = True
valid = True
videoDelayInChunks = 0
#TODO broadcast segment end, show glitch in progress screen
except:
traceback.print_exc()
else:
try:
m3u8_index = m3u8.load(indexFilePath)
if forceEndlist:
m3u8_index.is_endlist = True
videoDelayInChunks = 0
valid = True
except:
traceback.print_exc()
if not valid:
if m3u8_index:
try:
m3u8_index.segments = None
except:
pass
return (m3u8_index.dumps(), indexFilePath)
else:
return (None, None)
m3u8_chunks = m3u8_index.segments
if m3u8_chunks and len(m3u8_chunks) > 0:
# this was probably to handle vlc badness
# if segments[0].duration > 100:
# del segments[0]
if videoDelayInChunks > 0: # and len(m3u8_chunks) > videoDelayInChunks:
del m3u8_chunks[-videoDelayInChunks:]
del m3u8_chunks[:-settings.XGDS_VIDEO_LIVE_PLAYLIST_SIZE_TO_PLAYER]
if m3u8_chunks:
firstSegNum = getSegmentNumber(m3u8_index.segments[0])
else:
firstSegNum = 0
m3u8_index.media_sequence = firstSegNum
for s in m3u8_chunks:
s.uri = str(segmentDirectoryUrl) + '/' + s.uri
return (m3u8_index.dumps(), indexFilePath)
except:
#TODO handle better
traceback.print_exc()
traceback.print_stack()
return (None, None)
def calculate_ts_file(folder_name, seconds, index_file_name=settings.XGDS_VIDEO_INDEX_FILE_NAME):
"""
    Find the ts file that is the given number of seconds into the recording
    :param folder_name: folder that holds the ts files (or a path to an .m3u8 file)
:param seconds: seconds into the recording
:param index_file_name: usually prog_index.m3u8
:return: tsfile name and offset seconds into the file
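    Example (hypothetical numbers): with 6 s segments, seconds=15 returns the
    third segment's uri and an offset of 3 s into it.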
"""
# open the prog_index.m3u8
if folder_name.endswith('.m3u8'):
m3u8_filename = folder_name
else:
m3u8_filename = os.path.join(folder_name, index_file_name)
m3u8_obj = m3u8.load(m3u8_filename)
accumulated_time = 0
segment_count = len(m3u8_obj.segments)
seconds_float = float(seconds)
found = False
file_number = 0
for seg_num in range(segment_count):
next_delta = m3u8_obj.segments[seg_num].duration
if accumulated_time + next_delta >= seconds_float:
# save segment number
file_number = seg_num
found = True
break
accumulated_time = accumulated_time + next_delta
if not found:
        msg = "Requested time %f is outside range of %s, %f " % (seconds_float, index_file_name, accumulated_time)
raise Exception(msg)
# if seconds > accumulated_time + next_delta:
# msg = "Requested time %f is outside range of %s, %f " % (seconds, index_file_name, accumulated_time + next_delta)
# raise Exception(msg)
return m3u8_obj.segments[file_number].uri, seconds_float - accumulated_time
|
xgds/xgds_video
|
xgds_video/util.py
|
Python
|
apache-2.0
| 11,763
|
#
# Copyright (c) 2015 Autodesk Inc.
# All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import json
import logging
import os
import time
import yaml
from ochopod.core.fsm import diagnostic
from ochopod.core.utils import merge, retry, shell
from random import choice
from requests import delete, post
from threading import Thread
from toolset.io import fire, run
from toolset.tool import Template
from yaml import YAMLError
#: Our ochopod logger.
logger = logging.getLogger('ochopod')
class _Automation(Thread):
def __init__(self, proxy, template, overrides, namespace, pods, release, suffix, timeout, strict):
super(_Automation, self).__init__()
self.namespace = namespace
self.out = \
{
'ok': False,
'up': []
}
self.overrides = overrides
self.pods = pods
self.proxy = proxy
self.release = release
self.suffix = suffix
self.strict = strict
self.template = template
self.timeout = max(timeout, 5)
self.start()
def run(self):
try:
#
# - we need to pass the framework master IPs around (ugly)
#
assert 'MARATHON_MASTER' in os.environ, '$MARATHON_MASTER not specified (check your portal pod)'
master = choice(os.environ['MARATHON_MASTER'].split(','))
headers = \
{
'content-type': 'application/json',
'accept': 'application/json'
}
with open(self.template, 'r') as f:
#
# - parse the template yaml file (e.g container definition)
#
raw = yaml.load(f)
assert raw, 'empty YAML input (user error ?)'
#
# - merge with our defaults
# - we want at least the cluster & image settings
# - TCP 8080 is added by default to the port list
#
defaults = \
{
'start': True,
'debug': False,
'settings': {},
'ports': [8080],
'verbatim': {}
}
cfg = merge(defaults, raw)
assert 'cluster' in cfg, 'cluster identifier undefined (user error ?)'
assert 'image' in cfg, 'docker image undefined (user error ?)'
#
# - if a suffix is specified append it to the cluster identifier
#
if self.suffix:
cfg['cluster'] = '%s-%s' % (cfg['cluster'], self.suffix)
#
# - timestamp the application (we really want a new uniquely identified application)
# - lookup the optional overrides and merge with our pod settings if specified
# - this is what happens when the -o option is used
#
stamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d-%H-%M-%S')
qualified = '%s.%s' % (self.namespace, cfg['cluster'])
application = 'ochopod.%s-%s' % (qualified, stamp)
if qualified in self.overrides:
blk = self.overrides[qualified]
logger.debug('%s : overriding %d settings (%s)' % (self.template, len(blk), qualified))
cfg['settings'] = merge(cfg['settings'], blk)
def _nullcheck(cfg, prefix):
#
# - walk through the settings and flag any null value
#
missing = []
if cfg is not None:
for key, value in cfg.items():
if value is None:
missing += ['%s.%s' % ('.'.join(prefix), key)]
elif isinstance(value, dict):
missing += _nullcheck(value, prefix + [key])
return missing
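            # e.g. (hypothetical settings) _nullcheck({'db': {'password': None}}, ['pod'])
            # would return ['pod.db.password']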
missing = _nullcheck(cfg['settings'], ['pod'])
assert not missing, '%d setting(s) missing ->\n\t - %s' % (len(missing), '\n\t - '.join(missing))
#
# - if we still have no target default it to 1 single pod
#
if not self.pods:
self.pods = 1
#
# - setup our port list
# - the port binding is specified either by an integer (container port -> dynamic mesos port), by
# two integers (container port -> host port) or by an integer followed by a * (container port ->
# same port on the host)
# - on top of that, all those options allow to specify whether the protocol is TCP or UDP by adding
# the desired protocol after the binding (e.g. '8080 tcp' or '8125 * udp'. TCP is the default if no
# protocol is specified.
# - the marathon pods must by design map /etc/mesos
#
def _parse_port(token):
#
# - tries to return an int if possible, a string otherwise
#
def get_token_no_protocol(token):
# - remove the protocol piece
t = token[:-4].strip()
try:
return int(t)
except ValueError:
return t
if isinstance(token, str) and token.lower().endswith(' udp'):
protocol = 'udp'
token_no_protocol = get_token_no_protocol(token)
elif isinstance(token, str) and token.lower().endswith(' tcp'):
protocol = 'tcp'
token_no_protocol = get_token_no_protocol(token)
else:
# - TCP is the default
protocol = 'tcp'
token_no_protocol = token
if isinstance(token_no_protocol, int):
return {'containerPort': token_no_protocol, 'protocol': protocol}
elif isinstance(token_no_protocol, str) and token_no_protocol.endswith(' *'):
port = int(token_no_protocol[:-2])
return {'containerPort': port, 'hostPort': port, 'protocol': protocol}
elif isinstance(token_no_protocol, str):
ports = token_no_protocol.split(' ')
assert len(ports) == 2, 'invalid port syntax (must be two integers separated by 1+ spaces optionally followed by the protocol (tcp or udp, defaults to tcp))'
return {'containerPort': int(ports[0]), 'hostPort': int(ports[1]), 'protocol': protocol}
else:
assert 0, 'invalid port syntax ("%s")' % token
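            # Hypothetical mappings produced by _parse_port for the syntax above:
            #   8080         -> {'containerPort': 8080, 'protocol': 'tcp'}
            #   '9000 9001'  -> {'containerPort': 9000, 'hostPort': 9001, 'protocol': 'tcp'}
            #   '8125 * udp' -> {'containerPort': 8125, 'hostPort': 8125, 'protocol': 'udp'}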
#
# - craft the docker image specifier
# - if -r is used make sure to add (or override) the :<label> suffix
#
image = cfg['image']
tokens = image.split(':')
image = '%s:%s' % (tokens[0], self.release) if self.release else image
#
# - note the marathon-ec2 ochopod bindings will set the application hint automatically
# via environment variable (e.g no need to specify it here)
# - make sure to mount /etc/mesos and /opt/mesosphere to account for various mesos installs
#
ports = [_parse_port(token) for token in cfg['ports']] if 'ports' in cfg else []
spec = \
{
'id': application,
'instances': self.pods,
'env':
{
'ochopod_cluster': cfg['cluster'],
'ochopod_debug': str(cfg['debug']).lower(),
'ochopod_start': str(cfg['start']).lower(),
'ochopod_namespace': self.namespace,
'pod': json.dumps(cfg['settings'])
},
'container':
{
'type': 'DOCKER',
'docker':
{
'forcePullImage': True,
'image': image,
'network': 'BRIDGE',
'portMappings': ports
},
'volumes':
[
{
'containerPath': '/etc/mesos',
'hostPath': '/etc/mesos',
'mode': 'RO'
},
{
'containerPath': '/opt/mesosphere',
'hostPath': '/opt/mesosphere',
'mode': 'RO'
}
]
}
}
#
# - if we have a 'verbatim' block in our image definition yaml, merge it now
#
if 'verbatim' in cfg:
spec = merge(cfg['verbatim'], spec)
#
# - pick a marathon master at random
# - fire the POST /v2/apps to create our application
# - this will indirectly spawn our pods
#
url = 'http://%s/v2/apps' % master
reply = post(url, data=json.dumps(spec), headers=headers)
code = reply.status_code
logger.debug('-> %s (HTTP %d)' % (url, code))
assert code == 200 or code == 201, 'submission failed (HTTP %d)' % code
#
# - wait for all the pods to be in the 'running' mode
# - the 'application' hint is set by design to the marathon application identifier
# - the sequence counters allocated to our new pods are returned as well
#
target = ['dead', 'running'] if self.strict else ['dead', 'stopped', 'running']
@retry(timeout=self.timeout, pause=3, default={})
def _spin():
def _query(zk):
replies = fire(zk, qualified, 'info')
return [(hints['process'], seq) for seq, hints, _ in replies.values()
if hints['application'] == application and hints['process'] in target]
js = run(self.proxy, _query)
assert len(js) == self.pods, 'not all pods running yet'
return js
js = _spin()
            running = sum(1 for state, _ in js if state != 'dead')
up = [seq for _, seq in js]
self.out['up'] = up
self.out['ok'] = self.pods == running
logger.debug('%s : %d/%d pods are running ' % (self.template, running, self.pods))
if not up:
#
                    # - nothing is running (typically because the image has an issue and is
                    #   not booting the ochopod script for instance, which happens often)
# - in that case fire a HTTP DELETE against the marathon application to clean it up
#
url = 'http://%s/v2/apps/%s' % (master, application)
reply = delete(url, headers=headers)
code = reply.status_code
logger.debug('-> %s (HTTP %d)' % (url, code))
assert code == 200 or code == 204, 'application deletion failed (HTTP %d)' % code
except AssertionError as failure:
logger.debug('%s : failed to deploy -> %s' % (self.template, failure))
except YAMLError as failure:
if hasattr(failure, 'problem_mark'):
mark = failure.problem_mark
logger.debug('%s : invalid deploy.yml (line %s, column %s)' % (self.template, mark.line+1, mark.column+1))
except Exception as failure:
logger.debug('%s : failed to deploy -> %s' % (self.template, diagnostic(failure)))
def join(self, timeout=None):
Thread.join(self)
return self.out
def go():
class _Tool(Template):
help = \
'''
Spawns a marathon application for each of the specified cluster(s). The tool will by default wait for
            all containers to be up (but not necessarily configured & clustered yet), whereas using the --strict
            switch will ensure we wait for all containers to be fully configured.
            If no container ended up being deployed the underlying marathon application will automatically get
            deleted. It is possible to add a suffix to the cluster identifier defined in the yaml configuration
by using the -s option (typically to run the same functionality in different contexts).
This tool supports optional output in JSON format for 3rd-party integration via the -j switch.
Please note we force a docker image pull when instantiating the new application.
'''
tag = 'deploy'
def customize(self, parser):
parser.add_argument('containers', type=str, nargs='+', help='1+ YAML definitions (e.g marathon.yml)')
parser.add_argument('-j', action='store_true', dest='json', help='json output')
parser.add_argument('-n', action='store', dest='namespace', type=str, default='marathon', help='namespace')
parser.add_argument('-o', action='store', dest='overrides', type=str, nargs='+', help='overrides YAML file(s)')
parser.add_argument('-p', action='store', dest='pods', type=int, help='number of pods to deploy')
parser.add_argument('-r', action='store', dest='release', type=str, help='docker image release tag')
parser.add_argument('-s', action='store', dest='suffix', type=str, help='optional cluster suffix')
parser.add_argument('-t', action='store', dest='timeout', type=int, default=60, help='timeout in seconds')
parser.add_argument('--strict', action='store_true', dest='strict', help='waits until all pods are running')
def body(self, args, _, proxy):
assert len(args.containers), 'at least one container definition is required'
#
# - load the overrides from yaml if specified
#
overrides = {}
if not args.overrides:
args.overrides = []
for path in args.overrides:
try:
with open(path, 'r') as f:
overrides.update(yaml.load(f))
except IOError:
logger.debug('unable to load %s' % args.overrides)
except YAMLError as failure:
if hasattr(failure, 'problem_mark'):
mark = failure.problem_mark
assert 0, '%s is invalid (line %s, column %s)' % (args.overrides, mark.line+1, mark.column+1)
#
# - run the workflow proper (one thread per container definition)
#
threads = {template: _Automation(
proxy,
template,
overrides,
args.namespace,
args.pods,
args.release,
args.suffix,
args.timeout,
args.strict) for template in args.containers}
#
# - wait for all our threads to join
#
n = len(threads)
outcome = {key: thread.join() for key, thread in threads.items()}
pct = (100 * sum(1 for _, js in outcome.items() if js['ok'])) / n if n else 0
up = sum(len(js['up']) for _, js in outcome.items())
logger.info(json.dumps(outcome) if args.json else '%d%% success (+%d pods)' % (pct, up))
return 0 if pct == 100 else 1
return _Tool()
|
autodesk-cloud/ochothon
|
images/portal/resources/toolset/toolset/commands/deploy.py
|
Python
|
apache-2.0
| 17,510
|
# Copyright (c) 2008 The Board of Trustees of The Leland Stanford Junior University
# Copyright (c) 2011, 2012 Open Networking Foundation
# Copyright (c) 2012, 2013 Big Switch Networks, Inc.
# See the file LICENSE.pyloxi which should have been included in the source distribution
# Automatically generated by LOXI from template module.py
# Do not modify
import struct
import loxi
from . import util
import functools
import loxi.generic_util
import sys
ofp = sys.modules['loxi.of14']
class bundle_prop(loxi.OFObject):
subtypes = {}
def __init__(self, type=None):
if type != None:
self.type = type
else:
self.type = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 1
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return functools.reduce(lambda x,y: x+y, packed)
@staticmethod
def unpack(reader):
subtype, = reader.peek('!H', 0)
subclass = bundle_prop.subtypes.get(subtype)
if subclass:
return subclass.unpack(reader)
obj = bundle_prop()
obj.type = reader.read("!H")[0]
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.type != other.type: return False
return True
def pretty_print(self, q):
q.text("bundle_prop {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
class experimenter(bundle_prop):
subtypes = {}
type = 65535
def __init__(self, experimenter=None, exp_type=None):
if experimenter != None:
self.experimenter = experimenter
else:
self.experimenter = 0
if exp_type != None:
self.exp_type = exp_type
else:
self.exp_type = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.exp_type))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return functools.reduce(lambda x,y: x+y, packed)
@staticmethod
def unpack(reader):
subtype, = reader.peek('!L', 4)
subclass = experimenter.subtypes.get(subtype)
if subclass:
return subclass.unpack(reader)
obj = experimenter()
_type = reader.read("!H")[0]
assert(_type == 65535)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.experimenter = reader.read("!L")[0]
obj.exp_type = reader.read("!L")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.experimenter != other.experimenter: return False
if self.exp_type != other.exp_type: return False
return True
def pretty_print(self, q):
q.text("experimenter {")
with q.group():
with q.indent(2):
q.breakable()
q.text("exp_type = ");
q.text("%#x" % self.exp_type)
q.breakable()
q.text('}')
bundle_prop.subtypes[65535] = experimenter
|
floodlight/loxigen-artifacts
|
pyloxi3/loxi/of14/bundle_prop.py
|
Python
|
apache-2.0
| 3,649
|
#!/usr/bin/env python
from distutils.core import setup
setup(name='sql4json',
version='0.3.0',
description='Python SQL library and command line for querying JSON documents',
author='Brian Hendriks',
url='http://github.com/bheni/sql4json',
packages=['sql4json', 'sql4json.boolean_expressions'],
scripts=['bin/sql4json', 'bin/mpack2json']
)
|
bheni/sql4json
|
setup.py
|
Python
|
apache-2.0
| 377
|
# @MUNTJAC_COPYRIGHT@
# @MUNTJAC_LICENSE@
"""For uploading files from client to server."""
from warnings import warn
from muntjac.ui.abstract_component import AbstractComponent
from muntjac.ui.component import \
IFocusable, Event as ComponentEvent
from muntjac.terminal.stream_variable import \
IStreamVariable, IStreamingEvent
from muntjac.terminal.gwt.server.exceptions import \
NoInputStreamException, NoOutputStreamException
from muntjac.util import OrderedSet
class IStartedListener(object):
"""Receives the events when the upload starts.
@author: Vaadin Ltd.
@author: Richard Lincoln
@version: @VERSION@
"""
def uploadStarted(self, event):
"""Upload has started.
@param event:
the Upload started event.
"""
raise NotImplementedError
class IFinishedListener(object):
"""Receives the events when the uploads are ready.
@author: Vaadin Ltd.
@author: Richard Lincoln
@version: @VERSION@
"""
def uploadFinished(self, event):
"""Upload has finished.
@param event:
the Upload finished event.
"""
raise NotImplementedError
class IFailedListener(object):
"""Receives events when the uploads are finished, but unsuccessful.
@author: Vaadin Ltd.
@author: Richard Lincoln
@version: @VERSION@
"""
def uploadFailed(self, event):
"""Upload has finished unsuccessfully.
@param event:
the Upload failed event.
"""
raise NotImplementedError
class ISucceededListener(object):
"""Receives events when the uploads are successfully finished.
@author: Vaadin Ltd.
@author: Richard Lincoln
@version: @VERSION@
"""
    def uploadSucceeded(self, event):
        """Upload successful.
@param event:
the Upload successful event.
"""
raise NotImplementedError
class IProgressListener(object):
"""IProgressListener receives events to track progress of upload."""
def updateProgress(self, readBytes, contentLength):
"""Updates progress to listener.
@param readBytes:
bytes transferred
@param contentLength:
total size of file currently being uploaded, -1 if unknown
"""
raise NotImplementedError
_UPLOAD_FINISHED_METHOD = getattr(IFinishedListener, 'uploadFinished')
_UPLOAD_FAILED_METHOD = getattr(IFailedListener, 'uploadFailed')
_UPLOAD_STARTED_METHOD = getattr(IStartedListener, 'uploadStarted')
_UPLOAD_SUCCEEDED_METHOD = getattr(ISucceededListener, 'uploadSucceeded')
class Upload(AbstractComponent, IFocusable): #IComponent,
"""IComponent for uploading files from client to server.
The visible component consists of a file name input box and a browse
button and an upload submit button to start uploading.
The Upload component needs a StringIO to write the uploaded
data. You need to implement the upload.IReceiver interface and return the
output stream in the receiveUpload() method.
You can get an event regarding starting (StartedEvent), progress
(ProgressEvent), and finishing (FinishedEvent) of upload by implementing
IStartedListener, IProgressListener, and IFinishedListener, respectively.
The IFinishedListener is called for both failed and succeeded uploads. If
you wish to separate between these two cases, you can use
ISucceededListener (SucceededEvenet) and IFailedListener (FailedEvent).
The upload component does not itself show upload progress, but you can use
the ProgressIndicator for providing progress feedback by implementing
IProgressListener and updating the indicator in updateProgress().
Setting upload component immediate initiates the upload as soon as a file
is selected, instead of the common pattern of file selection field and
upload button.
Note! Because of browser dependent implementations of <input type="file">
element, setting size for Upload component is not supported. For some
browsers setting size may work to some extent.
@author: Vaadin Ltd.
@author: Richard Lincoln
@version: @VERSION@
"""
CLIENT_WIDGET = None #ClientWidget(VUpload, LoadStyle.LAZY)
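    # A minimal usage sketch (hypothetical receiver and listener classes, not
    # part of this module):
    #   upload = Upload('Upload a file', MyReceiver())
    #   upload.addListener(MySucceededListener(), ISucceededListener)
    # where MyReceiver implements IReceiver.receiveUpload() and returns a
    # writable stream such as a StringIO instance.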
def __init__(self, caption=None, uploadReceiver=None):
"""Creates a new instance of Upload.
The receiver must be set before performing an upload.
"""
super(Upload, self).__init__()
#: Should the field be focused on next repaint?
self._focus = False
#: The tab order number of this field.
self._tabIndex = 0
#: The output of the upload is redirected to this receiver.
self._receiver = None
self._isUploading = False
self._contentLength = -1
self._totalBytes = None
self._buttonCaption = 'Upload'
#: ProgressListeners to which information about progress
# is sent during upload
self._progressListeners = OrderedSet()
self._progressCallbacks = dict()
self._interrupted = False
self._notStarted = None
self._nextid = 0
#: Flag to indicate that submitting file has been requested.
self._forceSubmit = None
if caption:
self.setCaption(caption)
if uploadReceiver is not None:
self._receiver = uploadReceiver
self._streamVariable = None
def changeVariables(self, source, variables):
"""Invoked when the value of a variable has changed.
@see: L{AbstractComponent.changeVariables}
"""
if 'pollForStart' in variables:
idd = variables.get('pollForStart')
if not self._isUploading and idd == self._nextid:
self._notStarted = True
self.requestRepaint()
else:
pass
def paintContent(self, target):
"""Paints the content of this component.
@param target:
Target to paint the content on.
@raise PaintException:
if the paint operation failed.
"""
if self._notStarted:
target.addAttribute('notStarted', True)
self._notStarted = False
return
if self._forceSubmit:
target.addAttribute('forceSubmit', True)
            self._forceSubmit = False
return
# The field should be focused
if self._focus:
target.addAttribute('focus', True)
# The tab ordering number
if self._tabIndex >= 0:
target.addAttribute('tabindex', self._tabIndex)
target.addAttribute('state', self._isUploading)
if self._buttonCaption is not None:
target.addAttribute('buttoncaption', self._buttonCaption)
target.addAttribute('nextid', self._nextid)
# Post file to this strean variable
target.addVariable(self, 'action', self.getStreamVariable())
def addListener(self, listener, iface=None):
"""Adds an event listener.
@param listener:
the listener to be added.
"""
if (isinstance(listener, IFailedListener) and
(iface is None or issubclass(iface, IFailedListener))):
self.registerListener(FailedEvent,
listener, _UPLOAD_FAILED_METHOD)
if (isinstance(listener, IFinishedListener) and
(iface is None or issubclass(iface, IFinishedListener))):
self.registerListener(FinishedEvent,
listener, _UPLOAD_FINISHED_METHOD)
if (isinstance(listener, IProgressListener) and
(iface is None or issubclass(iface, IProgressListener))):
self._progressListeners.add(listener)
if (isinstance(listener, IStartedListener) and
(iface is None or issubclass(iface, IStartedListener))):
self.registerListener(StartedEvent,
listener, _UPLOAD_STARTED_METHOD)
if (isinstance(listener, ISucceededListener) and
(iface is None or issubclass(iface, ISucceededListener))):
self.registerListener(SucceededEvent,
listener, _UPLOAD_SUCCEEDED_METHOD)
super(Upload, self).addListener(listener, iface)
def addCallback(self, callback, eventType=None, *args):
if eventType is None:
eventType = callback._eventType
if issubclass(eventType, FailedEvent):
self.registerCallback(FailedEvent, callback, None, *args)
elif issubclass(eventType, FinishedEvent):
self.registerCallback(FinishedEvent, callback, None, *args)
elif issubclass(eventType, IProgressListener): # no progress event
self._progressCallbacks[callback] = args
elif issubclass(eventType, StartedEvent):
self.registerCallback(StartedEvent, callback, None, *args)
elif issubclass(eventType, SucceededEvent):
self.registerCallback(SucceededEvent, callback, None, *args)
else:
super(Upload, self).addCallback(callback, eventType, *args)
def removeListener(self, listener, iface=None):
"""Removes an event listener.
@param listener:
the listener to be removed.
"""
if (isinstance(listener, IFailedListener) and
(iface is None or issubclass(iface, IFailedListener))):
self.withdrawListener(FailedEvent,
listener, _UPLOAD_FAILED_METHOD)
if (isinstance(listener, IFinishedListener) and
(iface is None or issubclass(iface, IFinishedListener))):
self.withdrawListener(FinishedEvent,
listener, _UPLOAD_FINISHED_METHOD)
if (isinstance(listener, IProgressListener) and
(iface is None or issubclass(iface, IProgressListener))):
if listener in self._progressListeners:
self._progressListeners.remove(listener)
if (isinstance(listener, IStartedListener) and
(iface is None or issubclass(iface, IStartedListener))):
self.withdrawListener(StartedEvent,
listener, _UPLOAD_STARTED_METHOD)
if (isinstance(listener, ISucceededListener) and
(iface is None or issubclass(iface, ISucceededListener))):
self.withdrawListener(SucceededEvent,
listener, _UPLOAD_SUCCEEDED_METHOD)
super(Upload, self).removeListener(listener, iface)
def removeCallback(self, callback, eventType=None):
if eventType is None:
eventType = callback._eventType
if issubclass(eventType, FailedEvent):
self.withdrawCallback(FailedEvent, callback)
elif issubclass(eventType, FinishedEvent):
self.withdrawCallback(FinishedEvent, callback)
elif issubclass(eventType, IProgressListener): # no progress event
if callback in self._progressCallbacks:
                del self._progressCallbacks[callback]
elif issubclass(eventType, StartedEvent):
self.withdrawCallback(StartedEvent, callback)
elif issubclass(eventType, SucceededEvent):
self.withdrawCallback(SucceededEvent, callback)
else:
super(Upload, self).removeCallback(callback, eventType)
def fireStarted(self, filename, MIMEType):
"""Emit upload received event.
"""
evt = StartedEvent(self, filename, MIMEType, self._contentLength)
self.fireEvent(evt)
def fireUploadInterrupted(self, filename, MIMEType, length, e=None):
"""Emits the upload failed event.
"""
if e is None:
evt = FailedEvent(self, filename, MIMEType, length)
else:
evt = FailedEvent(self, filename, MIMEType, length, e)
self.fireEvent(evt)
def fireNoInputStream(self, filename, MIMEType, length):
evt = NoInputStreamEvent(self, filename, MIMEType, length)
self.fireEvent(evt)
def fireNoOutputStream(self, filename, MIMEType, length):
evt = NoOutputStreamEvent(self, filename, MIMEType, length)
self.fireEvent(evt)
def fireUploadSuccess(self, filename, MIMEType, length):
"""Emits the upload success event.
"""
evt = SucceededEvent(self, filename, MIMEType, length)
self.fireEvent(evt)
def fireUpdateProgress(self, totalBytes, contentLength):
"""Emits the progress event.
@param totalBytes:
bytes received so far
@param contentLength:
actual size of the file being uploaded, if known
"""
# this is implemented differently than other listeners to
# maintain backwards compatibility
for l in self._progressListeners:
l.updateProgress(totalBytes, contentLength)
for callback, args in self._progressCallbacks.iteritems():
callback(totalBytes, contentLength, *args)
def getReceiver(self):
"""Returns the current receiver.
@return: the IStreamVariable.
"""
return self._receiver
def setReceiver(self, receiver):
"""Sets the receiver.
@param receiver:
the receiver to set.
"""
self._receiver = receiver
def focus(self):
super(Upload, self).focus()
def getTabIndex(self):
"""Gets the Tabulator index of this IFocusable component.
@see: L{IFocusable.getTabIndex}
"""
return self._tabIndex
def setTabIndex(self, tabIndex):
"""Sets the Tabulator index of this IFocusable component.
@see: L{IFocusable.setTabIndex}
"""
self._tabIndex = tabIndex
def startUpload(self):
"""Go into upload state. This is to prevent double uploading on same
component.
Warning: this is an internal method used by the framework and should
        not be used by users of the Upload component. Using it results in the
        Upload component going into a wrong state and not working. It is currently
public because it is used by another class.
"""
if self._isUploading:
            raise ValueError('uploading already started')
self._isUploading = True
self._nextid += 1
def interruptUpload(self):
"""Interrupts the upload currently being received. The interruption
        will be done by the receiving thread so this method will return
immediately and the actual interrupt will happen a bit later.
"""
if self._isUploading:
self._interrupted = True
def endUpload(self):
"""Go into state where new uploading can begin.
Warning: this is an internal method used by the framework and should
not be used by user of the Upload component.
"""
self._isUploading = False
self._contentLength = -1
self._interrupted = False
self.requestRepaint()
def isUploading(self):
return self._isUploading
def getBytesRead(self):
"""Gets read bytes of the file currently being uploaded.
@return: bytes
"""
return self._totalBytes
def getUploadSize(self):
"""Returns size of file currently being uploaded. Value sane only
during upload.
@return: size in bytes
"""
return self._contentLength
def setProgressListener(self, progressListener):
"""This method is deprecated, use addListener(IProgressListener)
instead.
@deprecated: Use addListener(IProgressListener) instead.
"""
warn('use addListener() instead', DeprecationWarning)
self.addListener(progressListener, IProgressListener)
def getProgressListener(self):
"""This method is deprecated.
@deprecated: Replaced with addListener/removeListener
@return: listener
"""
warn('replaced with addListener/removeListener', DeprecationWarning)
if len(self._progressListeners) == 0:
return None
else:
return iter(self._progressListeners).next()
def getButtonCaption(self):
"""@return: String to be rendered into button that fires uploading"""
return self._buttonCaption
    def setButtonCaption(self, buttonCaption):
        """In addition to the actual file chooser, upload components have a
        button that starts the actual upload. This method is used to set the
        text on that button.
        In case the button text is set to null, the button is hidden. In this
        case the developer must explicitly initiate the upload process with
        L{submitUpload}.
        In case the Upload is used in immediate mode using
        L{setImmediate}, the file chooser (html input with type
        "file") is hidden and only the button with this text is shown.
B{Note} the string given is set as is to the button.
HTML formatting is not stripped. Be sure to properly validate your
value according to your needs.
@param buttonCaption:
text for upload components button.
"""
self._buttonCaption = buttonCaption
self.requestRepaint()
    def submitUpload(self):
        """Forces the upload to send the selected file to the server.
        In case the developer wants to use this feature, he/she will most
        probably want to hide the upload's internal submit button by setting
        its caption to null with the L{setButtonCaption} method.
        Note that the upload runs asynchronously. The developer should use the
        normal upload listeners to track the progress of the upload. If the
        field is empty, the uploaded file name will be an empty string and the
        file length 0 in the upload finished event.
Also note, that the developer should not remove or modify the upload
in the same user transaction where the upload submit is requested. The
upload may safely be hidden or removed once the upload started event
is fired.
"""
self.requestRepaint()
self._forceSubmit = True
def requestRepaint(self):
self._forceSubmit = False
super(Upload, self).requestRepaint()
def getStreamVariable(self):
# Handle to terminal via Upload monitors and controls the upload
# during it is being streamed.
if self._streamVariable is None:
self._streamVariable = InnerStreamVariable(self)
return self._streamVariable
def getListeners(self, eventType):
if issubclass(eventType, IStreamingEvent):
return list(self._progressListeners)
return super(Upload, self).getListeners(eventType)
def getCallbacks(self, eventType):
if issubclass(eventType, IStreamingEvent):
return dict(self._progressCallbacks)
return super(Upload, self).getCallbacks(eventType)
class InnerStreamVariable(IStreamVariable):
def __init__(self, upload):
self._upload = upload
self._lastStartedEvent = None
def listenProgress(self):
        return (self._upload._progressListeners is not None
                and len(self._upload._progressListeners) > 0)
def onProgress(self, event):
self._upload.fireUpdateProgress(event.getBytesReceived(),
event.getContentLength())
def isInterrupted(self):
        return self._upload._interrupted
def getOutputStream(self):
        receiveUpload = self._upload.getReceiver().receiveUpload(
self._lastStartedEvent.getFileName(),
self._lastStartedEvent.getMimeType())
self._lastStartedEvent = None
return receiveUpload
def streamingStarted(self, event):
        self._upload.startUpload()
        self._upload._contentLength = event.getContentLength()
self._upload.fireStarted(event.getFileName(),
event.getMimeType())
self._lastStartedEvent = event
def streamingFinished(self, event):
self._upload.fireUploadSuccess(event.getFileName(),
event.getMimeType(), event.getContentLength())
self._upload.endUpload()
self._upload.requestRepaint()
def streamingFailed(self, event):
exception = event.getException()
if isinstance(exception, NoInputStreamException):
self._upload.fireNoInputStream(event.getFileName(),
event.getMimeType(), 0)
elif isinstance(exception, NoOutputStreamException):
self._upload.fireNoOutputStream(event.getFileName(),
event.getMimeType(), 0)
else:
self._upload.fireUploadInterrupted(event.getFileName(),
event.getMimeType(), 0, exception)
self._upload.endUpload()
class IReceiver(object):
"""Interface that must be implemented by the upload receivers to provide
the Upload component an output stream to write the uploaded data.
@author: Vaadin Ltd.
@author: Richard Lincoln
@version: @VERSION@
"""
def receiveUpload(self, filename, mimeType):
"""Invoked when a new upload arrives.
@param filename:
the desired filename of the upload, usually as specified
by the client.
@param mimeType:
the MIME type of the uploaded file.
@return: Stream to which the uploaded file should be written.
"""
raise NotImplementedError
class FinishedEvent(ComponentEvent):
"""Upload.FinishedEvent is sent when the upload receives a file,
regardless of whether the reception was successful or failed. If
you wish to distinguish between the two cases, use either SucceededEvent
or FailedEvent, which are both subclasses of the FinishedEvent.
@author: Vaadin Ltd.
@author: Richard Lincoln
@version: @VERSION@
"""
def __init__(self, source, filename, MIMEType, length):
"""@param source:
the source of the file.
@param filename:
the received file name.
@param MIMEType:
the MIME type of the received file.
@param length:
the length of the received file.
"""
super(FinishedEvent, self).__init__(source)
#: MIME type of the received file.
self._type = MIMEType
#: Received file name.
self._filename = filename
#: Length of the received file.
self._length = length
def getUpload(self):
"""Uploads where the event occurred.
@return: the source of the event.
"""
return self.getSource()
def getFilename(self):
"""Gets the file name.
@return: the filename.
"""
return self._filename
def getMIMEType(self):
"""Gets the MIME Type of the file.
@return: the MIME type.
"""
return self._type
def getLength(self):
"""Gets the length of the file.
@return: the length.
"""
return self._length
class FailedEvent(FinishedEvent):
"""Upload.FailedEvent event is sent when the upload is received,
but the reception is interrupted for some reason.
@author: Vaadin Ltd.
@author: Richard Lincoln
@version: @VERSION@
"""
def __init__(self, source, filename, MIMEType, length, reason=None):
super(FailedEvent, self).__init__(source, filename, MIMEType, length)
self._reason = reason
def getReason(self):
"""Gets the exception that caused the failure.
@return: the exception that caused the failure, null if n/a
"""
return self._reason
class NoOutputStreamEvent(FailedEvent):
"""FailedEvent that indicates that an output stream could not be obtained.
"""
def __init__(self, source, filename, MIMEType, length):
super(NoOutputStreamEvent, self).__init__(source, filename, MIMEType,
length)
class NoInputStreamEvent(FailedEvent):
"""FailedEvent that indicates that an input stream could not be obtained.
"""
def __init__(self, source, filename, MIMEType, length):
super(NoInputStreamEvent, self).__init__(source, filename, MIMEType,
length)
class SucceededEvent(FinishedEvent):
"""Upload.SucceededEvent event is sent when the upload is received
successfully.
@author: Vaadin Ltd.
@author: Richard Lincoln
@version: @VERSION@
"""
def __init__(self, source, filename, MIMEType, length):
super(SucceededEvent, self).__init__(source, filename, MIMEType, length)
class StartedEvent(ComponentEvent):
    """Upload.StartedEvent event is sent when the upload starts to be
    received.
@author: Vaadin Ltd.
@author: Richard Lincoln
@version: @VERSION@
"""
def __init__(self, source, filename, MIMEType, contentLength):
super(StartedEvent, self).__init__(source)
self._filename = filename
self._type = MIMEType
#: Length of the received file.
self._length = contentLength
def getUpload(self):
"""Uploads where the event occurred.
@return: the source of the event.
"""
return self.getSource()
def getFilename(self):
"""Gets the file name.
@return: the filename.
"""
return self._filename
def getMIMEType(self):
"""Gets the MIME Type of the file.
@return: the MIME type.
"""
return self._type
def getContentLength(self):
"""@return: the length of the file that is being uploaded"""
return self._length
|
rwl/muntjac
|
muntjac/ui/upload.py
|
Python
|
apache-2.0
| 25,882
|
#!/usr/bin/python
'''
Prolog wrapper for docker images submission to batch systems
Created on Oct 28, 2015
@author: mariojmdavid@gmail.com
'''
import time
import os
import peUtils
if __name__ == '__main__':
print '==========================================='
ti = time.time()
param = peUtils.getOptions()
print 'I am the PROLOG of batch system'
print '-------- The env variables --------'
print os.environ
print '-------- The config param ---------'
print param
print '-----------------------------------'
bs = param['global']['batch_sys']
comp_home = os.environ[param['global']['comp_home']]
job_id = os.environ[param['global']['job_id']]
# comp_workdir = comp_home + os.sep + 'job_' + job_id
comp_workdir = comp_home
'''
try:
os.makedirs(comp_workdir, 755)
except OSError as e:
if e.errno != errno.EEXIST:
raise e
pass
'''
# The scp will have to be remodeled, for now just scp
sub_host = os.environ[param['global']['sub_host']]
sub_workdir = os.environ[param['global']['sub_workdir']]
std_in = sub_workdir + os.sep + os.environ['SGEIN1']
exec_job = os.environ[param['global']['job_script']]
print '----------- Several things ----------'
print 'Batch system: %s' % bs
print 'Submit host: %s' % sub_host
print 'Submit workdir: %s' % sub_workdir
print 'Compute node workdir: %s' % comp_workdir
print 'Exec script: %s' % exec_job
print '-----------------------------------'
os.system('scp -r -q %s:%s %s' % (sub_host, std_in, comp_workdir))
os.system(exec_job)
print '==========================================='
|
LIP-Computing/pelogw
|
src/prolw.py
|
Python
|
apache-2.0
| 1,690
|
# -*- coding: utf-8 -*-
"""Console script for Raster Foundry"""
import logging
import click
from .commands import process_upload
logger = logging.getLogger("rf")
@click.group()
@click.option("--verbose/--quiet")
def run(verbose):
"""Console script for raster_foundry_batch_tasks."""
if verbose:
logger.setLevel(logging.DEBUG)
logger.debug("VERBOSE logging enabled")
else:
logger.setLevel(logging.INFO)
run.add_command(process_upload)
|
raster-foundry/raster-foundry
|
app-tasks/rf/src/rf/cli.py
|
Python
|
apache-2.0
| 479
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from tempest.lib.common.utils import data_utils
from openstackclient.tests.functional.identity.v3 import common
class ProjectTests(common.IdentityTests):
def test_project_create(self):
project_name = data_utils.rand_name('TestProject')
description = data_utils.rand_name('description')
raw_output = self.openstack(
'project create '
'--domain %(domain)s '
'--description %(description)s '
'--enable '
'--property k1=v1 '
'--property k2=v2 '
'%(name)s' % {'domain': self.domain_name,
'description': description,
'name': project_name})
self.addCleanup(
self.openstack,
'project delete '
'--domain %(domain)s '
'%(name)s' % {'domain': self.domain_name,
'name': project_name}
)
items = self.parse_show(raw_output)
show_fields = list(self.PROJECT_FIELDS)
show_fields.extend(['k1', 'k2'])
self.assert_show_fields(items, show_fields)
project = self.parse_show_as_object(raw_output)
self.assertEqual('v1', project['k1'])
self.assertEqual('v2', project['k2'])
def test_project_delete(self):
project_name = self._create_dummy_project(add_clean_up=False)
raw_output = self.openstack(
'project delete '
'--domain %(domain)s '
'%(name)s' % {'domain': self.domain_name,
'name': project_name})
self.assertEqual(0, len(raw_output))
def test_project_list(self):
raw_output = self.openstack('project list')
items = self.parse_listing(raw_output)
self.assert_table_structure(items, common.BASIC_LIST_HEADERS)
def test_project_list_with_domain(self):
project_name = self._create_dummy_project()
raw_output = self.openstack(
'project list --domain %s' % self.domain_name)
items = self.parse_listing(raw_output)
self.assert_table_structure(items, common.BASIC_LIST_HEADERS)
self.assertIn(project_name, raw_output)
self.assertGreater(len(items), 0)
def test_project_set(self):
project_name = self._create_dummy_project()
new_project_name = data_utils.rand_name('NewTestProject')
raw_output = self.openstack(
'project set '
'--name %(new_name)s '
'--disable '
'--property k0=v0 '
'%(name)s' % {'new_name': new_project_name,
'domain': self.domain_name,
'name': project_name})
self.assertEqual(0, len(raw_output))
# check project details
raw_output = self.openstack(
'project show '
'--domain %(domain)s '
'%(name)s' % {'domain': self.domain_name,
'name': new_project_name}
)
items = self.parse_show(raw_output)
fields = list(self.PROJECT_FIELDS)
fields.extend(['k0'])
self.assert_show_fields(items, fields)
project = self.parse_show_as_object(raw_output)
self.assertEqual(new_project_name, project['name'])
self.assertEqual('False', project['enabled'])
self.assertEqual('v0', project['k0'])
# reset project to make sure it will be cleaned up
self.openstack(
'project set '
'--name %(new_name)s '
'--enable '
'%(name)s' % {'new_name': project_name,
'name': new_project_name})
def test_project_show(self):
raw_output = self.openstack(
'project show '
'--domain %(domain)s '
'%(name)s' % {'domain': self.domain_name,
'name': self.project_name})
items = self.parse_show(raw_output)
self.assert_show_fields(items, self.PROJECT_FIELDS)
def test_project_show_with_parents_children(self):
json_output = json.loads(self.openstack(
'project show '
'--parents --children -f json '
'--domain %(domain)s '
'%(name)s' % {'domain': self.domain_name,
'name': self.project_name}))
for attr_name in (self.PROJECT_FIELDS + ['parents', 'subtree']):
self.assertIn(attr_name, json_output)
self.assertEqual(self.project_name, json_output.get('name'))
|
dtroyer/python-openstackclient
|
openstackclient/tests/functional/identity/v3/test_project.py
|
Python
|
apache-2.0
| 5,083
|
# Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Elasticsearch using the synchronizer, i.e. as it would be used by a
user
"""
import time
import os
import sys
if sys.version_info[:2] == (2, 6):
import unittest2 as unittest
else:
import unittest
import inspect
import socket
sys.path[0:0] = [""]
try:
from pymongo import MongoClient as Connection
except ImportError:
from pymongo import Connection
from tests.setup_cluster import (kill_mongo_proc,
start_mongo_proc,
start_cluster,
kill_all)
from bson.dbref import DBRef
from bson.objectid import ObjectId
from bson.code import Code
from bson.binary import Binary
from mongo_connector.doc_managers.elastic_doc_manager import DocManager
from mongo_connector.connector import Connector
from mongo_connector.util import retry_until_ok
from pymongo.errors import OperationFailure, AutoReconnect
from tests.util import wait_for
PORTS_ONE = {"PRIMARY": "27117", "SECONDARY": "27118", "ARBITER": "27119",
"CONFIG": "27220", "MONGOS": "27217"}
NUMBER_OF_DOC_DIRS = 100
HOSTNAME = os.environ.get('HOSTNAME', socket.gethostname())
PORTS_ONE['MONGOS'] = os.environ.get('MAIN_ADDR', "27217")
CONFIG = os.environ.get('CONFIG', "config.txt")
class TestElastic(unittest.TestCase):
""" Tests the Elastic instance
"""
def runTest(self):
""" Runs the tests
"""
unittest.TestCase.__init__(self)
@classmethod
def setUpClass(cls):
""" Starts the cluster
"""
os.system('rm %s; touch %s' % (CONFIG, CONFIG))
cls.elastic_doc = DocManager('localhost:9200',
auto_commit=False)
cls.elastic_doc._remove()
cls.flag = start_cluster()
if cls.flag:
cls.conn = Connection('%s:%s' % (HOSTNAME, PORTS_ONE['MONGOS']),
replicaSet="demo-repl")
import logging
logger = logging.getLogger()
loglevel = logging.INFO
logger.setLevel(loglevel)
@classmethod
def tearDownClass(cls):
""" Kills cluster instance
"""
kill_all()
def tearDown(self):
""" Ends the connector
"""
self.connector.doc_manager.auto_commit = False
time.sleep(2)
self.connector.join()
def setUp(self):
""" Starts a new connector for every test
"""
if not self.flag:
self.fail("Shards cannot be added to mongos")
self.connector = Connector(
address='%s:%s' % (HOSTNAME, PORTS_ONE['MONGOS']),
oplog_checkpoint=CONFIG,
target_url='localhost:9200',
ns_set=['test.test'],
u_key='_id',
auth_key=None,
doc_manager='mongo_connector/doc_managers/elastic_doc_manager.py'
)
self.connector.start()
while len(self.connector.shard_set) == 0:
pass
self.conn['test']['test'].remove(safe=True)
wait_for(lambda : sum(1 for _ in self.elastic_doc._search()) == 0)
def test_shard_length(self):
"""Tests the shard_length to see if the shard set was recognized
properly
"""
self.assertEqual(len(self.connector.shard_set), 1)
def test_initial(self):
"""Tests search and assures that the databases are clear.
"""
self.conn['test']['test'].remove(safe=True)
self.assertEqual(self.conn['test']['test'].find().count(), 0)
self.assertEqual(sum(1 for _ in self.elastic_doc._search()), 0)
def test_insert(self):
"""Tests insert
"""
self.conn['test']['test'].insert({'name': 'paulie'}, safe=True)
wait_for(lambda : sum(1 for _ in self.elastic_doc._search()) > 0)
result_set_1 = list(self.elastic_doc._search())
self.assertEqual(len(result_set_1), 1)
result_set_2 = self.conn['test']['test'].find_one()
for item in result_set_1:
self.assertEqual(item['_id'], str(result_set_2['_id']))
self.assertEqual(item['name'], result_set_2['name'])
def test_remove(self):
"""Tests remove
"""
self.conn['test']['test'].insert({'name': 'paulie'}, safe=True)
wait_for(lambda : sum(1 for _ in self.elastic_doc._search()) == 1)
self.conn['test']['test'].remove({'name': 'paulie'}, safe=True)
wait_for(lambda : sum(1 for _ in self.elastic_doc._search()) != 1)
self.assertEqual(sum(1 for _ in self.elastic_doc._search()), 0)
def test_rollback(self):
"""Tests rollback. We force a rollback by adding a doc, killing the
primary, adding another doc, killing the new primary, and then
restarting both.
"""
primary_conn = Connection(HOSTNAME, int(PORTS_ONE['PRIMARY']))
self.conn['test']['test'].insert({'name': 'paul'}, safe=True)
condition1 = lambda : self.conn['test']['test'].find(
{'name': 'paul'}).count() == 1
condition2 = lambda : sum(1 for _ in self.elastic_doc._search()) == 1
wait_for(condition1)
wait_for(condition2)
kill_mongo_proc(HOSTNAME, PORTS_ONE['PRIMARY'])
new_primary_conn = Connection(HOSTNAME, int(PORTS_ONE['SECONDARY']))
admin = new_primary_conn['admin']
wait_for(lambda : admin.command("isMaster")['ismaster'])
time.sleep(5)
count = 0
while True:
try:
self.conn['test']['test'].insert(
{'name': 'pauline'}, safe=True)
break
except OperationFailure:
time.sleep(1)
count += 1
if count >= 60:
sys.exit(1)
continue
wait_for(lambda : sum(1 for _ in self.elastic_doc._search()) == 2)
result_set_1 = list(self.elastic_doc._search())
result_set_2 = self.conn['test']['test'].find_one({'name': 'pauline'})
self.assertEqual(len(result_set_1), 2)
#make sure pauline is there
for item in result_set_1:
if item['name'] == 'pauline':
self.assertEqual(item['_id'], str(result_set_2['_id']))
kill_mongo_proc(HOSTNAME, PORTS_ONE['SECONDARY'])
start_mongo_proc(PORTS_ONE['PRIMARY'], "demo-repl", "/replset1a",
"/replset1a.log", None)
while primary_conn['admin'].command("isMaster")['ismaster'] is False:
time.sleep(1)
start_mongo_proc(PORTS_ONE['SECONDARY'], "demo-repl", "/replset1b",
"/replset1b.log", None)
time.sleep(2)
result_set_1 = list(self.elastic_doc._search())
self.assertEqual(len(result_set_1), 1)
for item in result_set_1:
self.assertEqual(item['name'], 'paul')
find_cursor = retry_until_ok(self.conn['test']['test'].find)
self.assertEqual(retry_until_ok(find_cursor.count), 1)
def test_stress(self):
"""Test stress by inserting and removing the number of documents
specified in global
variable
"""
for i in range(0, NUMBER_OF_DOC_DIRS):
self.conn['test']['test'].insert({'name': 'Paul ' + str(i)})
time.sleep(5)
search = self.elastic_doc._search
condition = lambda : sum(1 for _ in search()) == NUMBER_OF_DOC_DIRS
wait_for(condition)
for i in range(0, NUMBER_OF_DOC_DIRS):
result_set_1 = self.elastic_doc._search()
for item in result_set_1:
                if item['name'] == 'Paul ' + str(i):
self.assertEqual(item['_id'], item['_id'])
def test_stressed_rollback(self):
"""Test stressed rollback with number of documents equal to specified
in global variable. Strategy for rollback is the same as before.
"""
for i in range(0, NUMBER_OF_DOC_DIRS):
self.conn['test']['test'].insert({'name': 'Paul ' + str(i)},
safe=True)
search = self.elastic_doc._search
condition = lambda : sum(1 for _ in search()) == NUMBER_OF_DOC_DIRS
wait_for(condition)
primary_conn = Connection(HOSTNAME, int(PORTS_ONE['PRIMARY']))
kill_mongo_proc(HOSTNAME, PORTS_ONE['PRIMARY'])
new_primary_conn = Connection(HOSTNAME, int(PORTS_ONE['SECONDARY']))
admin = new_primary_conn['admin']
wait_for(lambda : admin.command("isMaster")['ismaster'])
time.sleep(5)
count = -1
while count + 1 < NUMBER_OF_DOC_DIRS:
try:
count += 1
self.conn['test']['test'].insert(
{'name': 'Pauline ' + str(count)}, safe=True)
except (OperationFailure, AutoReconnect):
time.sleep(1)
wait_for(lambda : sum(1 for _ in self.elastic_doc._search())
== self.conn['test']['test'].find().count())
result_set_1 = self.elastic_doc._search()
for item in result_set_1:
if 'Pauline' in item['name']:
result_set_2 = self.conn['test']['test'].find_one(
{'name': item['name']})
self.assertEqual(item['_id'], str(result_set_2['_id']))
kill_mongo_proc(HOSTNAME, PORTS_ONE['SECONDARY'])
start_mongo_proc(PORTS_ONE['PRIMARY'], "demo-repl", "/replset1a",
"/replset1a.log", None)
db_admin = primary_conn["admin"]
wait_for(lambda : db_admin.command("isMaster")['ismaster'])
start_mongo_proc(PORTS_ONE['SECONDARY'], "demo-repl", "/replset1b",
"/replset1b.log", None)
search = self.elastic_doc._search
condition = lambda : sum(1 for _ in search()) == NUMBER_OF_DOC_DIRS
wait_for(condition)
result_set_1 = list(self.elastic_doc._search())
self.assertEqual(len(result_set_1), NUMBER_OF_DOC_DIRS)
for item in result_set_1:
self.assertTrue('Paul' in item['name'])
find_cursor = retry_until_ok(self.conn['test']['test'].find)
self.assertEqual(retry_until_ok(find_cursor.count), NUMBER_OF_DOC_DIRS)
def test_non_standard_fields(self):
""" Tests ObjectIds, DBrefs, etc
"""
# This test can break if it attempts to insert before the dump takes
# place- this prevents it (other tests affected too actually)
while (self.connector.shard_set['demo-repl'].checkpoint is None):
time.sleep(1)
docs = [
{'foo': [1, 2]},
{'bar': {'hello': 'world'}},
{'code': Code("function x() { return 1; }")},
{'dbref': {'_ref': DBRef('simple',
ObjectId('509b8db456c02c5ab7e63c34'))}}
]
try:
self.conn['test']['test'].insert(docs)
except OperationFailure:
self.fail("Cannot insert documents into Elastic!")
search = self.elastic_doc._search
if not wait_for(lambda : sum(1 for _ in search()) == len(docs)):
self.fail("Did not get all expected documents")
self.assertIn("dbref", self.elastic_doc.get_last_doc())
def abort_test():
"""Aborts the test
"""
sys.exit(1)
if __name__ == '__main__':
unittest.main()
|
yeroon/mongo-connector
|
tests/test_elastic.py
|
Python
|
apache-2.0
| 11,885
|
from .error_info import TypescriptErrorInfo
from .go_to_definition import TypescriptGoToDefinitionCommand
from .go_to_type import TypescriptGoToTypeCommand
from .nav_to import TypescriptNavToCommand
from .quick_info import TypescriptQuickInfo, TypescriptQuickInfoDoc
from .save import TypescriptSave
from .show_doc import TypescriptShowDoc
from .signature import TypescriptSignaturePanel, TypescriptSignaturePopup
from .format import (
TypescriptFormatBrackets,
TypescriptFormatDocument,
TypescriptFormatLine,
TypescriptFormatOnKey,
TypescriptFormatSelection,
TypescriptPasteAndFormat,
TypescriptAutoIndentOnEnterBetweenCurlyBrackets
)
from .references import (
TypescriptFindReferencesCommand,
TypescriptGoToRefCommand,
TypescriptNextRefCommand,
TypescriptPopulateRefs,
TypescriptPrevRefCommand
)
from .rename import (
TypescriptDelayedRenameFile,
TypescriptFinishRenameCommand,
TypescriptRenameCommand
)
from .build import TypescriptBuildCommand
__all__ = [
"TypescriptAutoIndentOnEnterBetweenCurlyBrackets",
"TypescriptErrorInfo",
"TypescriptFormatBrackets",
"TypescriptFormatDocument",
"TypescriptFormatLine",
"TypescriptFormatOnKey",
"TypescriptFormatSelection",
"TypescriptPasteAndFormat",
"TypescriptGoToDefinitionCommand",
"TypescriptGoToTypeCommand",
"TypescriptGoToRefCommand",
"TypescriptNavToCommand",
"TypescriptQuickInfo",
"TypescriptQuickInfoDoc",
"TypescriptFindReferencesCommand",
"TypescriptGoToDefinitionCommand",
"TypescriptNextRefCommand",
"TypescriptPopulateRefs",
"TypescriptPrevRefCommand",
"TypescriptDelayedRenameFile",
"TypescriptFinishRenameCommand",
"TypescriptRenameCommand",
"TypescriptSave",
"TypescriptShowDoc",
"TypescriptSignaturePanel",
"TypescriptSignaturePopup",
"TypescriptBuildCommand"
]
|
fongandrew/TypeScript-Sublime-JSX-Plugin
|
typescript/commands/__init__.py
|
Python
|
apache-2.0
| 1,897
|
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Utility functions for working with Android devices through adb.
"""
# pylint: disable=E1103
import os
import time
import subprocess
import logging
import re
from wlauto.exceptions import DeviceError, ConfigError, HostError
from wlauto.utils.misc import check_output, escape_single_quotes, escape_double_quotes, get_null
MAX_TRIES = 5
logger = logging.getLogger('android')
# See:
# http://developer.android.com/guide/topics/manifest/uses-sdk-element.html#ApiLevels
ANDROID_VERSION_MAP = {
19: 'KITKAT',
18: 'JELLY_BEAN_MR2',
17: 'JELLY_BEAN_MR1',
16: 'JELLY_BEAN',
15: 'ICE_CREAM_SANDWICH_MR1',
14: 'ICE_CREAM_SANDWICH',
13: 'HONEYCOMB_MR2',
12: 'HONEYCOMB_MR1',
11: 'HONEYCOMB',
10: 'GINGERBREAD_MR1',
9: 'GINGERBREAD',
8: 'FROYO',
7: 'ECLAIR_MR1',
6: 'ECLAIR_0_1',
5: 'ECLAIR',
4: 'DONUT',
3: 'CUPCAKE',
2: 'BASE_1_1',
1: 'BASE',
}
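# Illustrative lookup (hypothetical API level): ANDROID_VERSION_MAP.get(19)
# returns 'KITKAT'; unknown levels simply return None.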
# TODO: these are set to their actual values near the bottom of the file. There
# is some HACKery involved to ensure that ANDROID_HOME does not need to be set
# or adb added to PATH when installing as root, and the whole
# implementation is clunkier and messier than I'd like. The only file that
# rivals this one in levels of mess is bootstrap.py (for very much the same
# reasons). There must be a neater way to ensure that environmental dependencies
# are met when they are needed, and are not imposed when they are not.
android_home = None
platform_tools = None
adb = None
aapt = None
fastboot = None
class _AndroidEnvironment(object):
def __init__(self):
self.android_home = None
self.platform_tools = None
self.adb = None
self.aapt = None
self.fastboot = None
class AndroidProperties(object):
def __init__(self, text):
self._properties = {}
self.parse(text)
def parse(self, text):
self._properties = dict(re.findall(r'\[(.*?)\]:\s+\[(.*?)\]', text))
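        # Illustrative input line (typical "adb shell getprop" format; values
        # are hypothetical): "[ro.build.version.sdk]: [19]" parses to
        # {'ro.build.version.sdk': '19'}.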
def __iter__(self):
return iter(self._properties)
def __getattr__(self, name):
return self._properties.get(name)
__getitem__ = __getattr__
class ApkInfo(object):
version_regex = re.compile(r"name='(?P<name>[^']+)' versionCode='(?P<vcode>[^']+)' versionName='(?P<vname>[^']+)'")
name_regex = re.compile(r"name='(?P<name>[^']+)'")
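    # Illustrative "aapt dump badging" line (values are hypothetical) that
    # version_regex is intended to match:
    #   package: name='com.example.app' versionCode='42' versionName='1.2'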
def __init__(self, path=None):
self.path = path
self.package = None
self.activity = None
self.label = None
self.version_name = None
self.version_code = None
self.parse(path)
def parse(self, apk_path):
_check_env()
command = [aapt, 'dump', 'badging', apk_path]
logger.debug(' '.join(command))
output = subprocess.check_output(command)
for line in output.split('\n'):
if line.startswith('application-label:'):
self.label = line.split(':')[1].strip().replace('\'', '')
elif line.startswith('package:'):
match = self.version_regex.search(line)
if match:
self.package = match.group('name')
self.version_code = match.group('vcode')
self.version_name = match.group('vname')
elif line.startswith('launchable-activity:'):
match = self.name_regex.search(line)
self.activity = match.group('name')
else:
pass # not interested
def fastboot_command(command, timeout=None):
_check_env()
full_command = "fastboot {}".format(command)
logger.debug(full_command)
output, _ = check_output(full_command, timeout, shell=True)
return output
def fastboot_flash_partition(partition, path_to_image):
command = 'flash {} {}'.format(partition, path_to_image)
fastboot_command(command)
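# Illustrative call (partition name and image path are hypothetical):
#   fastboot_flash_partition('system', '/path/to/system.img')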
def adb_get_device():
"""
Returns the serial number of a connected android device.
    If more than one device is connected to the machine, or no connected
    device could be found, :class:`wlauto.exceptions.ConfigError` is raised.
"""
_check_env()
    # TODO: this is a hacky way to issue an adb command to all listed devices.
    # The output of calling "adb devices" consists of a heading line, then
    # a list of the devices separated by newlines, and finally a blank line.
    # In other words, if there is a device found, the output length is
    # 2 + (1 for each device).
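    # Illustrative "adb devices" output with a single device attached (the
    # serial is hypothetical):
    #   List of devices attached
    #   0123456789ABCDEF<TAB>device
    #   <blank line>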
output = adb_command('0', "devices").splitlines() # pylint: disable=E1103
output_length = len(output)
if output_length == 3:
        # output[1] is the second line in the output, which has the device
        # name. Splitting the line by '\t' gives a two-element list: the
        # device serial at index 0 and the device state at index 1.
return output[1].split('\t')[0]
elif output_length > 3:
raise ConfigError('Number of discovered devices is {}, it should be 1'.format(output_length - 2))
else:
raise ConfigError('No device is connected and available')
def adb_connect(device, timeout=None):
_check_env()
command = "adb connect " + device
if ":5555" in device:
logger.debug(command)
output, _ = check_output(command, shell=True, timeout=timeout)
logger.debug(output)
        # Due to a rare adb bug, an extra ":5555" is sometimes appended to the
        # IP address.
if output.find('5555:5555') != -1:
logger.debug('ADB BUG with extra 5555')
command = "adb connect " + device.replace(':5555', '')
tries = 0
while not poll_for_file(device, "/proc/cpuinfo"):
logger.debug("adb connect failed, retrying now...")
tries += 1
if tries > MAX_TRIES:
raise DeviceError('Cannot connect to adb server on the device.')
logger.debug(command)
output, _ = check_output(command, shell=True, timeout=timeout)
time.sleep(10)
if output.find('connected to') == -1:
raise DeviceError('Could not connect to {}'.format(device))
def adb_disconnect(device):
_check_env()
if ":5555" in device:
command = "adb disconnect " + device
logger.debug(command)
retval = subprocess.call(command, stdout=open(os.devnull, 'wb'), shell=True)
if retval:
raise DeviceError('"{}" returned {}'.format(command, retval))
def poll_for_file(device, dfile):
_check_env()
device_string = '-s {}'.format(device) if device else ''
command = "adb " + device_string + " shell \" if [ -f " + dfile + " ] ; then true ; else false ; fi\" "
logger.debug(command)
result = subprocess.call(command, stderr=subprocess.PIPE, shell=True)
if not result:
return True
else:
return False
am_start_error = re.compile(r"Error: Activity class {[\w|.|/]*} does not exist")
def adb_shell(device, command, timeout=None, check_exit_code=False, as_root=False): # NOQA
_check_env()
if as_root:
command = 'echo "{}" | su'.format(escape_double_quotes(command))
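        # The line above pipes the command into "su" so that it runs in a root
        # shell on the device (rather than relying on "su -c", whose behaviour
        # varies between su implementations).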
device_string = '-s {}'.format(device) if device else ''
full_command = 'adb {} shell "{}"'.format(device_string, escape_double_quotes(command))
logger.debug(full_command)
if check_exit_code:
actual_command = "adb {} shell '({}); echo $?'".format(device_string, escape_single_quotes(command))
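        # "echo $?" appends the command's exit code as the final line of the
        # output; the rsplit below peels that line off from the real output.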
raw_output, error = check_output(actual_command, timeout, shell=True)
if raw_output:
try:
output, exit_code, _ = raw_output.rsplit('\n', 2)
except ValueError:
exit_code, _ = raw_output.rsplit('\n', 1)
output = ''
else: # raw_output is empty
exit_code = '969696' # just because
output = ''
exit_code = exit_code.strip()
if exit_code.isdigit():
if int(exit_code):
message = 'Got exit code {}\nfrom: {}\nSTDOUT: {}\nSTDERR: {}'.format(exit_code, full_command,
output, error)
raise DeviceError(message)
elif am_start_error.findall(output):
message = 'Could not start activity; got the following:'
message += '\n{}'.format(am_start_error.findall(output)[0])
raise DeviceError(message)
else: # not all digits
if am_start_error.findall(output):
message = 'Could not start activity; got the following:'
message += '\n{}'.format(am_start_error.findall(output)[0])
raise DeviceError(message)
else:
raise DeviceError('adb has returned early; did not get an exit code. Was kill-server invoked?')
else: # do not check exit code
output, _ = check_output(full_command, timeout, shell=True)
return output
def adb_background_shell(device, command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, as_root=False):
"""Runs the sepcified command in a subprocess, returning the the Popen object."""
_check_env()
if as_root:
command = 'echo \'{}\' | su'.format(escape_single_quotes(command))
device_string = '-s {}'.format(device) if device else ''
full_command = 'adb {} shell "{}"'.format(device_string, escape_double_quotes(command))
logger.debug(full_command)
return subprocess.Popen(full_command, stdout=stdout, stderr=stderr, shell=True)
class AdbDevice(object):
def __init__(self, name, status):
self.name = name
self.status = status
def __cmp__(self, other):
if isinstance(other, AdbDevice):
return cmp(self.name, other.name)
else:
return cmp(self.name, other)
def adb_list_devices():
_check_env()
output = adb_command(None, 'devices')
devices = []
for line in output.splitlines():
parts = [p.strip() for p in line.split()]
if len(parts) == 2:
devices.append(AdbDevice(*parts))
return devices
def adb_command(device, command, timeout=None):
_check_env()
device_string = '-s {}'.format(device) if device else ''
full_command = "adb {} {}".format(device_string, command)
logger.debug(full_command)
output, _ = check_output(full_command, timeout, shell=True)
return output
# Messy environment initialisation stuff...
def _initialize_with_android_home(env):
logger.debug('Using ANDROID_HOME from the environment.')
env.android_home = android_home
env.platform_tools = os.path.join(android_home, 'platform-tools')
os.environ['PATH'] += os.pathsep + env.platform_tools
_init_common(env)
return env
def _initialize_without_android_home(env):
if os.name == 'nt':
raise HostError('Please set ANDROID_HOME to point to the location of the Android SDK.')
# Assuming Unix in what follows.
if subprocess.call('adb version >{}'.format(get_null()), shell=True):
raise HostError('ANDROID_HOME is not set and adb is not in PATH. Have you installed Android SDK?')
logger.debug('Discovering ANDROID_HOME from adb path.')
env.platform_tools = os.path.dirname(subprocess.check_output('which adb', shell=True))
env.android_home = os.path.dirname(env.platform_tools)
_init_common(env)
return env
def _init_common(env):
logger.debug('ANDROID_HOME: {}'.format(env.android_home))
build_tools_directory = os.path.join(env.android_home, 'build-tools')
if not os.path.isdir(build_tools_directory):
        msg = 'ANDROID_HOME ({}) does not appear to have a valid Android SDK install (cannot find build-tools)'
raise HostError(msg.format(env.android_home))
versions = os.listdir(build_tools_directory)
for version in reversed(sorted(versions)):
aapt_path = os.path.join(build_tools_directory, version, 'aapt')
if os.path.isfile(aapt_path):
logger.debug('Using aapt for version {}'.format(version))
env.aapt = aapt_path
break
else:
raise HostError('aapt not found. Please make sure at least one Android platform is installed.')
def _check_env():
global android_home, platform_tools, adb, aapt # pylint: disable=W0603
if not android_home:
android_home = os.getenv('ANDROID_HOME')
if android_home:
_env = _initialize_with_android_home(_AndroidEnvironment())
else:
_env = _initialize_without_android_home(_AndroidEnvironment())
android_home = _env.android_home
platform_tools = _env.platform_tools
adb = _env.adb
aapt = _env.aapt
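# Illustrative usage sketch (not part of the original module; the device
# address is hypothetical):
#   adb_connect('192.168.0.2:5555')
#   print adb_shell('192.168.0.2:5555', 'getprop ro.build.version.sdk',
#                   check_exit_code=True)
#   adb_disconnect('192.168.0.2:5555')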
|
freedomtan/workload-automation
|
wlauto/utils/android.py
|
Python
|
apache-2.0
| 13,224
|