hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
340d1910d2fe91e2c40ee2105bc0bd2df345ee58 | 45,879 | py | Python | venv/lib/python3.8/site-packages/cssutils/tests/test_value.py | vieee/Scraper_Myntra | 05034d8294c7011cd943ddbef57d1c65526cd95b | [
"MIT"
] | null | null | null | venv/lib/python3.8/site-packages/cssutils/tests/test_value.py | vieee/Scraper_Myntra | 05034d8294c7011cd943ddbef57d1c65526cd95b | [
"MIT"
] | null | null | null | venv/lib/python3.8/site-packages/cssutils/tests/test_value.py | vieee/Scraper_Myntra | 05034d8294c7011cd943ddbef57d1c65526cd95b | [
"MIT"
] | null | null | null | """Testcases for cssutils.css.CSSValue and CSSPrimitiveValue."""
# from decimal import Decimal # maybe for later tests?
import xml.dom
from . import basetest
import cssutils
class PropertyValueTestCase(basetest.BaseTestCase):
    """Tests for ``cssutils.css.PropertyValue`` parsing and serialization."""

    def setUp(self):
        # Default subject used by the do_equal_r/do_raise_r helpers.
        self.r = cssutils.css.PropertyValue()

    def test_init(self):
        "PropertyValue.__init__() .item() .length"
        # empty value
        pv = cssutils.css.PropertyValue()
        self.assertEqual('', pv.cssText)
        self.assertEqual(0, pv.length)
        self.assertEqual('', pv.value)

        # value list: separators (space, comma, slash) are not items
        cssText = '0, 0/0 1px var(x) url(x)'
        items = ['0', '0', '0', '1px', 'var(x)', 'url(x)']
        pv = cssutils.css.PropertyValue(cssText)
        self.assertEqual(cssText, pv.cssText)
        self.assertEqual(6, len(pv))
        self.assertEqual(6, pv.length)

        # __iter__
        for i, x in enumerate(pv):
            self.assertEqual(x.cssText, items[i])

        # indexing and item() agree
        for i, item in enumerate(items):
            self.assertEqual(item, pv[i].cssText)
            self.assertEqual(item, pv.item(i).cssText)

    def test_cssText(self):
        "PropertyValue.cssText"
        # {input: (expected cssText or None for the input itself,
        #          expected length,
        #          expected value or None for the expected cssText)}
        tests = {
            '0': (None, 1, None),
            '0 0': (None, 2, None),
            '0, 0': (None, 2, None),
            '0,0': ('0, 0', 2, None),
            '0 , 0': ('0, 0', 2, None),
            '0/0': (None, 2, None),
            '/**/ 0 /**/': (None, 1, '0'),
            '0 /**/ 0 /**/ 0': (None, 3, '0 0 0'),
            '0, /**/ 0, /**/ 0': (None, 3, '0, 0, 0'),
            '0//**/ 0//**/ 0': (None, 3, '0/0/0'),
            '/**/ red': (None, 1, 'red'),
            '/**/red': ('/**/ red', 1, 'red'),
            'red /**/': (None, 1, 'red'),
            'red/**/': ('red /**/', 1, 'red'),
            'a()1,-1,+1,1%,-1%,1px,-1px,"a",a,url(a),#aabb44': (
                'a() 1, -1, +1, 1%, -1%, 1px, -1px, "a", a, url(a), #ab4',
                12,
                'a() 1, -1, +1, 1%, -1%, 1px, -1px, "a", a, url(a), #ab4',
            ),
            # calc values
            'calc(1)': (None, 1, 'calc(1)'),
            'calc( 1)': ('calc(1)', 1, 'calc(1)'),
            'calc(1 )': ('calc(1)', 1, 'calc(1)'),
            'calc(1px)': (None, 1, 'calc(1px)'),
            'calc(1p-x-)': (None, 1, 'calc(1p-x-)'),
            'calc(1%)': (None, 1, 'calc(1%)'),
            'calc(-1)': (None, 1, 'calc(-1)'),
            'calc(+1)': (None, 1, 'calc(+1)'),
            'calc(1 + 1px)': ('calc(1 + 1px)', 1, 'calc(1 + 1px)'),
            'calc(1 - 1px)': (None, 1, 'calc(1 - 1px)'),
            'calc(1*1px)': ('calc(1 * 1px)', 1, 'calc(1 * 1px)'),
            'calc(1 / 1px)': ('calc(1 / 1px)', 1, 'calc(1 / 1px)'),
            'calc( 1*1px)': ('calc(1 * 1px)', 1, 'calc(1 * 1px)'),
            'calc( 1 / 1px)': ('calc(1 / 1px)', 1, 'calc(1 / 1px)'),
            'calc(1*1px )': ('calc(1 * 1px)', 1, 'calc(1 * 1px)'),
            'calc(1 / 1px )': ('calc(1 / 1px)', 1, 'calc(1 / 1px)'),
            'calc( 1*1px )': ('calc(1 * 1px)', 1, 'calc(1 * 1px)'),
            'calc( 1 / 1px )': ('calc(1 / 1px)', 1, 'calc(1 / 1px)'),
            'calc(var(X))': (None, 1, None),
            'calc(2 * var(X))': (None, 1, None),
            'calc(2px + var(X))': (None, 1, None),
            # issue #24
            'rgb(0, 10, 255)': (None, 1, 'rgb(0, 10, 255)'),
            'hsl(10, 10%, 25%)': (None, 1, 'hsl(10, 10%, 25%)'),
            'rgba(0, 10, 255, 0.5)': (None, 1, 'rgba(0, 10, 255, 0.5)'),
            'hsla(10, 10%, 25%, 0.5)': (None, 1, 'hsla(10, 10%, 25%, 0.5)'),
            # issue #27
            'matrix(0.000092, 0.2500010, -0.250000, 0.000092, 0, 0)': (
                'matrix(0.000092, 0.250001, -0.25, 0.000092, 0, 0)',
                1,
                'matrix(0.000092, 0.250001, -0.25, 0.000092, 0, 0)',
            ),
        }
        for cssText, (exp_csstext, exp_length, exp_value) in tests.items():
            if exp_csstext is None:
                exp_csstext = cssText
            if exp_value is None:
                exp_value = exp_csstext
            pv = cssutils.css.PropertyValue(cssText)
            self.assertEqual(exp_csstext, pv.cssText)
            self.assertEqual(exp_length, pv.length)
            self.assertEqual(exp_value, pv.value)

        # {input: (expected cssText or None for the input itself,
        #          expected length)}
        tests = {
            '0 0px -0px +0px': ('0 0 0 0', 4),
            '1 2 3 4': (None, 4),
            '-1 -2 -3 -4': (None, 4),
            '-1 2': (None, 2),
            '-1px red "x"': (None, 3),
            'a, b c': (None, 3),
            '1px1 2% 3': ('1px1 2% 3', 3),
            'f(+1pX, -2, 5%) 1': ('f(+1px, -2, 5%) 1', 2),
            '0 f()0': ('0 f() 0', 3),
            'f()0': ('f() 0', 2),
            'f()1%': ('f() 1%', 2),
            'f()1px': ('f() 1px', 2),
            'f()"str"': ('f() "str"', 2),
            'f()ident': ('f() ident', 2),
            'f()#123': ('f() #123', 2),
            'f()url()': ('f() url()', 2),
            'f()f()': ('f() f()', 2),
            'url(x.gif)0 0': ('url(x.gif) 0 0', 3),
            'url(x.gif)no-repeat': ('url(x.gif) no-repeat', 2),
        }
        for cssText, (exp_csstext, exp_length) in tests.items():
            if exp_csstext is None:
                exp_csstext = cssText
            pv = cssutils.css.PropertyValue(cssText)
            self.assertEqual(exp_csstext, pv.cssText)
            self.assertEqual(exp_length, pv.length)

        # {input: expected cssText} — round-tripped via do_equal_r.
        # NOTE(review): literal duplicate dict keys with identical values
        # ('url(a)', '1 2', '''url("'")''') were removed; they silently
        # overrode each other and tested nothing extra.
        tests = {
            # hash and rgb/a
            '#112234': '#112234',
            '#112233': '#123',
            'rgb(1,2,3)': 'rgb(1, 2, 3)',
            'rgb( 1 , 2 , 3 )': 'rgb(1, 2, 3)',
            'rgba(1,2,3,4)': 'rgba(1, 2, 3, 4)',
            'rgba( 1 , 2 , 3 , 4 )': 'rgba(1, 2, 3, 4)',
            'rgb(-1,+2,0)': 'rgb(-1, +2, 0)',
            'rgba(-1,+2,0, 0)': 'rgba(-1, +2, 0, 0)',
            # FUNCTION
            'f(1,2)': 'f(1, 2)',
            'f( 1 , 2 )': 'f(1, 2)',
            'f(-1,+2)': 'f(-1, +2)',
            'f( -1 , +2 )': 'f(-1, +2)',
            'fun( -1 , +2 )': 'fun(-1, +2)',
            'local( x )': 'local(x)',
            'test(1px, #111, y, 1, 1%, "1", y(), var(x))': 'test(1px, #111, y, 1, 1%, "1", y(), var(x))',
            'test(-1px, #111, y, -1, -1%, "1", -y())': 'test(-1px, #111, y, -1, -1%, "1", -y())',
            'url(y) format( "x" , "y" )': 'url(y) format("x", "y")',
            'f(1 2,3 4)': 'f(1 2, 3 4)',
            # IE expression
            r'Expression()': 'Expression()',
            r'expression(-1 < +2)': 'expression(-1<+2)',
            r'expression(document.width == "1")': 'expression(document.width=="1")',
            'alpha(opacity=80)': 'alpha(opacity=80)',
            'alpha( opacity = 80 , x=2 )': 'alpha(opacity=80, x=2)',
            'expression(eval(document.documentElement.scrollTop))': 'expression(eval(document.documentElement.scrollTop))',
            # TODO
            # u'expression((function(ele){ele.style.behavior="none";})(this))':
            #     u'expression((function(ele){ele.style.behavior="none";})(this))',
            # unicode-range
            'u+f': 'u+f',
            'U+ABCdef': 'u+abcdef',
            # url
            'url(a)': 'url(a)',
            'uRl(a)': 'url(a)',
            'u\\rl(a)': 'url(a)',
            'url("a")': 'url(a)',
            'url( "a" )': 'url(a)',
            'url(";")': 'url(";")',
            'url(",")': 'url(",")',
            'url(")")': 'url(")")',
            '''url("'")''': '''url("'")''',
            '''url('"')''': '''url("\\"")''',
            # operator
            '1': '1',
            '1 2': '1 2',
            '1,2': '1, 2',
            '1, 2': '1, 2',
            '1 ,2': '1, 2',
            '1 , 2': '1, 2',
            '1/2': '1/2',
            '1/ 2': '1/2',
            '1 /2': '1/2',
            '1 / 2': '1/2',
            # comment
            '1/**/2': '1 /**/ 2',
            '1 /**/2': '1 /**/ 2',
            '1/**/ 2': '1 /**/ 2',
            '1 /**/ 2': '1 /**/ 2',
            '1 /*a*/ /*b*/ 2': '1 /*a*/ /*b*/ 2',
            # , before
            '1,/**/2': '1, /**/ 2',
            '1 ,/**/2': '1, /**/ 2',
            '1, /**/2': '1, /**/ 2',
            '1 , /**/2': '1, /**/ 2',
            # , after
            '1/**/,2': '1 /**/, 2',
            '1/**/ ,2': '1 /**/, 2',
            '1/**/, 2': '1 /**/, 2',
            '1/**/ , 2': '1 /**/, 2',
            # all
            '1/*a*/ ,/*b*/ 2': '1 /*a*/, /*b*/ 2',
            '1 /*a*/, /*b*/2': '1 /*a*/, /*b*/ 2',
            '1 /*a*/ , /*b*/ 2': '1 /*a*/, /*b*/ 2',
            # list
            'a b1,b2 b2,b3,b4': 'a b1, b2 b2, b3, b4',
            'a b1 , b2 b2 , b3 , b4': 'a b1, b2 b2, b3, b4',
            'u+1 , u+2-5': 'u+1, u+2-5',
            'local( x ), url(y) format( "x" , "y" )': 'local(x), url(y) format("x", "y")',
            # FUNCTION
            'attr( href )': 'attr(href)',
            # PrinceXML extended FUNC syntax with nested FUNC
            'target-counter(attr(href),page)': 'target-counter(attr(href), page)',
        }
        self.do_equal_r(tests)

        # invalid values must raise SyntaxErr
        # NOTE(review): a duplicate '1 , ' entry was removed.
        tests = [
            'a+',
            '-',
            '+',
            '-%',
            '+a',
            '--1px',
            '++1px',
            '#',
            '#00',
            '#12x',
            '#xyz',
            '#0000',
            '#00000',
            '#0000000',
            '-#0',
            # operator
            ',',
            '1,,2',
            '1,/**/,2',
            '1 , /**/ , 2',
            '1,',
            '1, ',
            '1 ,',
            '1 , ',
            '1//2',
            # URL
            'url(x))',
            # string
            '"',
            "'",
            # function
            'f(-)',
            'f(x))',
            # calc
            'calc(',
            'calc(1',
            'calc(1 + 1',
            'calc(1+1)',
            'calc(1-1)',
            'calc(1 +1)',
            'calc(1+ 1)',
            'calc(1 -1)',
            'calc(1- 1)',
            'calc(+)',
            'calc(+ 1)',
            'calc(-)',
            'calc(- 1)',
            'calc(*)',
            'calc(*1)',
            'calc(* 2)',
            'calc(/)',
            'calc(/1)',
            'calc(/ 2)',
            'calc(1+)',
            'calc(1 +)',
            'calc(1 + )',
            'calc(2px -)',
            'calc(3px*)',
            'calc(3px *)',
            'calc(3px * )',
            'calc(4em/)',
            'calc(4em /)',
            'calc(4em / )',
            'calc(1 + + 1)',
            'calc(1 ++ 1)',
        ]
        self.do_raise_r_list(tests, xml.dom.SyntaxErr)

    def test_list(self):
        "PropertyValue[index]"
        # issue #41
        css = """div.one {color: rgb(255, 0, 0);} """
        sheet = cssutils.parseString(css)
        pv = sheet.cssRules[0].style.getProperty('color').propertyValue
        self.assertEqual(pv.value, 'rgb(255, 0, 0)')
        self.assertEqual(pv[0].value, 'rgb(255, 0, 0)')

        # issue #42 — comma separated list exposes each item
        sheet = cssutils.parseString('body { font-family: "A", b, serif }')
        pv = sheet.cssRules[0].style.getProperty('font-family').propertyValue
        self.assertEqual(3, pv.length)
        self.assertEqual(pv[0].value, 'A')
        self.assertEqual(pv[1].value, 'b')
        self.assertEqual(pv[2].value, 'serif')

    def test_comments(self):
        "PropertyValue with comment"
        # issue #45 — comments around a valid value keep it valid
        for t in (
            'green',
            'green /* comment */',
            '/* comment */green',
            '/* comment */green/* comment */',
            '/* comment */ green /* comment */',
            '/* comment *//**/ green /* comment *//**/',
        ):
            sheet = cssutils.parseString('body {color: %s; }' % t)
            p = sheet.cssRules[0].style.getProperties()[0]
            self.assertEqual(p.valid, True)

        # ... and comments do not make an invalid value valid
        for t in (
            'gree',
            'gree /* comment */',
            '/* comment */gree',
            '/* comment */gree/* comment */',
            '/* comment */ gree /* comment */',
            '/* comment *//**/ gree /* comment *//**/',
        ):
            sheet = cssutils.parseString('body {color: %s; }' % t)
            p = sheet.cssRules[0].style.getProperties()[0]
            self.assertEqual(p.valid, False)

    def test_incomplete(self):
        "PropertyValue (incomplete)"
        # unterminated url()/strings are closed on serialization
        tests = {'url("a': 'url(a)', 'url(a': 'url(a)'}
        for v, exp in tests.items():
            s = cssutils.parseString('a { background: %s' % v)
            v = s.cssRules[0].style.background
            self.assertEqual(v, exp)

    def test_readonly(self):
        "PropertyValue._readonly"
        v = cssutils.css.PropertyValue(cssText='inherit')
        self.assertTrue(False is v._readonly)

        v = cssutils.css.PropertyValue(cssText='inherit', readonly=True)
        self.assertTrue(True is v._readonly)
        # FIX(review): these two checks used two-argument assertTrue
        # (the second argument is the *message*, so they always passed);
        # assertEqual is what was intended.
        self.assertEqual('inherit', v.cssText)
        self.assertRaises(xml.dom.NoModificationAllowedErr, v._setCssText, 'x')
        # cssText must be unchanged after the rejected modification
        self.assertEqual('inherit', v.cssText)

    def test_reprANDstr(self):
        "PropertyValue.__repr__(), .__str__()"
        cssText = 'inherit'
        s = cssutils.css.PropertyValue(cssText=cssText)
        self.assertTrue(cssText in str(s))

        # repr() must round-trip through eval()
        s2 = eval(repr(s))
        self.assertTrue(isinstance(s2, s.__class__))
        self.assertTrue(cssText == s2.cssText)
class ValueTestCase(basetest.BaseTestCase):
    # Tests for the generic ``cssutils.css.Value`` type
    # (HASH, IDENT, STRING and UNICODE-RANGE tokens).

    def test_init(self):
        "Value.__init__()"
        # an empty Value has empty text/value and no type yet
        v = cssutils.css.Value()
        self.assertTrue('' == v.cssText)
        self.assertTrue('' == v.value)
        self.assertTrue(None is v.type)

    def test_cssText(self):
        "Value.cssText"
        # HASH IDENT STRING UNICODE-RANGE
        # {input: (expected cssText, expected value, expected type)}
        tests = {
            '#123': ('#123', '#123', 'HASH'),
            '#123456': ('#123456', '#123456', 'HASH'),
            '#112233': ('#123', '#112233', 'HASH'),
            ' #112233 ': ('#123', '#112233', 'HASH'),
            'red': ('red', 'red', 'IDENT'),
            ' red ': ('red', 'red', 'IDENT'),
            'red ': ('red', 'red', 'IDENT'),
            ' red': ('red', 'red', 'IDENT'),
            'red-': ('red-', 'red-', 'IDENT'),
            '-red': ('-red', '-red', 'IDENT'),
            # strings are normalized to double quotes, value is unquoted
            '"red"': ('"red"', 'red', 'STRING'),
            "'red'": ('"red"', 'red', 'STRING'),
            ' "red" ': ('"red"', 'red', 'STRING'),
            r'"red\""': (r'"red\""', r'red"', 'STRING'),
            r"'x\"'": (r'"x\\""', r'x\"', 'STRING'),  # ???
            # a backslash-escaped newline inside a CSS string is dropped
            # (the continuation line must start at column 0 so no extra
            # whitespace becomes part of the Python string literal)
            '''"x\
y"''': (
                '"xy"',
                'xy',
                'STRING',
            ),
        }
        for (p, (r, n, t)) in list(tests.items()):
            v = cssutils.css.Value(p)
            self.assertEqual(r, v.cssText)
            self.assertEqual(t, v.type)
            self.assertEqual(n, v.value)
class ColorValueTestCase(basetest.BaseTestCase):
    # Tests for ``cssutils.css.ColorValue``: hash, rgb()/rgba(),
    # hsl()/hsla() and named colors plus channel access.

    def test_init(self):
        "ColorValue.__init__()"
        # defaults: empty text/value, name 'transparent', no colorType
        v = cssutils.css.ColorValue()
        self.assertEqual(v.COLOR_VALUE, v.type)
        self.assertTrue('' == v.cssText)
        self.assertTrue('' == v.value)
        self.assertEqual('transparent', v.name)
        self.assertEqual(None, v.colorType)

    def test_cssText(self):
        "ColorValue.cssText"
        # {input: (expected serialized cssText,)}
        tests = {
            # HASH
            '#123': ('#123',),
            '#112233': ('#123',),
            # rgb
            'rgb(1,2,3)': ('rgb(1, 2, 3)',),
            'rgb(1%,2%,3%)': ('rgb(1%, 2%, 3%)',),
            'rgb(-1,-1,-1)': ('rgb(-1, -1, -1)',),
            'rgb(-1%,-2%,-3%)': ('rgb(-1%, -2%, -3%)',),
            # rgba
            'rgba(1,2,3, 0)': ('rgba(1, 2, 3, 0)',),
            # hsl
            'hsl(1,2%,3%)': ('hsl(1, 2%, 3%)',),
            'hsla(1,2%,3%, 1.0)': ('hsla(1, 2%, 3%, 1)',),
        }
        for (p, (r,)) in list(tests.items()):
            # construction via __init__ ...
            v = cssutils.css.ColorValue(p)
            self.assertEqual(v.COLOR_VALUE, v.type)
            self.assertEqual(r, v.cssText)
            self.assertEqual(r, v.value)

            # ... and via cssText assignment must behave the same
            v = cssutils.css.ColorValue()
            v.cssText = p
            self.assertEqual(v.COLOR_VALUE, v.type)
            self.assertEqual(r, v.cssText)
            self.assertEqual(r, v.value)

        # invalid color expressions must raise (mixed %/int channels,
        # wrong hash length, wrong argument count, % alpha, ...)
        tests = {
            '1': xml.dom.SyntaxErr,
            'a': xml.dom.SyntaxErr,
            '#12': xml.dom.SyntaxErr,
            '#1234': xml.dom.SyntaxErr,
            '#1234567': xml.dom.SyntaxErr,
            '#12345678': xml.dom.SyntaxErr,
            'rgb(1,1%,1%)': xml.dom.SyntaxErr,
            'rgb(1%,1,1)': xml.dom.SyntaxErr,
            'rgb(-1,-1%,-1%)': xml.dom.SyntaxErr,
            'rgb(-1%,-1,-1)': xml.dom.SyntaxErr,
            'rgb(1,1,1, 0)': xml.dom.SyntaxErr,
            'rgb(1%,1%,1%, 0)': xml.dom.SyntaxErr,
            'rgba(1,1,1)': xml.dom.SyntaxErr,
            'rgba(1%,1%,1%)': xml.dom.SyntaxErr,
            'rgba(1,1,1, 0%)': xml.dom.SyntaxErr,
            'rgba(1%,1%,1%, 0%)': xml.dom.SyntaxErr,
            'hsl(1,2%,3%, 1)': xml.dom.SyntaxErr,
            'hsla(1,2%,3%)': xml.dom.SyntaxErr,
            'hsl(1,2,3)': xml.dom.SyntaxErr,
            'hsl(1%,2,3)': xml.dom.SyntaxErr,
            'hsl(1%,2,3%)': xml.dom.SyntaxErr,
            'hsl(1%,2%,3)': xml.dom.SyntaxErr,
            'hsla(1,2%,3%, 0%)': xml.dom.SyntaxErr,
            'hsla(1,2,3, 0.0)': xml.dom.SyntaxErr,
            'hsla(1%,2,3, 0.0)': xml.dom.SyntaxErr,
            'hsla(1%,2,3%, 0.0)': xml.dom.SyntaxErr,
            'hsla(1%,2%,3, 0.0)': xml.dom.SyntaxErr,
        }
        self.r = cssutils.css.ColorValue()
        self.do_raise_r(tests)

    def test_rgb(self):
        "ColorValue.red .green .blue"
        # {tuple of equivalent color notations: (red, green, blue, alpha)}
        tests = {
            ('#0A0AD2', 'rgb(10, 10, 210)'): (10, 10, 210, 1.0),
            # TODO: Fix rounding?
            ('hsl(240, 91%, 43%)',): (10, 10, 209, 1.0),
            ('#ff8800', '#f80', 'rgb(255, 136, 0)', 'rgba(255, 136, 0, 1.0)'): (
                255,
                136,
                0,
                1.0,
            ),
            (
                'red',
                '#ff0000',
                '#f00',
                'hsl(0, 100%, 50%)',
                'hsla(0, 100%, 50%, 1.0)',
            ): (255, 0, 0, 1.0),
            ('lime', '#00ff00', '#0f0', 'hsl(120, 100%, 50%)'): (0, 255, 0, 1.0),
            ('rgba(255, 127, 0, .1)', 'rgba(100%, 50%, 0%, .1)'): (255, 127, 0, 0.1),
            ('transparent', 'rgba(0, 0, 0, 0)'): (0, 0, 0, 0),
            ('aqua',): (0, 255, 255, 1.0),
        }
        for colors, rgba in list(tests.items()):
            for color in colors:
                c = cssutils.css.ColorValue(color)
                self.assertEqual(c.red, rgba[0])
                self.assertEqual(c.green, rgba[1])
                self.assertEqual(c.blue, rgba[2])
                self.assertEqual(c.alpha, rgba[3])
class URIValueTestCase(basetest.BaseTestCase):
    # Tests for ``cssutils.css.URIValue`` (``url(...)`` values).

    def test_init(self):
        "URIValue.__init__()"
        # defaults: serialized as 'url()' with empty value/uri
        v = cssutils.css.URIValue()
        self.assertTrue('url()' == v.cssText)
        self.assertTrue('' == v.value)
        self.assertTrue('' == v.uri)
        self.assertTrue(v.URI is v.type)

        # setting .uri updates .value and .cssText in lockstep
        v.uri = '1'
        self.assertTrue('1' == v.value)
        self.assertTrue('1' == v.uri)
        self.assertEqual('url(1)', v.cssText)

        # setting .value updates .uri and .cssText as well
        v.value = '2'
        self.assertTrue('2' == v.value)
        self.assertTrue('2' == v.uri)
        self.assertEqual('url(2)', v.cssText)

    def test_absoluteUri(self):
        "URIValue.absoluteUri"
        # a sheet with an href resolves relative uris against it
        s = cssutils.parseString(
            'a { background-image: url(x.gif)}', href="/path/to/x.css"
        )
        v = s.cssRules[0].style.getProperty('background-image').propertyValue[0]
        self.assertEqual('x.gif', v.uri)
        self.assertEqual('/path/to/x.gif', v.absoluteUri)

        # without a base href absoluteUri stays relative
        v = cssutils.css.URIValue('url(x.gif)')
        self.assertEqual('x.gif', v.uri)
        self.assertEqual('x.gif', v.absoluteUri)

    def test_cssText(self):
        "URIValue.cssText"
        # {input: (expected cssText, expected value/uri, expected type)}
        tests = {
            'url()': ('url()', '', 'URI'),
            # comments are part of the url!
            'url(/**/)': ('url(/**/)', '/**/', 'URI'),
            'url(/**/1)': ('url(/**/1)', '/**/1', 'URI'),
            'url(1/**/)': ('url(1/**/)', '1/**/', 'URI'),
            'url(/**/1/**/)': ('url(/**/1/**/)', '/**/1/**/', 'URI'),
            'url(some.gif)': ('url(some.gif)', 'some.gif', 'URI'),
            ' url(some.gif) ': ('url(some.gif)', 'some.gif', 'URI'),
            'url( some.gif )': ('url(some.gif)', 'some.gif', 'URI'),
        }
        for (p, (r, n, t)) in list(tests.items()):
            # construction via __init__ ...
            v = cssutils.css.URIValue(p)
            self.assertEqual(r, v.cssText)
            self.assertEqual(t, v.type)
            self.assertEqual(n, v.value)
            self.assertEqual(n, v.uri)

            # ... and via cssText assignment must behave the same
            v = cssutils.css.URIValue()
            v.cssText = p
            self.assertEqual(r, v.cssText)
            self.assertEqual(t, v.type)
            self.assertEqual(n, v.value)
            self.assertEqual(n, v.uri)

        # invalid/unterminated url expressions must raise
        tests = {
            'a()': xml.dom.SyntaxErr,
            '1': xml.dom.SyntaxErr,
            'url(': xml.dom.SyntaxErr,
            'url("': xml.dom.SyntaxErr,
            'url(\'': xml.dom.SyntaxErr,
        }
        self.r = cssutils.css.URIValue()
        self.do_raise_r(tests)
class DimensionValueTestCase(basetest.BaseTestCase):
    # Tests for ``cssutils.css.DimensionValue``
    # (NUMBER, DIMENSION and PERCENTAGE tokens).

    def test_init(self):
        "DimensionValue.__init__()"
        # defaults: empty text/value, no type, no dimension
        v = cssutils.css.DimensionValue()
        self.assertTrue('' == v.cssText)
        self.assertTrue('' == v.value)
        self.assertTrue(None is v.type)
        self.assertTrue(None is v.dimension)

    def test_cssText(self):
        "DimensionValue.cssText"
        # NUMBER DIMENSION PERCENTAGE
        # {input: (expected cssText, numeric value, dimension, type)}
        # zeros collapse to '0' (dropping sign and unit for px),
        # a trailing '.0' is dropped, a leading '+' is kept
        tests = {
            '0': ('0', 0, None, 'NUMBER'),
            '00': ('0', 0, None, 'NUMBER'),
            '.0': ('0', 0, None, 'NUMBER'),
            '0.0': ('0', 0, None, 'NUMBER'),
            '+0': ('0', 0, None, 'NUMBER'),
            '+00': ('0', 0, None, 'NUMBER'),
            '+.0': ('0', 0, None, 'NUMBER'),
            '+0.0': ('0', 0, None, 'NUMBER'),
            '-0': ('0', 0, None, 'NUMBER'),
            '-00': ('0', 0, None, 'NUMBER'),
            '-.0': ('0', 0, None, 'NUMBER'),
            '-0.0': ('0', 0, None, 'NUMBER'),
            '1': ('1', 1, None, 'NUMBER'),
            '1.0': ('1', 1.0, None, 'NUMBER'),
            '1.1': ('1.1', 1.1, None, 'NUMBER'),
            '+1': ('+1', 1, None, 'NUMBER'),
            '+1.0': ('+1', 1.0, None, 'NUMBER'),
            '+1.1': ('+1.1', 1.1, None, 'NUMBER'),
            '-1': ('-1', -1, None, 'NUMBER'),
            '-1.0': ('-1', -1, None, 'NUMBER'),
            '-1.1': ('-1.1', -1.1, None, 'NUMBER'),
            '0px': ('0', 0, 'px', 'DIMENSION'),
            '1px': ('1px', 1, 'px', 'DIMENSION'),
            '1.0px': ('1px', 1.0, 'px', 'DIMENSION'),
            '1.1px': ('1.1px', 1.1, 'px', 'DIMENSION'),
            '-1px': ('-1px', -1, 'px', 'DIMENSION'),
            '-1.1px': ('-1.1px', -1.1, 'px', 'DIMENSION'),
            '+1px': ('+1px', 1, 'px', 'DIMENSION'),
            '1px1': ('1px1', 1, 'px1', 'DIMENSION'),
            '0%': ('0%', 0, '%', 'PERCENTAGE'),
            '1%': ('1%', 1, '%', 'PERCENTAGE'),
            '1.1%': ('1.1%', 1.1, '%', 'PERCENTAGE'),
            '-1%': ('-1%', -1, '%', 'PERCENTAGE'),
            '-1.1%': ('-1.1%', -1.1, '%', 'PERCENTAGE'),
            '+1%': ('+1%', 1, '%', 'PERCENTAGE'),
        }
        for (p, (r, n, d, t)) in list(tests.items()):
            v = cssutils.css.DimensionValue(p)
            self.assertEqual(r, v.cssText)
            self.assertEqual(t, v.type)
            self.assertEqual(n, v.value)
            self.assertEqual(d, v.dimension)
class CSSFunctionTestCase(basetest.BaseTestCase):
    """Tests for ``cssutils.css.CSSFunction`` (generic FUNCTION values)."""

    def test_init(self):
        "CSSFunction.__init__()"
        # defaults: empty text/value, type is always 'FUNCTION'
        v = cssutils.css.CSSFunction()
        self.assertEqual('', v.cssText)
        self.assertEqual('FUNCTION', v.type)
        self.assertEqual(v.value, '')

    def test_cssText(self):
        "CSSFunction.cssText"
        # {input: (expected cssText, expected value or None for cssText)}
        # function names are lowercased, arguments keep their case;
        # comments are kept in cssText but dropped from value.
        # FIX(review): the original table listed the key 'x(1,2)' twice
        # with identical expectations; the silent duplicate was removed.
        tests = {
            'x(x)': ('x(x)', None),
            'X( X )': ('x(X)', None),
            'x(1,2)': ('x(1, 2)', None),
            'x(1/**/)': ('x(1 /**/)', 'x(1)'),
            'x(/**/1)': ('x(/**/ 1)', 'x(1)'),
            'x(/**/1/**/)': ('x(/**/ 1 /**/)', 'x(1)'),
            'x(/**/1,x/**/)': ('x(/**/ 1, x /**/)', 'x(1, x)'),
        }
        for f, (cssText, value) in tests.items():
            if value is None:
                value = cssText
            v = cssutils.css.CSSFunction(f)
            self.assertEqual(cssText, v.cssText)
            self.assertEqual('FUNCTION', v.type)
            self.assertEqual(value, v.value)
class CSSVariableTestCase(basetest.BaseTestCase):
    """Tests for ``cssutils.css.CSSVariable`` (``var(...)`` values)."""

    def test_init(self):
        "CSSVariable.__init__()"
        # defaults: empty text, type 'VARIABLE', no name and no value
        v = cssutils.css.CSSVariable()
        self.assertEqual('', v.cssText)
        self.assertEqual('VARIABLE', v.type)
        self.assertTrue(None is v.name)
        self.assertTrue(None is v.value)

    def test_cssText(self):
        "CSSVariable.cssText"
        # {input: (expected cssText, variable name, serialized fallback)}
        # NOTE(review): the fallback column is not asserted by the loop
        # below — it only documents the expected fallback serialization.
        # FIX(review): the 'var(C, #f00 )' row listed fallback '#fff',
        # a typo for '#f00' (the fallback actually present in the input).
        tests = {
            'var(x)': ('var(x)', 'x', None),
            'VAR( X )': ('var(X)', 'X', None),
            'var(c1,rgb(14,14,14))': (
                'var(c1, rgb(14, 14, 14))',
                'c1',
                'rgb(14, 14, 14)',
            ),
            'var( L, 1px )': ('var(L, 1px)', 'L', '1px'),
            'var(L,1)': ('var(L, 1)', 'L', '1'),
            'var(T, calc( 2 * 1px ))': ('var(T, calc(2 * 1px))', 'T', 'calc(2 * 1px)'),
            'var(U, url( example.png ) )': (
                'var(U, url(example.png))',
                'U',
                'url(example.png)',
            ),
            'var(C, #f00 )': ('var(C, #f00)', 'C', '#f00'),
        }
        for var, (cssText, name, fallback) in tests.items():
            v = cssutils.css.CSSVariable(var)
            self.assertEqual(cssText, v.cssText)
            self.assertEqual('VARIABLE', v.type)
            self.assertEqual(name, v.name)
            # not resolved so it is None
            self.assertEqual(None, v.value)
# def test_cssValueType(self):
# "CSSValue.cssValueType .cssValueTypeString"
# tests = [
# ([u'inherit', u'INhe\\rit'], 'CSS_INHERIT', cssutils.css.CSSValue),
# (['1', '1%', '1em', '1ex', '1px', '1cm', '1mm', '1in', '1pt', '1pc',
# '1deg', '1rad', '1grad', '1ms', '1s', '1hz', '1khz', '1other',
# '"string"', "'string'", 'url(x)', 'red',
# 'attr(a)', 'counter(x)', 'rect(1px, 2px, 3px, 4px)',
# 'rgb(0, 0, 0)', '#000', '#123456', 'rgba(0, 0, 0, 0)',
# 'hsl(0, 0, 0)', 'hsla(0, 0, 0, 0)',
# ],
# 'CSS_PRIMITIVE_VALUE', cssutils.css.CSSPrimitiveValue),
# ([u'1px 1px', 'red blue green x'], 'CSS_VALUE_LIST',
# cssutils.css.CSSValueList),
# # what is a custom value?
# #([], 'CSS_CUSTOM', cssutils.css.CSSValue)
# ]
# for values, name, cls in tests:
# for value in values:
# v = cssutils.css.CSSValue(cssText=value)
# if value == "'string'":
# # will be changed to " always
# value = '"string"'
# self.assertEqual(value, v.cssText)
# self.assertEqual(name, v.cssValueTypeString)
# self.assertEqual(getattr(v, name), v.cssValueType)
# self.assertEqual(cls, type(v))
# class CSSPrimitiveValueTestCase(basetest.BaseTestCase):
#
# def test_init(self):
# "CSSPrimitiveValue.__init__()"
# v = cssutils.css.CSSPrimitiveValue(u'1')
# self.assertTrue(u'1' == v.cssText)
#
# self.assertTrue(v.CSS_PRIMITIVE_VALUE == v.cssValueType)
# self.assertTrue("CSS_PRIMITIVE_VALUE" == v.cssValueTypeString)
#
# self.assertTrue(v.CSS_NUMBER == v.primitiveType)
# self.assertTrue("CSS_NUMBER" == v.primitiveTypeString)
#
# # DUMMY to be able to test empty constructor call
# #self.assertRaises(xml.dom.SyntaxErr, v.__init__, None)
#
# self.assertRaises(xml.dom.InvalidAccessErr, v.getCounterValue)
# self.assertRaises(xml.dom.InvalidAccessErr, v.getRGBColorValue)
# self.assertRaises(xml.dom.InvalidAccessErr, v.getRectValue)
# self.assertRaises(xml.dom.InvalidAccessErr, v.getStringValue)
#
# def test_CSS_UNKNOWN(self):
# "CSSPrimitiveValue.CSS_UNKNOWN"
# v = cssutils.css.CSSPrimitiveValue(u'expression(false)')
# self.assertTrue(v.CSS_UNKNOWN == v.primitiveType)
# self.assertTrue('CSS_UNKNOWN' == v.primitiveTypeString)
#
# def test_CSS_NUMBER_AND_OTHER_DIMENSIONS(self):
# "CSSPrimitiveValue.CSS_NUMBER .. CSS_DIMENSION"
# defs = [
# ('', 'CSS_NUMBER'),
# ('%', 'CSS_PERCENTAGE'),
# ('em', 'CSS_EMS'),
# ('ex', 'CSS_EXS'),
# ('px', 'CSS_PX'),
# ('cm', 'CSS_CM'),
# ('mm', 'CSS_MM'),
# ('in', 'CSS_IN'),
# ('pt', 'CSS_PT'),
# ('pc', 'CSS_PC'),
# ('deg', 'CSS_DEG'),
# ('rad', 'CSS_RAD'),
# ('grad', 'CSS_GRAD'),
# ('ms', 'CSS_MS'),
# ('s', 'CSS_S'),
# ('hz', 'CSS_HZ'),
# ('khz', 'CSS_KHZ'),
# ('other_dimension', 'CSS_DIMENSION')
# ]
# for dim, name in defs:
# for n in (0, 1, 1.1, -1, -1.1, -0):
# v = cssutils.css.CSSPrimitiveValue('%i%s' % (n, dim))
# self.assertEqual(name, v.primitiveTypeString)
# self.assertEqual(getattr(v, name), v.primitiveType)
#
# def test_CSS_STRING_AND_OTHER(self):
# "CSSPrimitiveValue.CSS_STRING .. CSS_RGBCOLOR"
# defs = [
# (('""', "''", '"some thing"', "' A\\ND '",
# # comma separated lists are STRINGS FOR NOW!
# 'a, b',
# '"a", "b"',
# ), 'CSS_STRING'),
# (('url(a)', 'url("a b")', "url(' ')"), 'CSS_URI'),
# (('some', 'or_anth-er'), 'CSS_IDENT'),
# (('attr(a)', 'attr(b)'), 'CSS_ATTR'),
# (('counter(1)', 'counter(2)'), 'CSS_COUNTER'),
# (('rect(1,2,3,4)',), 'CSS_RECT'),
# (('rgb(1,2,3)', 'rgb(10%, 20%, 30%)', '#123', '#123456'),
# 'CSS_RGBCOLOR'),
# (('rgba(1,2,3,4)','rgba(10%, 20%, 30%, 40%)', ),
# 'CSS_RGBACOLOR'),
# (('U+0', 'u+ffffff', 'u+000000-f',
# 'u+0-f, U+ee-ff'), 'CSS_UNICODE_RANGE')
# ]
#
# for examples, name in defs:
# for x in examples:
# v = cssutils.css.CSSPrimitiveValue(x)
# self.assertEqual(getattr(v, name), v.primitiveType)
# self.assertEqual(name, v.primitiveTypeString)
#
# def test_getFloat(self):
# "CSSPrimitiveValue.getFloatValue()"
# # NOT TESTED are float values as it seems difficult to
# # compare these. Maybe use decimal.Decimal?
#
# v = cssutils.css.CSSPrimitiveValue(u'1px')
# tests = {
# '0': (v.CSS_NUMBER, 0),
# '-1.1': (v.CSS_NUMBER, -1.1),
# '1%': (v.CSS_PERCENTAGE, 1),
# '-1%': (v.CSS_PERCENTAGE, -1),
# '1em': (v.CSS_EMS, 1),
# '-1.1em': (v.CSS_EMS, -1.1),
# '1ex': (v.CSS_EXS, 1),
# '1px': (v.CSS_PX, 1),
#
# '1cm': (v.CSS_CM, 1),
# '1cm': (v.CSS_MM, 10),
# '254cm': (v.CSS_IN, 100),
# '1mm': (v.CSS_MM, 1),
# '10mm': (v.CSS_CM, 1),
# '254mm': (v.CSS_IN, 10),
# '1in': (v.CSS_IN, 1),
# '100in': (v.CSS_CM, 254), # ROUNDED!!!
# '10in': (v.CSS_MM, 254), # ROUNDED!!!
#
# '1pt': (v.CSS_PT, 1),
# '1pc': (v.CSS_PC, 1),
#
# '1deg': (v.CSS_DEG, 1),
# '1rad': (v.CSS_RAD, 1),
# '1grad': (v.CSS_GRAD, 1),
#
# '1ms': (v.CSS_MS, 1),
# '1000ms': (v.CSS_S, 1),
# '1s': (v.CSS_S, 1),
# '1s': (v.CSS_MS, 1000),
#
# '1hz': (v.CSS_HZ, 1),
# '1000hz': (v.CSS_KHZ, 1),
# '1khz': (v.CSS_KHZ, 1),
# '1khz': (v.CSS_HZ, 1000),
#
# '1DIMENSION': (v.CSS_DIMENSION, 1),
# }
# for cssText in tests:
# v.cssText = cssText
# unitType, exp = tests[cssText]
# val = v.getFloatValue(unitType)
# if unitType in (v.CSS_IN, v.CSS_CM):
# val = round(val)
# self.assertEqual(val , exp)
#
# def test_setFloat(self):
# "CSSPrimitiveValue.setFloatValue()"
# V = cssutils.css.CSSPrimitiveValue
#
# tests = {
# # unitType, value
# (V.CSS_NUMBER, 1): [
# # unitType, setvalue,
# # getvalue or expected exception, msg or cssText
# (V.CSS_NUMBER, 0, 0, '0'),
# (V.CSS_NUMBER, 0.1, 0.1, '0.1'),
# (V.CSS_NUMBER, -0, 0, '0'),
# (V.CSS_NUMBER, 2, 2, '2'),
# (V.CSS_NUMBER, 2.0, 2, '2'),
# (V.CSS_NUMBER, 2.1, 2.1, '2.1'),
# (V.CSS_NUMBER, -2.1, -2.1, '-2.1'),
# # setting with string does work
# (V.CSS_NUMBER, '1', 1, '1'),
# (V.CSS_NUMBER, '1.1', 1.1, '1.1'),
# (V.CSS_PX, 1, xml.dom.InvalidAccessErr, None),
# (V.CSS_DEG, 1, xml.dom.InvalidAccessErr, None),
# (V.CSS_RAD, 1, xml.dom.InvalidAccessErr, None),
# (V.CSS_GRAD, 1, xml.dom.InvalidAccessErr, None),
# (V.CSS_S, 1, xml.dom.InvalidAccessErr, None),
# (V.CSS_MS, 1, xml.dom.InvalidAccessErr, None),
# (V.CSS_KHZ, 1, xml.dom.InvalidAccessErr, None),
# (V.CSS_HZ, 1, xml.dom.InvalidAccessErr, None),
# (V.CSS_DIMENSION, 1, xml.dom.InvalidAccessErr, None),
# (V.CSS_MM, 2, xml.dom.InvalidAccessErr, None),
#
# (V.CSS_NUMBER, 'x', xml.dom.InvalidAccessErr,
# "CSSPrimitiveValue: floatValue 'x' is not a float"),
# (V.CSS_NUMBER, '1x', xml.dom.InvalidAccessErr,
# "CSSPrimitiveValue: floatValue '1x' is not a float"),
#
# (V.CSS_STRING, 'x', xml.dom.InvalidAccessErr,
# "CSSPrimitiveValue: unitType 'CSS_STRING' is not a float type"),
# (V.CSS_URI, 'x', xml.dom.InvalidAccessErr,
# "CSSPrimitiveValue: unitType 'CSS_URI' is not a float type"),
# (V.CSS_ATTR, 'x', xml.dom.InvalidAccessErr,
# "CSSPrimitiveValue: unitType 'CSS_ATTR' is not a float type"),
# (V.CSS_IDENT, 'x', xml.dom.InvalidAccessErr,
# "CSSPrimitiveValue: unitType 'CSS_IDENT' is not a float type"),
# (V.CSS_RGBCOLOR, 'x', xml.dom.InvalidAccessErr,
# "CSSPrimitiveValue: unitType 'CSS_RGBCOLOR' is not a float type"),
# (V.CSS_RGBACOLOR, 'x', xml.dom.InvalidAccessErr,
# "CSSPrimitiveValue: unitType 'CSS_RGBACOLOR' is not a float type"),
# (V.CSS_RECT, 'x', xml.dom.InvalidAccessErr,
# "CSSPrimitiveValue: unitType 'CSS_RECT' is not a float type"),
# (V.CSS_COUNTER, 'x', xml.dom.InvalidAccessErr,
# "CSSPrimitiveValue: unitType 'CSS_COUNTER' is not a float type"),
# (V.CSS_EMS, 1, xml.dom.InvalidAccessErr,
# "CSSPrimitiveValue: Cannot coerce primitiveType
# "'CSS_NUMBER' to 'CSS_EMS'"),
# (V.CSS_EXS, 1, xml.dom.InvalidAccessErr,
# "CSSPrimitiveValue: Cannot coerce primitiveType
# "'CSS_NUMBER' to 'CSS_EXS'")
# ],
# (V.CSS_MM, '1mm'): [
# (V.CSS_MM, 2, 2, '2mm'),
# (V.CSS_MM, 0, 0, '0mm'),
# (V.CSS_MM, 0.1, 0.1, '0.1mm'),
# (V.CSS_MM, -0, -0, '0mm'),
# (V.CSS_MM, 3.0, 3, '3mm'),
# (V.CSS_MM, 3.1, 3.1, '3.1mm'),
# (V.CSS_MM, -3.1, -3.1, '-3.1mm'),
# (V.CSS_CM, 1, 10, '10mm'),
# (V.CSS_IN, 10, 254, '254mm'),
# (V.CSS_PT, 1, 1828.8, '1828.8mm'),
# (V.CSS_PX, 1, xml.dom.InvalidAccessErr, None),
# (V.CSS_NUMBER, 2, xml.dom.InvalidAccessErr, None)
# ],
# (V.CSS_PT, '1pt'): [
# (V.CSS_PT, 2, 2, '2pt'),
# (V.CSS_PC, 12, 1, '1pt'),
# (V.CSS_NUMBER, 1, xml.dom.InvalidAccessErr, None),
# (V.CSS_DEG, 1, xml.dom.InvalidAccessErr, None),
# (V.CSS_PX, 1, xml.dom.InvalidAccessErr, None)
# ],
# (V.CSS_KHZ, '1khz'): [
# (V.CSS_HZ, 2000, 2, '2khz'),
# (V.CSS_NUMBER, 1, xml.dom.InvalidAccessErr, None),
# (V.CSS_DEG, 1, xml.dom.InvalidAccessErr, None),
# (V.CSS_PX, 1, xml.dom.InvalidAccessErr, None)
# ]
# }
# for test in tests:
# initialType, initialValue = test
# pv = cssutils.css.CSSPrimitiveValue(initialValue)
# for setType, setValue, exp, cssText in tests[test]:
# if type(exp) == types.TypeType or\
# type(exp) == types.ClassType: # 2.4 compatibility
# if cssText:
# self.assertRaisesMsg(
# exp, cssText, pv.setFloatValue, setType, setValue)
# else:
# self.assertRaises(
# exp, pv.setFloatValue, setType, setValue)
# else:
# pv.setFloatValue(setType, setValue)
# self.assertEqual(pv._value[0], cssText)
# if cssText == '0mm':
# cssText = '0'
# self.assertEqual(pv.cssText, cssText)
# self.assertEqual(pv.getFloatValue(initialType), exp)
#
# def test_getString(self):
# "CSSPrimitiveValue.getStringValue()"
# v = cssutils.css.CSSPrimitiveValue(u'1px')
# self.assertTrue(v.primitiveType == v.CSS_PX)
# self.assertRaises(xml.dom.InvalidAccessErr,
# v.getStringValue)
#
# pv = cssutils.css.CSSPrimitiveValue
# tests = {
# pv.CSS_STRING: ("'red'", 'red'),
# pv.CSS_STRING: ('"red"', 'red'),
# pv.CSS_URI: ('url(http://example.com)', None),
# pv.CSS_URI: ("url('http://example.com')",
# u"http://example.com"),
# pv.CSS_URI: ('url("http://example.com")',
# u'http://example.com'),
# pv.CSS_URI: ('url("http://example.com?)")',
# u'http://example.com?)'),
# pv.CSS_IDENT: ('red', None),
# pv.CSS_ATTR: ('attr(att-name)',
# u'att-name'), # the name of the attrr
# }
# for t in tests:
# val, exp = tests[t]
# if not exp:
# exp = val
#
# v = cssutils.css.CSSPrimitiveValue(val)
# self.assertEqual(v.primitiveType, t)
# self.assertEqual(v.getStringValue(), exp)
#
# def test_setString(self):
# "CSSPrimitiveValue.setStringValue()"
# # CSS_STRING
# v = cssutils.css.CSSPrimitiveValue(u'"a"')
# self.assertTrue(v.CSS_STRING == v.primitiveType)
# v.setStringValue(v.CSS_STRING, 'b')
# self.assertTrue(('b', 'STRING') == v._value)
# self.assertEqual('b', v.getStringValue())
# self.assertRaisesMsg(xml.dom.InvalidAccessErr,
# u"CSSPrimitiveValue: Cannot coerce primitiveType
# "'CSS_STRING' to 'CSS_URI'",
# v.setStringValue, *(v.CSS_URI, 'x'))
# self.assertRaisesMsg(xml.dom.InvalidAccessErr,
# u"CSSPrimitiveValue: Cannot coerce primitiveType
# "'CSS_STRING' to 'CSS_IDENT'",
# v.setStringValue, *(v.CSS_IDENT, 'x'))
# self.assertRaisesMsg(xml.dom.InvalidAccessErr,
# u"CSSPrimitiveValue: Cannot coerce primitiveType
# "'CSS_STRING' to 'CSS_ATTR'",
# v.setStringValue, *(v.CSS_ATTR, 'x'))
#
# # CSS_IDENT
# v = cssutils.css.CSSPrimitiveValue('new')
# v.setStringValue(v.CSS_IDENT, 'ident')
# self.assertTrue(v.CSS_IDENT == v.primitiveType)
# self.assertTrue(('ident', 'IDENT') == v._value)
# self.assertTrue('ident' == v.getStringValue())
# self.assertRaisesMsg(xml.dom.InvalidAccessErr,
# u"CSSPrimitiveValue: Cannot coerce primitiveType
# "'CSS_IDENT' to 'CSS_URI'",
# v.setStringValue, *(v.CSS_URI, 'x'))
# self.assertRaisesMsg(xml.dom.InvalidAccessErr,
# u"CSSPrimitiveValue: Cannot coerce primitiveType
# "'CSS_IDENT' to 'CSS_STRING'",
# v.setStringValue, *(v.CSS_STRING, '"x"'))
# self.assertRaisesMsg(xml.dom.InvalidAccessErr,
# u"CSSPrimitiveValue: Cannot coerce primitiveType
# "'CSS_IDENT' to 'CSS_ATTR'",
# v.setStringValue, *(v.CSS_ATTR, 'x'))
#
# # CSS_URI
# v = cssutils.css.CSSPrimitiveValue('url(old)')
# v.setStringValue(v.CSS_URI, '(')
# self.assertEqual((u'(', 'URI'), v._value)
# self.assertEqual(u'(', v.getStringValue())
#
# v.setStringValue(v.CSS_URI, ')')
# self.assertEqual((u')', 'URI'), v._value)
# self.assertEqual(u')', v.getStringValue())
#
# v.setStringValue(v.CSS_URI, '"')
# self.assertEqual(ur'"', v.getStringValue())
# self.assertEqual((ur'"', 'URI'), v._value)
#
# v.setStringValue(v.CSS_URI, "''")
# self.assertEqual(ur"''", v.getStringValue())
# self.assertEqual((ur"''", 'URI'), v._value)
#
# v.setStringValue(v.CSS_URI, ',')
# self.assertEqual(ur',', v.getStringValue())
# self.assertEqual((ur',', 'URI'), v._value)
#
# v.setStringValue(v.CSS_URI, ' ')
# self.assertEqual((u' ', 'URI'), v._value)
# self.assertEqual(u' ', v.getStringValue())
#
# v.setStringValue(v.CSS_URI, 'a)')
# self.assertEqual((u'a)', 'URI'), v._value)
# self.assertEqual(u'a)', v.getStringValue())
#
# v.setStringValue(v.CSS_URI, 'a')
# self.assertTrue(v.CSS_URI == v.primitiveType)
# self.assertEqual((u'a', 'URI'), v._value)
# self.assertEqual(u'a', v.getStringValue())
#
# self.assertRaisesMsg(xml.dom.InvalidAccessErr,
# u"CSSPrimitiveValue: Cannot coerce primitiveType
# "'CSS_URI' to 'CSS_IDENT'",
# v.setStringValue, *(v.CSS_IDENT, 'x'))
# self.assertRaisesMsg(xml.dom.InvalidAccessErr,
# u"CSSPrimitiveValue: Cannot coerce primitiveType
# "'CSS_URI' to 'CSS_STRING'",
# v.setStringValue, *(v.CSS_STRING, '"x"'))
# self.assertRaisesMsg(xml.dom.InvalidAccessErr,
# u"CSSPrimitiveValue: Cannot coerce primitiveType
# "'CSS_URI' to 'CSS_ATTR'",
# v.setStringValue, *(v.CSS_ATTR, 'x'))
#
# # CSS_ATTR
# v = cssutils.css.CSSPrimitiveValue('attr(old)')
# v.setStringValue(v.CSS_ATTR, 'a')
# self.assertTrue(v.CSS_ATTR == v.primitiveType)
# self.assertTrue('a' == v.getStringValue())
# self.assertRaisesMsg(xml.dom.InvalidAccessErr,
# u"CSSPrimitiveValue: Cannot coerce primitiveType
# "'CSS_ATTR' to 'CSS_IDENT'",
# v.setStringValue, *(v.CSS_IDENT, 'x'))
# self.assertRaisesMsg(xml.dom.InvalidAccessErr,
# u"CSSPrimitiveValue: Cannot coerce primitiveType
# "'CSS_ATTR' to 'CSS_STRING'",
# v.setStringValue, *(v.CSS_STRING, '"x"'))
# self.assertRaisesMsg(xml.dom.InvalidAccessErr,
# u"CSSPrimitiveValue: Cannot coerce primitiveType
# "'CSS_ATTR' to 'CSS_URI'",
# v.setStringValue, *(v.CSS_URI, 'x'))
#
# # TypeError as 'x' is no valid type
# self.assertRaisesMsg(xml.dom.InvalidAccessErr,
# u"CSSPrimitiveValue: stringType 'x' (UNKNOWN TYPE) is not a string type",
# v.setStringValue, *('x', 'brown'))
# # IndexError as 111 is no valid type
# self.assertRaisesMsg(xml.dom.InvalidAccessErr,
# u"CSSPrimitiveValue: stringType 111 (UNKNOWN TYPE) is not a string type",
# v.setStringValue, *(111, 'brown'))
# # CSS_PX is no string type
# self.assertRaisesMsg(xml.dom.InvalidAccessErr,
# u"CSSPrimitiveValue: stringType CSS_PX is not a string type",
# v.setStringValue, *(v.CSS_PX, 'brown'))
#
# def test_typeRGBColor(self):
# "RGBColor"
# v = cssutils.css.CSSPrimitiveValue('RGB(1, 5, 10)')
# self.assertEqual(v.CSS_RGBCOLOR, v.primitiveType)
# self.assertEqual(u'rgb(1, 5, 10)', v.cssText)
#
# v = cssutils.css.CSSPrimitiveValue('rgb(1, 5, 10)')
# self.assertEqual(v.CSS_RGBCOLOR, v.primitiveType)
# self.assertEqual(u'rgb(1, 5, 10)', v.cssText)
#
# v = cssutils.css.CSSPrimitiveValue('rgb(1%, 5%, 10%)')
# self.assertEqual(v.CSS_RGBCOLOR, v.primitiveType)
# self.assertEqual(u'rgb(1%, 5%, 10%)', v.cssText)
#
# v = cssutils.css.CSSPrimitiveValue(' rgb( 1 ,5, 10 )')
# self.assertEqual(v.CSS_RGBCOLOR, v.primitiveType)
# v = cssutils.css.CSSPrimitiveValue('rgb(1,5,10)')
# self.assertEqual(v.CSS_RGBCOLOR, v.primitiveType)
# v = cssutils.css.CSSPrimitiveValue('rgb(1%, .5%, 10.1%)')
# self.assertEqual(v.CSS_RGBCOLOR, v.primitiveType)
# Allow the test module to be executed directly from the command line.
if __name__ == "__main__":
    import unittest

    unittest.main()
| 39.825521 | 123 | 0.452015 |
114e360a4d55580ef9a106bebd8e1ee947f88ad7 | 445 | py | Python | examples/example7-all-boards.py | KebabLord/py8chan | e52081a728c172e3bbbf1fa88951d24295930f90 | [
"WTFPL"
] | 16 | 2016-01-29T01:06:24.000Z | 2021-01-11T17:33:14.000Z | examples/example7-all-boards.py | KebabLord/py8chan | e52081a728c172e3bbbf1fa88951d24295930f90 | [
"WTFPL"
] | 20 | 2015-10-23T14:47:58.000Z | 2021-07-05T23:39:17.000Z | examples/example7-all-boards.py | KebabLord/py8chan | e52081a728c172e3bbbf1fa88951d24295930f90 | [
"WTFPL"
] | 13 | 2016-10-07T18:46:10.000Z | 2022-03-27T16:08:43.000Z | # This example demonstrates the use of the get_all_boards function
import py8chan
def main():
    """Print the title and subtitle of the ten busiest 8chan boards."""
    # Fetch the full board list from the site (network call via py8chan).
    boards = py8chan.get_all_boards()
    # Order boards from most to fewest posts.
    boards.sort(key=lambda board: board.num_posts, reverse=True)
    # Slice instead of indexing 0..9: avoids an IndexError when the site
    # reports fewer than ten boards.
    for board in boards[:10]:
        print(board.title + ": " + board.subtitle)
# Run the example only when invoked as a script, not on import.
if __name__ == "__main__":
    main()
| 29.666667 | 66 | 0.676404 |
fa729824bf66b945b01e2b7c4a282b63a89c915c | 12,539 | py | Python | pymdwizard/gui/ui_files/UI_timeinfo.py | mmfink/fort-pymdwizard | 96f46e8cc2594b82b475b4f3fcae96a05ebc03e4 | [
"CC-BY-4.0"
] | 53 | 2017-05-01T05:03:33.000Z | 2022-03-13T04:49:15.000Z | pymdwizard/gui/ui_files/UI_timeinfo.py | mmfink/fort-pymdwizard | 96f46e8cc2594b82b475b4f3fcae96a05ebc03e4 | [
"CC-BY-4.0"
] | 109 | 2017-05-17T15:15:40.000Z | 2022-03-24T21:12:45.000Z | pymdwizard/gui/ui_files/UI_timeinfo.py | mmfink/fort-pymdwizard | 96f46e8cc2594b82b475b4f3fcae96a05ebc03e4 | [
"CC-BY-4.0"
] | 17 | 2017-02-08T16:18:18.000Z | 2021-01-28T19:38:09.000Z | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'timeinfo.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(294, 158)
self.verticalLayout = QtWidgets.QVBoxLayout(Form)
self.verticalLayout.setObjectName("verticalLayout")
self.widget_2 = QtWidgets.QWidget(Form)
sizePolicy = QtWidgets.QSizePolicy(
QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum
)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.widget_2.sizePolicy().hasHeightForWidth())
self.widget_2.setSizePolicy(sizePolicy)
self.widget_2.setObjectName("widget_2")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.widget_2)
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setObjectName("horizontalLayout")
self.widget = QtWidgets.QWidget(self.widget_2)
sizePolicy = QtWidgets.QSizePolicy(
QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum
)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.widget.sizePolicy().hasHeightForWidth())
self.widget.setSizePolicy(sizePolicy)
self.widget.setObjectName("widget")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.widget)
self.verticalLayout_2.setContentsMargins(3, 3, 3, 0)
self.verticalLayout_2.setSpacing(3)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.label_2 = QtWidgets.QLabel(self.widget)
sizePolicy = QtWidgets.QSizePolicy(
QtWidgets.QSizePolicy.Ignored, QtWidgets.QSizePolicy.Minimum
)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_2.sizePolicy().hasHeightForWidth())
self.label_2.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(9)
font.setBold(False)
font.setItalic(True)
font.setWeight(50)
self.label_2.setFont(font)
self.label_2.setStyleSheet("font: italic;")
self.label_2.setWordWrap(True)
self.label_2.setObjectName("label_2")
self.horizontalLayout_2.addWidget(self.label_2)
self.verticalLayout_2.addLayout(self.horizontalLayout_2)
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.label_3 = QtWidgets.QLabel(self.widget)
sizePolicy = QtWidgets.QSizePolicy(
QtWidgets.QSizePolicy.Ignored, QtWidgets.QSizePolicy.Minimum
)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_3.sizePolicy().hasHeightForWidth())
self.label_3.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(9)
font.setItalic(True)
self.label_3.setFont(font)
self.label_3.setStyleSheet("font: italic;")
self.label_3.setWordWrap(True)
self.label_3.setObjectName("label_3")
self.horizontalLayout_3.addWidget(self.label_3)
self.label_9 = QtWidgets.QLabel(self.widget)
sizePolicy = QtWidgets.QSizePolicy(
QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum
)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_9.sizePolicy().hasHeightForWidth())
self.label_9.setSizePolicy(sizePolicy)
self.label_9.setMinimumSize(QtCore.QSize(15, 0))
self.label_9.setMaximumSize(QtCore.QSize(16777215, 20))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(9)
font.setBold(False)
font.setItalic(False)
font.setWeight(50)
self.label_9.setFont(font)
self.label_9.setScaledContents(True)
self.label_9.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignTop)
self.label_9.setIndent(0)
self.label_9.setObjectName("label_9")
self.horizontalLayout_3.addWidget(self.label_9)
self.verticalLayout_2.addLayout(self.horizontalLayout_3)
self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.radio_single = QtWidgets.QRadioButton(self.widget)
sizePolicy = QtWidgets.QSizePolicy(
QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed
)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.radio_single.sizePolicy().hasHeightForWidth())
self.radio_single.setSizePolicy(sizePolicy)
self.radio_single.setChecked(True)
self.radio_single.setObjectName("radio_single")
self.horizontalLayout_4.addWidget(self.radio_single)
self.radio_range = QtWidgets.QRadioButton(self.widget)
sizePolicy = QtWidgets.QSizePolicy(
QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed
)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.radio_range.sizePolicy().hasHeightForWidth())
self.radio_range.setSizePolicy(sizePolicy)
self.radio_range.setObjectName("radio_range")
self.horizontalLayout_4.addWidget(self.radio_range)
self.radio_multiple = QtWidgets.QRadioButton(self.widget)
sizePolicy = QtWidgets.QSizePolicy(
QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed
)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(
self.radio_multiple.sizePolicy().hasHeightForWidth()
)
self.radio_multiple.setSizePolicy(sizePolicy)
self.radio_multiple.setObjectName("radio_multiple")
self.horizontalLayout_4.addWidget(self.radio_multiple)
self.verticalLayout_2.addLayout(self.horizontalLayout_4)
self.fgdc_timeinfo = QtWidgets.QStackedWidget(self.widget)
sizePolicy = QtWidgets.QSizePolicy(
QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum
)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(
self.fgdc_timeinfo.sizePolicy().hasHeightForWidth()
)
self.fgdc_timeinfo.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(9)
self.fgdc_timeinfo.setFont(font)
self.fgdc_timeinfo.setObjectName("fgdc_timeinfo")
self.fgdc_sngdate = QtWidgets.QWidget()
sizePolicy = QtWidgets.QSizePolicy(
QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum
)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.fgdc_sngdate.sizePolicy().hasHeightForWidth())
self.fgdc_sngdate.setSizePolicy(sizePolicy)
self.fgdc_sngdate.setObjectName("fgdc_sngdate")
self.verticalLayout_10 = QtWidgets.QVBoxLayout(self.fgdc_sngdate)
self.verticalLayout_10.setSizeConstraint(QtWidgets.QLayout.SetMaximumSize)
self.verticalLayout_10.setContentsMargins(9, 9, 0, 0)
self.verticalLayout_10.setSpacing(6)
self.verticalLayout_10.setObjectName("verticalLayout_10")
spacerItem = QtWidgets.QSpacerItem(
0, 0, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum
)
self.verticalLayout_10.addItem(spacerItem)
self.fgdc_timeinfo.addWidget(self.fgdc_sngdate)
self.fgdc_rngdates = QtWidgets.QWidget()
sizePolicy = QtWidgets.QSizePolicy(
QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum
)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(
self.fgdc_rngdates.sizePolicy().hasHeightForWidth()
)
self.fgdc_rngdates.setSizePolicy(sizePolicy)
self.fgdc_rngdates.setObjectName("fgdc_rngdates")
self.verticalLayout_7 = QtWidgets.QVBoxLayout(self.fgdc_rngdates)
self.verticalLayout_7.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)
self.verticalLayout_7.setContentsMargins(0, 9, 0, 0)
self.verticalLayout_7.setObjectName("verticalLayout_7")
self.verticalLayout_3 = QtWidgets.QVBoxLayout()
self.verticalLayout_3.setSizeConstraint(QtWidgets.QLayout.SetMinimumSize)
self.verticalLayout_3.setSpacing(0)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.layout_daterange = QtWidgets.QHBoxLayout()
self.layout_daterange.setSizeConstraint(QtWidgets.QLayout.SetMinimumSize)
self.layout_daterange.setSpacing(0)
self.layout_daterange.setObjectName("layout_daterange")
self.verticalLayout_3.addLayout(self.layout_daterange)
self.verticalLayout_7.addLayout(self.verticalLayout_3)
spacerItem1 = QtWidgets.QSpacerItem(
0, 0, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum
)
self.verticalLayout_7.addItem(spacerItem1)
self.fgdc_timeinfo.addWidget(self.fgdc_rngdates)
self.fgdc_mdattim = QtWidgets.QWidget()
sizePolicy = QtWidgets.QSizePolicy(
QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum
)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.fgdc_mdattim.sizePolicy().hasHeightForWidth())
self.fgdc_mdattim.setSizePolicy(sizePolicy)
self.fgdc_mdattim.setObjectName("fgdc_mdattim")
self.verticalLayout_9 = QtWidgets.QVBoxLayout(self.fgdc_mdattim)
self.verticalLayout_9.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)
self.verticalLayout_9.setContentsMargins(4, 4, 4, 0)
self.verticalLayout_9.setSpacing(3)
self.verticalLayout_9.setObjectName("verticalLayout_9")
self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
self.horizontalLayout_6.setSizeConstraint(QtWidgets.QLayout.SetMinimumSize)
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.layout_multipledates = QtWidgets.QVBoxLayout()
self.layout_multipledates.setSizeConstraint(QtWidgets.QLayout.SetMinimumSize)
self.layout_multipledates.setContentsMargins(-1, 0, -1, -1)
self.layout_multipledates.setObjectName("layout_multipledates")
self.horizontalLayout_6.addLayout(self.layout_multipledates)
self.verticalLayout_9.addLayout(self.horizontalLayout_6)
self.fgdc_timeinfo.addWidget(self.fgdc_mdattim)
self.verticalLayout_2.addWidget(self.fgdc_timeinfo)
self.horizontalLayout.addWidget(self.widget)
self.verticalLayout.addWidget(self.widget_2)
self.retranslateUi(Form)
self.fgdc_timeinfo.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "Form"))
self.label_2.setText(
_translate("Form", "What is the time period represented in the dataset?")
)
self.label_3.setText(
_translate(
"Form",
"Select one of 'Single Date', 'Date Range', or 'Multiple Dates'.",
)
)
self.label_9.setToolTip(_translate("Form", "Required"))
self.label_9.setText(
_translate(
"Form",
'<html><head/><body><p><span style=" font-size:18pt; color:#55aaff;">*</span></p></body></html>',
)
)
self.radio_single.setText(_translate("Form", "Single"))
self.radio_range.setText(_translate("Form", "Range"))
self.radio_multiple.setText(_translate("Form", "Multiple"))
| 48.042146 | 113 | 0.70181 |
1b0792d07b0972f963515f262d508bed46e496cc | 542 | py | Python | sark/plumbing.py | bmeisels/Sark | f02b7e7b501b73d786d2286b72ee6651812fb720 | [
"MIT"
] | 572 | 2015-03-09T09:26:02.000Z | 2022-03-20T14:13:34.000Z | sark/plumbing.py | shemesh999/Sark | 421f22124006567d79443135046519f0487c7261 | [
"MIT"
] | 78 | 2015-05-25T12:54:19.000Z | 2022-03-28T00:36:52.000Z | sark/plumbing.py | shemesh999/Sark | 421f22124006567d79443135046519f0487c7261 | [
"MIT"
] | 105 | 2015-05-17T20:31:18.000Z | 2022-03-21T15:45:30.000Z | import os
# Environment variable that, when set, overrides the plugins directory.
SARK_PLUGINS_ENV_NAME = "sarkPlugins"
# Root of the Sark package tree: one level above this module's directory.
SARK_DEFAULT_PATH = os.path.normpath(os.path.join(os.path.dirname(__file__), "../"))
def get_sark_dir(dirname, envname=None):
    """Return the path of a Sark data directory.

    Args:
        dirname: Directory name relative to the Sark root (e.g. ``"plugins"``).
        envname: Optional environment variable name; when set in the
            environment, its value overrides the default location entirely.

    Returns:
        ``os.environ[envname]`` if ``envname`` is given and set, otherwise
        ``SARK_DEFAULT_PATH/dirname``.
    """
    # Guard the None default explicitly: `None in os.environ` raises
    # TypeError (os.environ keys must be str), not a clean miss.
    if envname is not None and envname in os.environ:
        return os.environ[envname]
    return os.path.join(SARK_DEFAULT_PATH, dirname)
def get_plugins_dir():
    """Return the Sark plugins directory (overridable via the env var
    named by SARK_PLUGINS_ENV_NAME)."""
    return get_sark_dir("plugins", SARK_PLUGINS_ENV_NAME)
def get_codecs_dir():
    """Return the directory containing the ``sark.encodings`` package."""
    # NOTE(review): lazy import — presumably to avoid a circular import at
    # module load time; confirm before moving to the top of the file.
    from sark import encodings
    return os.path.dirname(encodings.__file__)
# Resolved once at import time so consumers can use it as a plain constant.
CODECS_DIR = get_codecs_dir()
PLUGINS_DIR = get_plugins_dir() | 23.565217 | 84 | 0.750923 |
dfca9a3367d71c40fc615c97c387e6a3efa1cd70 | 247,900 | py | Python | src/azure-cli/azure/cli/command_modules/network/_help.py | damienbod/azure-cli | a804e5f8ebdfe2af145f51eedd44266076d3cccb | [
"MIT"
] | 4 | 2016-08-23T06:19:01.000Z | 2018-03-20T22:47:15.000Z | src/azure-cli/azure/cli/command_modules/network/_help.py | damienbod/azure-cli | a804e5f8ebdfe2af145f51eedd44266076d3cccb | [
"MIT"
] | 120 | 2018-03-27T19:14:40.000Z | 2020-12-10T23:53:35.000Z | src/azure-cli/azure/cli/command_modules/network/_help.py | damienbod/azure-cli | a804e5f8ebdfe2af145f51eedd44266076d3cccb | [
"MIT"
] | 11 | 2018-08-23T21:31:06.000Z | 2020-09-03T21:39:51.000Z | # coding=utf-8
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.help_files import helps # pylint: disable=unused-import
# pylint: disable=line-too-long, too-many-lines
helps['network'] = """
type: group
short-summary: Manage Azure Network resources.
"""
helps['network application-gateway'] = """
type: group
short-summary: Manage application-level routing and load balancing services.
long-summary: To learn more about Application Gateway, visit https://docs.microsoft.com/azure/application-gateway/application-gateway-create-gateway-cli
"""
helps['network application-gateway address-pool'] = """
type: group
short-summary: Manage address pools of an application gateway.
"""
helps['network application-gateway address-pool create'] = """
type: command
short-summary: Create an address pool.
examples:
- name: Create an address pool with two endpoints.
text: |
az network application-gateway address-pool create -g MyResourceGroup \\
--gateway-name MyAppGateway -n MyAddressPool --servers 10.0.0.4 10.0.0.5
"""
helps['network application-gateway address-pool delete'] = """
type: command
short-summary: Delete an address pool.
examples:
- name: Delete an address pool.
text: az network application-gateway address-pool delete -g MyResourceGroup --gateway-name MyAppGateway -n MyAddressPool
"""
helps['network application-gateway address-pool list'] = """
type: command
short-summary: List address pools.
examples:
- name: List address pools.
text: az network application-gateway address-pool list -g MyResourceGroup --gateway-name MyAppGateway
"""
helps['network application-gateway address-pool show'] = """
type: command
short-summary: Get the details of an address pool.
examples:
- name: Get the details of an address pool.
text: az network application-gateway address-pool show -g MyResourceGroup --gateway-name MyAppGateway -n MyAddressPool
"""
# Fixed: the first example fused its continuation onto one line
# ("... \\ -n ..."), which rendered a stray backslash mid-command in the
# help output. Converted to the multi-line `text: |` form used by the
# sibling entries.
helps['network application-gateway address-pool update'] = """
type: command
short-summary: Update an address pool.
examples:
  - name: Update backend address pool.
    text: |
        az network application-gateway address-pool update -g MyResourceGroup --gateway-name MyAppGateway \\
        -n MyAddressPool --servers 10.0.0.4 10.0.0.5 10.0.0.6
  - name: Add to the backend address pool by using backend server IP address.
    text: |
        az network application-gateway address-pool update -g MyResourceGroup --gateway-name MyAppGateway -n MyAddressPool \\
        --add backendAddresses "{ \"ip_address\": \"{10.0.0.13}\" }"
"""
helps['network application-gateway auth-cert'] = """
type: group
short-summary: Manage authorization certificates of an application gateway.
"""
helps['network application-gateway auth-cert create'] = """
type: command
short-summary: Create an authorization certificate.
examples:
- name: Create an authorization certificate.
text: |
az network application-gateway auth-cert create -g MyResourceGroup --gateway-name MyAppGateway \\
-n MyAuthCert --cert-file /path/to/cert/file
"""
helps['network application-gateway auth-cert delete'] = """
type: command
short-summary: Delete an authorization certificate.
examples:
- name: Delete an authorization certificate.
text: az network application-gateway auth-cert delete -g MyResourceGroup --gateway-name MyAppGateway -n MyAuthCert
"""
helps['network application-gateway auth-cert list'] = """
type: command
short-summary: List authorization certificates.
examples:
- name: List authorization certificates.
text: az network application-gateway auth-cert list -g MyResourceGroup --gateway-name MyAppGateway
"""
helps['network application-gateway auth-cert show'] = """
type: command
short-summary: Show an authorization certificate.
examples:
- name: Show an authorization certificate.
text: az network application-gateway auth-cert show -g MyResourceGroup --gateway-name MyAppGateway -n MyAuthCert
- name: View expiry date of an authorization certificate. It is in Base-64 encoded X.509(.CER) format.
text: |
az network application-gateway auth-cert show -g MyResourceGroup --gateway-name MyAppGateway \\
-n MyAuthCert --query data -o tsv | base64 -d | openssl x509 -enddate -noout
"""
# Fixed: example command had its continuation fused onto one line
# ("... \\ -n ..."), leaving a literal backslash in the rendered help.
# Rewritten in the multi-line `text: |` form used by the sibling entries.
helps['network application-gateway auth-cert update'] = """
type: command
short-summary: Update an authorization certificate.
examples:
  - name: Update authorization certificates to use a new cert file.
    text: |
        az network application-gateway auth-cert update -g MyResourceGroup --gateway-name MyAppGateway \\
        -n MyAuthCert --cert-file /path/to/new/cert/file
"""
helps['network application-gateway create'] = """
type: command
short-summary: Create an application gateway.
examples:
- name: Create an application gateway with VMs as backend servers.
text: |
az network application-gateway create -g MyResourceGroup -n MyAppGateway --capacity 2 --sku Standard_Medium \\
--vnet-name MyVNet --subnet MySubnet --http-settings-cookie-based-affinity Enabled \\
--public-ip-address MyAppGatewayPublicIp --servers 10.0.0.4 10.0.0.5
- name: Create an application gateway. (autogenerated)
text: |
az network application-gateway create --capacity 2 --frontend-port MyFrontendPort --http-settings-cookie-based-affinity Enabled --http-settings-port 80 --http-settings-protocol Http --location westus2 --name MyAppGateway --public-ip-address MyAppGatewayPublicIp --resource-group MyResourceGroup --sku Standard_Small --subnet MySubnet --vnet-name MyVNet
crafted: true
"""
helps['network application-gateway delete'] = """
type: command
short-summary: Delete an application gateway.
examples:
- name: Delete an application gateway.
text: az network application-gateway delete -g MyResourceGroup -n MyAppGateway
"""
helps['network application-gateway private-link'] = """
type: group
short-summary: Manage Private Link of an Application Gateway
"""
helps['network application-gateway private-link add'] = """
type: command
short-summary: Add a new Private Link with a default IP Configuration and associate it with an existing Frontend IP
"""
helps['network application-gateway private-link remove'] = """
type: command
short-summary: Remove a Private Link and clear association with Frontend IP. The subnet associate with a Private Link might need to clear manually
"""
helps['network application-gateway private-link show'] = """
type: command
short-summary: Show a Private Link
"""
helps['network application-gateway private-link list'] = """
type: command
short-summary: List all the Private Link
"""
helps['network application-gateway private-link wait'] = """
type: command
short-summary: Place the CLI in a waiting state until the condition of corresponding application gateway is met
"""
helps['network application-gateway private-link ip-config'] = """
type: group
short-summary: Manage IP configuration of a Private Link to configure its capability
"""
helps['network application-gateway private-link ip-config add'] = """
type: command
short-summary: Add an IP configuration to a Private Link to scale up its capability
"""
helps['network application-gateway private-link ip-config remove'] = """
type: command
short-summary: Remove an IP configuration from a Private Link to scale down its capability
"""
helps['network application-gateway private-link ip-config show'] = """
type: command
short-summary: Show an IP configuration of a Private Link
"""
helps['network application-gateway private-link ip-config list'] = """
type: command
short-summary: List all the IP configuration of a Private Link
"""
helps['network application-gateway private-link ip-config wait'] = """
type: command
short-summary: Place the CLI in a waiting state until the condition of corresponding application gateway is met
"""
helps['network application-gateway frontend-ip'] = """
type: group
short-summary: Manage frontend IP addresses of an application gateway.
"""
helps['network application-gateway frontend-ip create'] = """
type: command
short-summary: Create a frontend IP address.
examples:
- name: Create a frontend IP address.
text: |
az network application-gateway frontend-ip create -g MyResourceGroup --gateway-name MyAppGateway \\
-n MyFrontendIp --public-ip-address MyPublicIpAddress
- name: Create a frontend IP address. (autogenerated)
text: |
az network application-gateway frontend-ip create --gateway-name MyAppGateway --name MyFrontendIp --private-ip-address 10.10.10.50 --resource-group MyResourceGroup --subnet MySubnet --vnet-name MyVnet
crafted: true
"""
helps['network application-gateway frontend-ip delete'] = """
type: command
short-summary: Delete a frontend IP address.
examples:
- name: Delete a frontend IP address.
text: az network application-gateway frontend-ip delete -g MyResourceGroup --gateway-name MyAppGateway -n MyFrontendIp
"""
helps['network application-gateway frontend-ip list'] = """
type: command
short-summary: List frontend IP addresses.
examples:
- name: List frontend IP addresses.
text: az network application-gateway frontend-ip list -g MyResourceGroup --gateway-name MyAppGateway
"""
helps['network application-gateway frontend-ip show'] = """
type: command
short-summary: Get the details of a frontend IP address.
examples:
- name: Get the details of a frontend IP address.
text: az network application-gateway frontend-ip show -g MyResourceGroup --gateway-name MyAppGateway -n MyFrontendIp
"""
helps['network application-gateway frontend-ip update'] = """
type: command
short-summary: Update a frontend IP address.
examples:
- name: Update a frontend IP address. (autogenerated)
text: |
az network application-gateway frontend-ip update --gateway-name MyAppGateway --name MyFrontendIp --private-ip-address 10.10.10.50 --resource-group MyResourceGroup
crafted: true
"""
helps['network application-gateway frontend-port'] = """
type: group
short-summary: Manage frontend ports of an application gateway.
"""
helps['network application-gateway frontend-port create'] = """
type: command
short-summary: Create a frontend port.
examples:
- name: Create a frontend port.
text: |
az network application-gateway frontend-port create -g MyResourceGroup --gateway-name MyAppGateway \\
-n MyFrontendPort --port 8080
"""
helps['network application-gateway frontend-port delete'] = """
type: command
short-summary: Delete a frontend port.
examples:
- name: Delete a frontend port.
text: az network application-gateway frontend-port delete -g MyResourceGroup --gateway-name MyAppGateway -n MyFrontendPort
"""
helps['network application-gateway frontend-port list'] = """
type: command
short-summary: List frontend ports.
examples:
- name: List frontend ports.
text: az network application-gateway frontend-port list -g MyResourceGroup --gateway-name MyAppGateway
"""
helps['network application-gateway frontend-port show'] = """
type: command
short-summary: Get the details of a frontend port.
examples:
- name: Get the details of a frontend port.
text: az network application-gateway frontend-port show -g MyResourceGroup --gateway-name MyAppGateway -n MyFrontendPort
"""
helps['network application-gateway frontend-port update'] = """
type: command
short-summary: Update a frontend port.
examples:
- name: Update a frontend port to use a different port.
text: |
az network application-gateway frontend-port update -g MyResourceGroup --gateway-name MyAppGateway \\
-n MyFrontendPort --port 8081
"""
helps['network application-gateway http-listener'] = """
type: group
short-summary: Manage HTTP listeners of an application gateway.
"""
helps['network application-gateway http-listener create'] = """
type: command
short-summary: Create an HTTP listener.
examples:
- name: Create an HTTP listener.
text: |
az network application-gateway http-listener create -g MyResourceGroup --gateway-name MyAppGateway \\
--frontend-port MyFrontendPort -n MyHttpListener --frontend-ip MyAppGatewayPublicIp
"""
helps['network application-gateway http-listener delete'] = """
type: command
short-summary: Delete an HTTP listener.
examples:
- name: Delete an HTTP listener.
text: az network application-gateway http-listener delete -g MyResourceGroup --gateway-name MyAppGateway -n MyHttpListener
"""
helps['network application-gateway http-listener list'] = """
type: command
short-summary: List HTTP listeners.
examples:
- name: List HTTP listeners.
text: az network application-gateway http-listener list -g MyResourceGroup --gateway-name MyAppGateway
"""
helps['network application-gateway http-listener show'] = """
type: command
short-summary: Get the details of an HTTP listener.
examples:
- name: Get the details of an HTTP listener.
text: az network application-gateway http-listener show -g MyResourceGroup --gateway-name MyAppGateway -n MyHttpListener
"""
helps['network application-gateway http-listener update'] = """
type: command
short-summary: Update an HTTP listener.
examples:
- name: Update an HTTP listener to use a different hostname.
text: |
az network application-gateway http-listener update -g MyResourceGroup --gateway-name MyAppGateway \\
-n MyHttpListener --host-name www.mynewhost.com
"""
helps['network application-gateway http-settings'] = """
type: group
short-summary: Manage HTTP settings of an application gateway.
"""
helps['network application-gateway http-settings create'] = """
type: command
short-summary: Create HTTP settings.
examples:
- name: Create HTTP settings.
text: |
az network application-gateway http-settings create -g MyResourceGroup --gateway-name MyAppGateway \\
-n MyHttpSettings --port 80 --protocol Http --cookie-based-affinity Disabled --timeout 30
- name: Create HTTP settings. (autogenerated)
text: |
az network application-gateway http-settings create --affinity-cookie-name MyAffinityCookie --cookie-based-affinity Enabled --gateway-name MyAppGateway --host-name MyHost --name MyHttpSettings --port 80 --probe MyNewProbe --protocol Http --resource-group MyResourceGroup --timeout 30
crafted: true
"""
helps['network application-gateway http-settings delete'] = """
type: command
short-summary: Delete HTTP settings.
examples:
- name: Delete HTTP settings.
text: az network application-gateway http-settings delete -g MyResourceGroup --gateway-name MyAppGateway -n MyHttpSettings
"""
helps['network application-gateway http-settings list'] = """
type: command
short-summary: List HTTP settings.
examples:
- name: List HTTP settings.
text: az network application-gateway http-settings list -g MyResourceGroup --gateway-name MyAppGateway
"""
helps['network application-gateway http-settings show'] = """
type: command
short-summary: Get the details of a gateway's HTTP settings.
examples:
- name: Get the details of a gateway's HTTP settings.
text: az network application-gateway http-settings show -g MyResourceGroup --gateway-name MyAppGateway -n MyHttpSettings
"""
helps['network application-gateway http-settings update'] = """
type: command
short-summary: Update HTTP settings.
examples:
- name: Update HTTP settings to use a new probe.
text: |
az network application-gateway http-settings update -g MyResourceGroup --gateway-name MyAppGateway \\
-n MyHttpSettings --probe MyNewProbe
- name: Update HTTP settings. (autogenerated)
text: |
az network application-gateway http-settings update --enable-probe true --gateway-name MyAppGateway --name MyHttpSettings --probe MyNewProbe --resource-group MyResourceGroup
crafted: true
"""
helps['network application-gateway identity'] = """
type: group
short-summary: Manage the managed service identity of an application gateway.
"""
helps['network application-gateway identity assign'] = """
type: command
short-summary: Assign a managed service identity to an application-gateway
examples:
- name: Assign an identity to the application gateway
      text: |
        az network application-gateway identity assign -g MyResourceGroup --gateway-name ag1 \\
        --identity /subscriptions/*-000000000000/resourceGroups/myResourceGroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/id1
"""
helps['network application-gateway identity remove'] = """
type: command
short-summary: Remove the managed service identity of an application-gateway
examples:
- name: Remove an identity to the application gateway
text: az network application-gateway identity remove -g MyResourceGroup --gateway-name ag1
"""
helps['network application-gateway identity show'] = """
type: command
short-summary: Show the managed service identity of an application-gateway
examples:
- name: Show an identity to the application gateway
text: az network application-gateway identity show -g MyResourceGroup --gateway-name ag1
"""
helps['network application-gateway list'] = """
type: command
short-summary: List application gateways.
examples:
- name: List application gateways.
text: az network application-gateway list -g MyResourceGroup
"""
helps['network application-gateway probe'] = """
type: group
short-summary: Manage probes to gather and evaluate information on a gateway.
"""
helps['network application-gateway probe create'] = """
type: command
short-summary: Create a probe.
examples:
- name: Create an application gateway probe.
text: |
az network application-gateway probe create -g MyResourceGroup --gateway-name MyAppGateway \\
-n MyProbe --protocol https --host 127.0.0.1 --path /path/to/probe
"""
helps['network application-gateway probe delete'] = """
type: command
short-summary: Delete a probe.
examples:
- name: Delete a probe.
text: az network application-gateway probe delete -g MyResourceGroup --gateway-name MyAppGateway -n MyProbe
- name: Delete a probe. (autogenerated)
text: |
az network application-gateway probe delete --gateway-name MyAppGateway --name MyProbe --resource-group MyResourceGroup --subscription MySubscription
crafted: true
"""
helps['network application-gateway probe list'] = """
type: command
short-summary: List probes.
examples:
- name: List probes.
text: az network application-gateway probe list -g MyResourceGroup --gateway-name MyAppGateway
"""
helps['network application-gateway probe show'] = """
type: command
short-summary: Get the details of a probe.
examples:
- name: Get the details of a probe.
text: az network application-gateway probe show -g MyResourceGroup --gateway-name MyAppGateway -n MyProbe
"""
helps['network application-gateway probe update'] = """
type: command
short-summary: Update a probe.
examples:
- name: Update an application gateway probe with a timeout of 60 seconds.
text: |
az network application-gateway probe update -g MyResourceGroup --gateway-name MyAppGateway \\
-n MyProbe --timeout 60
- name: Update a probe. (autogenerated)
text: |
az network application-gateway probe update --gateway-name MyAppGateway --host 127.0.0.1 --name MyProbe --resource-group MyResourceGroup --subscription MySubscription
crafted: true
"""
helps['network application-gateway redirect-config'] = """
type: group
short-summary: Manage redirect configurations.
"""
helps['network application-gateway redirect-config create'] = """
type: command
short-summary: Create a redirect configuration.
examples:
- name: Create a redirect configuration to a http-listener called MyBackendListener.
text: |
az network application-gateway redirect-config create -g MyResourceGroup \\
--gateway-name MyAppGateway -n MyRedirectConfig --type Permanent \\
--include-path true --include-query-string true --target-listener MyBackendListener
"""
helps['network application-gateway redirect-config delete'] = """
type: command
short-summary: Delete a redirect configuration.
examples:
- name: Delete a redirect configuration.
      text: |
        az network application-gateway redirect-config delete -g MyResourceGroup \\
        --gateway-name MyAppGateway -n MyRedirectConfig
"""
helps['network application-gateway redirect-config list'] = """
type: command
short-summary: List redirect configurations.
examples:
- name: List redirect configurations.
text: az network application-gateway redirect-config list -g MyResourceGroup --gateway-name MyAppGateway
"""
helps['network application-gateway redirect-config show'] = """
type: command
short-summary: Get the details of a redirect configuration.
examples:
- name: Get the details of a redirect configuration.
text: az network application-gateway redirect-config show -g MyResourceGroup --gateway-name MyAppGateway -n MyRedirectConfig
"""
helps['network application-gateway redirect-config update'] = """
type: command
short-summary: Update a redirect configuration.
examples:
- name: Update a redirect configuration to a different http-listener.
text: |
az network application-gateway redirect-config update -g MyResourceGroup --gateway-name MyAppGateway \\
-n MyRedirectConfig --type Permanent --target-listener MyNewBackendListener
- name: Update a redirect configuration. (autogenerated)
text: |
az network application-gateway redirect-config update --gateway-name MyAppGateway --include-path true --include-query-string true --name MyRedirectConfig --resource-group MyResourceGroup --target-listener MyNewBackendListener --type Permanent
crafted: true
"""
helps['network application-gateway rewrite-rule'] = """
short-summary: Manage rewrite rules of an application gateway.
type: group
"""
helps['network application-gateway rewrite-rule condition'] = """
short-summary: Manage rewrite rule conditions of an application gateway.
type: group
"""
helps['network application-gateway rewrite-rule condition create'] = """
short-summary: Create a rewrite rule condition.
type: command
parameters:
- name: --variable
populator-commands:
- az network application-gateway rewrite-rule condition list-server-variables
"""
helps['network application-gateway rewrite-rule condition delete'] = """
short-summary: Delete a rewrite rule condition.
type: command
"""
helps['network application-gateway rewrite-rule condition list'] = """
short-summary: List rewrite rule conditions.
type: command
examples:
- name: List rewrite rule conditions. (autogenerated)
text: |
az network application-gateway rewrite-rule condition list --gateway-name MyGateway --resource-group MyResourceGroup --rule-name MyRule --rule-set-name MyRuleSet
crafted: true
"""
helps['network application-gateway rewrite-rule condition show'] = """
short-summary: Get the details of a rewrite rule condition.
type: command
"""
helps['network application-gateway rewrite-rule condition update'] = """
short-summary: Update a rewrite rule condition.
type: command
parameters:
- name: --variable
populator-commands:
- az network application-gateway rewrite-rule condition list-server-variables
"""
helps['network application-gateway rewrite-rule create'] = """
short-summary: Create a rewrite rule.
type: command
parameters:
- name: --request-headers
populator-commands:
- az network application-gateway rewrite-rule list-request-headers
- name: --response-headers
populator-commands:
- az network application-gateway rewrite-rule list-response-headers
"""
helps['network application-gateway rewrite-rule delete'] = """
short-summary: Delete a rewrite rule.
type: command
examples:
- name: Delete a rewrite rule. (autogenerated)
text: |
az network application-gateway rewrite-rule delete --gateway-name MyGateway --name MyRewriteRule --resource-group MyResourceGroup --rule-set-name MyRuleSet
crafted: true
"""
helps['network application-gateway rewrite-rule list'] = """
short-summary: List rewrite rules.
type: command
examples:
- name: List rewrite rules. (autogenerated)
text: |
az network application-gateway rewrite-rule list --gateway-name MyGateway --resource-group MyResourceGroup --rule-set-name MyRuleSet
crafted: true
"""
helps['network application-gateway rewrite-rule set'] = """
short-summary: Manage rewrite rule sets of an application gateway.
type: group
"""
helps['network application-gateway rewrite-rule set create'] = """
short-summary: Create a rewrite rule set.
type: command
examples:
- name: Create a rewrite rule set. (autogenerated)
text: |
az network application-gateway rewrite-rule set create --gateway-name MyGateway --name MyRewriteRuleSet --resource-group MyResourceGroup
crafted: true
"""
helps['network application-gateway rewrite-rule set delete'] = """
short-summary: Delete a rewrite rule set.
type: command
"""
helps['network application-gateway rewrite-rule set list'] = """
short-summary: List rewrite rule sets.
type: command
examples:
- name: List rewrite rule sets. (autogenerated)
text: |
az network application-gateway rewrite-rule set list --gateway-name MyGateway --resource-group MyResourceGroup
crafted: true
"""
helps['network application-gateway rewrite-rule set show'] = """
short-summary: Get the details of a rewrite rule set.
type: command
examples:
- name: Get the details of a rewrite rule set. (autogenerated)
text: |
az network application-gateway rewrite-rule set show --gateway-name MyGateway --name MyRewriteRuleSet --resource-group MyResourceGroup
crafted: true
"""
helps['network application-gateway rewrite-rule set update'] = """
short-summary: Update a rewrite rule set.
type: command
examples:
- name: Update a rewrite rule set. (autogenerated)
text: |
az network application-gateway rewrite-rule set update --gateway-name MyGateway --name MyRewriteRuleSet --resource-group MyResourceGroup
crafted: true
"""
helps['network application-gateway rewrite-rule show'] = """
short-summary: Get the details of a rewrite rule.
type: command
examples:
- name: Get the details of a rewrite rule. (autogenerated)
text: |
az network application-gateway rewrite-rule show --gateway-name MyGateway --name MyRewriteRule --resource-group MyResourceGroup --rule-set-name MyRuleSet
crafted: true
"""
helps['network application-gateway rewrite-rule update'] = """
short-summary: Update a rewrite rule.
type: command
parameters:
- name: --request-headers
populator-commands:
- az network application-gateway rewrite-rule list-request-headers
- name: --response-headers
populator-commands:
- az network application-gateway rewrite-rule list-response-headers
examples:
- name: Update a rewrite rule. (autogenerated)
text: |
az network application-gateway rewrite-rule update --gateway-name MyGateway --name MyRewriteRule --resource-group MyResourceGroup --rule-set-name MyRuleSet
crafted: true
"""
helps['network application-gateway root-cert'] = """
type: group
short-summary: Manage trusted root certificates of an application gateway.
"""
helps['network application-gateway root-cert create'] = """
type: command
short-summary: Upload a trusted root certificate.
examples:
- name: Upload a trusted root certificate. (autogenerated)
text: |
az network application-gateway root-cert create --cert-file /path/to/cert/file --gateway-name MyGateway --name MyTrustedRootCertificate --resource-group MyResourceGroup
crafted: true
"""
helps['network application-gateway root-cert delete'] = """
type: command
short-summary: Delete a trusted root certificate.
examples:
- name: Delete a trusted root certificate.
text: az network application-gateway root-cert delete -g MyResourceGroup --gateway-name MyAppGateway -n MyRootCert
"""
helps['network application-gateway root-cert list'] = """
type: command
short-summary: List trusted root certificates.
examples:
- name: List trusted root certificates.
text: az network application-gateway root-cert list -g MyResourceGroup --gateway-name MyAppGateway
"""
helps['network application-gateway root-cert show'] = """
type: command
short-summary: Get the details of a trusted root certificate.
examples:
- name: Get the details of a trusted root certificate.
text: az network application-gateway root-cert show -g MyResourceGroup --gateway-name MyAppGateway -n MyRootCert
"""
helps['network application-gateway root-cert update'] = """
type: command
short-summary: Update a trusted root certificate.
examples:
- name: Update a trusted root certificate. (autogenerated)
text: |
az network application-gateway root-cert update --cert-file /path/to/cert/file --gateway-name MyGateway --name MyTrustedRootCertificate --resource-group MyResourceGroup
crafted: true
"""
helps['network application-gateway rule'] = """
type: group
short-summary: Evaluate probe information and define routing rules.
long-summary: >
For more information, visit, https://docs.microsoft.com/azure/application-gateway/application-gateway-customize-waf-rules-cli
"""
helps['network application-gateway rule create'] = """
type: command
short-summary: Create a rule.
long-summary: Rules are executed in the order in which they are created.
examples:
- name: Create a basic rule.
text: |
az network application-gateway rule create -g MyResourceGroup --gateway-name MyAppGateway \\
-n MyRule --http-listener MyBackendListener --rule-type Basic --address-pool MyAddressPool --http-settings MyHttpSettings
"""
helps['network application-gateway rule delete'] = """
type: command
short-summary: Delete a rule.
examples:
- name: Delete a rule.
text: az network application-gateway rule delete -g MyResourceGroup --gateway-name MyAppGateway -n MyRule
"""
helps['network application-gateway rule list'] = """
type: command
short-summary: List rules.
examples:
- name: List rules.
text: az network application-gateway rule list -g MyResourceGroup --gateway-name MyAppGateway
"""
helps['network application-gateway rule show'] = """
type: command
short-summary: Get the details of a rule.
examples:
- name: Get the details of a rule.
text: az network application-gateway rule show -g MyResourceGroup --gateway-name MyAppGateway -n MyRule
"""
helps['network application-gateway rule update'] = """
type: command
short-summary: Update a rule.
examples:
  - name: Update a rule to use a new HTTP listener.
text: |
az network application-gateway rule update -g MyResourceGroup --gateway-name MyAppGateway \\
-n MyRule --http-listener MyNewBackendListener
- name: Update a rule. (autogenerated)
text: |
az network application-gateway rule update --address-pool MyAddressPool --gateway-name MyAppGateway --name MyRule --resource-group MyResourceGroup
crafted: true
"""
helps['network application-gateway show'] = """
type: command
short-summary: Get the details of an application gateway.
examples:
- name: Get the details of an application gateway.
text: az network application-gateway show -g MyResourceGroup -n MyAppGateway
"""
helps['network application-gateway show-backend-health'] = """
type: command
short-summary: Get information on the backend health of an application gateway.
examples:
- name: Show backend health of an application gateway.
text: az network application-gateway show-backend-health -g MyResourceGroup -n MyAppGateway
"""
helps['network application-gateway ssl-cert'] = """
type: group
short-summary: Manage SSL certificates of an application gateway.
long-summary: For more information visit https://docs.microsoft.com/azure/application-gateway/application-gateway-ssl-cli
"""
helps['network application-gateway ssl-cert create'] = """
type: command
short-summary: Upload an SSL certificate.
examples:
- name: Upload an SSL certificate via --cert-file and --cert-password.
text: |
az network application-gateway ssl-cert create -g MyResourceGroup --gateway-name MyAppGateway \\
-n MySSLCert --cert-file \\path\\to\\cert\\file --cert-password Abc123
- name: |-
Upload an SSL certificate via --key-vault-secret-id of a KeyVault Secret
with Base64 encoded value of an unencrypted pfx
text: |-
openssl req -x509 -nodes -days 365 -newkey rsa:2048 \\
-out azure-cli-app-tls.crt \\
-keyout azure-cli-app-tls.key \\
-subj "/CN=azure-cli-app"
openssl pkcs12 -export \\
-in azure-cli-tls.crt \\
-inkey sample-app-tls.key \\
-passout pass: -out azure-cli-cert.pfx
SecretValue=$(cat azure-cli-cert.pfx | base64)
az keyvault secret set --vault-name MyKeyVault --name MySecret --value ${SecretValue}
az network application-gateway ssl-cert create \\
--resource-group MyResourceGroup \\
--gateway-name MyAppGateway \\
-n MySSLCert \\
--key-vault-secret-id MySecretSecretID
- name: |-
Upload an SSL certificate via --key-vault-secret-id of a KeyVault Certificate
text: |-
az keyvault certificate create \\
--vault-name MyKeyVault \\
--name MyCertificate \\
--policy "$(az keyvault certificate get-default-policy)" \\
az network application-gateway ssl-cert create \\
--resource-group MyResourceGroup \\
--gateway-name MyAppGateway \\
-n MySSLCert \\
--key-vault-secret-id MyCertificateSecretID
"""
helps['network application-gateway ssl-cert delete'] = """
type: command
short-summary: Delete an SSL certificate.
examples:
- name: Delete an SSL certificate.
text: az network application-gateway ssl-cert delete -g MyResourceGroup --gateway-name MyAppGateway -n MySslCert
"""
helps['network application-gateway ssl-cert list'] = """
type: command
short-summary: List SSL certificates.
examples:
- name: List SSL certificates.
text: az network application-gateway ssl-cert list -g MyResourceGroup --gateway-name MyAppGateway
"""
helps['network application-gateway ssl-cert show'] = """
type: command
short-summary: Get the details of an SSL certificate.
examples:
- name: Get the details of an SSL certificate.
text: az network application-gateway ssl-cert show -g MyResourceGroup --gateway-name MyAppGateway -n MySslCert
- name: Display the expiry date of SSL certificate. The certificate is returned in PKCS7 format from which the expiration date needs to be retrieved.
text: |
publiccert=`az network application-gateway ssl-cert show -g MyResourceGroup --gateway-name MyAppGateway --name mywebsite.com --query publicCertData -o tsv`
echo "-----BEGIN CERTIFICATE-----" >> public.cert; echo "${publiccert}" >> public.cert; echo "-----END CERTIFICATE-----" >> public.cert
cat public.cert | fold -w 64 | openssl pkcs7 -print_certs | openssl x509 -noout -enddate
"""
helps['network application-gateway ssl-cert update'] = """
type: command
short-summary: Update an SSL certificate.
examples:
- name: Change a gateway SSL certificate and password.
text: |
az network application-gateway ssl-cert update -g MyResourceGroup --gateway-name MyAppGateway -n MySslCert \\
--cert-file \\path\\to\\new\\cert\\file --cert-password Abc123Abc123
"""
helps['network application-gateway ssl-policy'] = """
type: group
short-summary: Manage the SSL policy of an application gateway.
"""
helps['network application-gateway ssl-policy list-options'] = """
type: command
short-summary: List available SSL options for configuring SSL policy.
examples:
- name: List available SSL options for configuring SSL policy.
text: az network application-gateway ssl-policy list-options
"""
helps['network application-gateway ssl-policy predefined'] = """
type: group
short-summary: Get information on predefined SSL policies.
"""
helps['network application-gateway ssl-policy predefined list'] = """
type: command
short-summary: List all SSL predefined policies for configuring SSL policy.
examples:
  - name: List all SSL predefined policies for configuring SSL policy.
text: az network application-gateway ssl-policy predefined list
"""
helps['network application-gateway ssl-policy predefined show'] = """
type: command
short-summary: Get the SSL predefined policy with the specified policy name.
examples:
  - name: Get the SSL predefined policy with the specified policy name.
text: az network application-gateway ssl-policy predefined show -n AppGwSslPolicy20170401
"""
helps['network application-gateway ssl-policy set'] = """
type: command
short-summary: Update or clear SSL policy settings.
long-summary: To view the predefined policies, use `az network application-gateway ssl-policy predefined list`.
parameters:
- name: --cipher-suites
populator-commands:
- az network application-gateway ssl-policy list-options
- name: --disabled-ssl-protocols
populator-commands:
- az network application-gateway ssl-policy list-options
- name: --min-protocol-version
populator-commands:
- az network application-gateway ssl-policy list-options
examples:
- name: Set a predefined SSL policy.
text: |
az network application-gateway ssl-policy set -g MyResourceGroup --gateway-name MyAppGateway \\
-n AppGwSslPolicy20170401S --policy-type Predefined
- name: Set a custom SSL policy with TLSv1_2 and the cipher suites below.
text: |
az network application-gateway ssl-policy set -g MyResourceGroup --gateway-name MyAppGateway \\
--policy-type Custom --min-protocol-version TLSv1_2 \\
--cipher-suites TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 TLS_RSA_WITH_AES_128_GCM_SHA256
"""
helps['network application-gateway ssl-policy show'] = """
type: command
short-summary: Get the details of a gateway's SSL policy settings.
examples:
- name: Get the details of a gateway's SSL policy settings.
text: az network application-gateway ssl-policy show -g MyResourceGroup --gateway-name MyAppGateway
"""
helps['network application-gateway start'] = """
type: command
short-summary: Start an application gateway.
examples:
- name: Start an application gateway.
text: az network application-gateway start -g MyResourceGroup -n MyAppGateway
"""
helps['network application-gateway stop'] = """
type: command
short-summary: Stop an application gateway.
examples:
- name: Stop an application gateway.
text: az network application-gateway stop -g MyResourceGroup -n MyAppGateway
"""
helps['network application-gateway update'] = """
type: command
short-summary: Update an application gateway.
examples:
- name: Update an application gateway. (autogenerated)
text: |
az network application-gateway update --name MyApplicationGateway --resource-group MyResourceGroup --set useRemoteGateways=true
crafted: true
"""
helps['network application-gateway url-path-map'] = """
type: group
short-summary: Manage URL path maps of an application gateway.
"""
helps['network application-gateway url-path-map create'] = """
type: command
short-summary: Create a URL path map.
long-summary: >
The map must be created with at least one rule. This command requires the creation of the
first rule at the time the map is created. To learn more
visit https://docs.microsoft.com/azure/application-gateway/application-gateway-create-url-route-cli
examples:
- name: Create a URL path map with a rule.
text: |
az network application-gateway url-path-map create -g MyResourceGroup --gateway-name MyAppGateway \\
-n MyUrlPathMap --rule-name MyUrlPathMapRule1 --paths /mypath1/* --address-pool MyAddressPool \\
--default-address-pool MyAddressPool --http-settings MyHttpSettings --default-http-settings MyHttpSettings
"""
helps['network application-gateway url-path-map delete'] = """
type: command
short-summary: Delete a URL path map.
examples:
- name: Delete a URL path map.
text: az network application-gateway url-path-map delete -g MyResourceGroup --gateway-name MyAppGateway -n MyUrlPathMap
"""
helps['network application-gateway url-path-map list'] = """
type: command
short-summary: List URL path maps.
examples:
- name: List URL path maps.
text: az network application-gateway url-path-map list -g MyResourceGroup --gateway-name MyAppGateway
"""
helps['network application-gateway url-path-map rule'] = """
type: group
short-summary: Manage the rules of a URL path map.
"""
helps['network application-gateway url-path-map rule create'] = """
type: command
short-summary: Create a rule for a URL path map.
examples:
- name: Create a rule for a URL path map.
text: |
az network application-gateway url-path-map rule create -g MyResourceGroup \\
--gateway-name MyAppGateway -n MyUrlPathMapRule2 --path-map-name MyUrlPathMap \\
--paths /mypath2/* --address-pool MyAddressPool --http-settings MyHttpSettings
"""
helps['network application-gateway url-path-map rule delete'] = """
type: command
short-summary: Delete a rule of a URL path map.
examples:
- name: Delete a rule of a URL path map.
text: |
az network application-gateway url-path-map rule delete -g MyResourceGroup --gateway-name MyAppGateway \\
--path-map-name MyUrlPathMap -n MyUrlPathMapRule2
"""
helps['network application-gateway url-path-map show'] = """
type: command
short-summary: Get the details of a URL path map.
examples:
- name: Get the details of a URL path map.
text: az network application-gateway url-path-map show -g MyResourceGroup --gateway-name MyAppGateway -n MyUrlPathMap
"""
helps['network application-gateway url-path-map update'] = """
type: command
short-summary: Update a URL path map.
examples:
- name: Update a URL path map to use new default HTTP settings.
text: |
az network application-gateway url-path-map update -g MyResourceGroup --gateway-name MyAppGateway \\
-n MyUrlPathMap --default-http-settings MyNewHttpSettings
- name: Update a URL path map. (autogenerated)
text: |
az network application-gateway url-path-map update --default-address-pool MyAddressPool --default-http-settings MyNewHttpSettings --gateway-name MyAppGateway --name MyUrlPathMap --resource-group MyResourceGroup
crafted: true
- name: Update a URL path map. (autogenerated)
text: |
az network application-gateway url-path-map update --gateway-name MyAppGateway --name MyUrlPathMap --resource-group MyResourceGroup --set useRemoteGateways=true
crafted: true
"""
helps['network application-gateway waf-config'] = """
type: group
short-summary: Configure the settings of a web application firewall.
long-summary: >
These commands are only applicable to application gateways with an SKU type of WAF. To learn
more, visit https://docs.microsoft.com/azure/application-gateway/application-gateway-web-application-firewall-cli
"""
helps['network application-gateway waf-config list-rule-sets'] = """
type: command
short-summary: Get information on available WAF rule sets, rule groups, and rule IDs.
parameters:
- name: --group
short-summary: >
List rules for the specified rule group. Use `*` to list rules for all groups.
Omit to suppress listing individual rules.
- name: --type
short-summary: Rule set type to list. Omit to list all types.
- name: --version
short-summary: Rule set version to list. Omit to list all versions.
examples:
- name: List available rule groups in OWASP type rule sets.
text: az network application-gateway waf-config list-rule-sets --type OWASP
- name: List available rules in the OWASP 3.0 rule set.
text: az network application-gateway waf-config list-rule-sets --group '*' --type OWASP --version 3.0
- name: List available rules in the `crs_35_bad_robots` rule group.
text: az network application-gateway waf-config list-rule-sets --group crs_35_bad_robots
- name: List available rules in table format.
text: az network application-gateway waf-config list-rule-sets -o table
"""
helps['network application-gateway waf-config set'] = """
type: command
short-summary: Update the firewall configuration of a web application.
long-summary: >
This command is only applicable to application gateways with an SKU type of WAF. To learn
more, visit https://docs.microsoft.com/azure/application-gateway/application-gateway-web-application-firewall-cli
parameters:
- name: --rule-set-type
short-summary: Rule set type.
populator-commands:
- az network application-gateway waf-config list-rule-sets
- name: --rule-set-version
short-summary: Rule set version.
populator-commands:
- az network application-gateway waf-config list-rule-sets
- name: --disabled-rule-groups
short-summary: Space-separated list of rule groups to disable. To disable individual rules, use `--disabled-rules`.
populator-commands:
- az network application-gateway waf-config list-rule-sets
- name: --disabled-rules
short-summary: Space-separated list of rule IDs to disable.
populator-commands:
- az network application-gateway waf-config list-rule-sets
- name: --exclusion
short-summary: Add an exclusion expression to the WAF check.
long-summary: |
Usage: --exclusion VARIABLE OPERATOR VALUE
Multiple exclusions can be specified by using more than one `--exclusion` argument.
examples:
- name: Configure WAF on an application gateway in detection mode with default values
text: |
az network application-gateway waf-config set -g MyResourceGroup --gateway-name MyAppGateway \\
--enabled true --firewall-mode Detection --rule-set-version 3.0
- name: Disable rules for validation of request body parsing and SQL injection.
text: |
az network application-gateway waf-config set -g MyResourceGroup --gateway-name MyAppGateway \\
--enabled true --rule-set-type OWASP --rule-set-version 3.0 \\
--disabled-rule-groups REQUEST-942-APPLICATION-ATTACK-SQLI \\
--disabled-rules 920130 920140
- name: Configure WAF on an application gateway with exclusions.
text: |
az network application-gateway waf-config set -g MyResourceGroup --gateway-name MyAppGateway \\
--enabled true --firewall-mode Detection --rule-set-version 3.0 \\
--exclusion "RequestHeaderNames StartsWith x-header" \\
--exclusion "RequestArgNames Equals IgnoreThis"
"""
helps['network application-gateway waf-config show'] = """
type: command
short-summary: Get the firewall configuration of a web application.
examples:
- name: Get the firewall configuration of a web application.
text: az network application-gateway waf-config show -g MyResourceGroup --gateway-name MyAppGateway
"""
helps['network application-gateway waf-policy'] = """
type: group
short-summary: Manage application gateway web application firewall (WAF) policies.
"""
helps['network application-gateway waf-policy create'] = """
type: command
short-summary: Create an application gateway WAF policy.
examples:
- name: Create an application gateway WAF policy. (autogenerated)
text: |
az network application-gateway waf-policy create --name MyApplicationGatewayWAFPolicy --resource-group MyResourceGroup
crafted: true
"""
helps['network application-gateway waf-policy delete'] = """
type: command
short-summary: Delete an application gateway WAF policy.
examples:
- name: Delete an application gateway WAF policy. (autogenerated)
text: |
az network application-gateway waf-policy delete --name MyApplicationGatewayWAFPolicy --resource-group MyResourceGroup
crafted: true
"""
helps['network application-gateway waf-policy list'] = """
type: command
short-summary: List application gateway WAF policies.
examples:
- name: List application gateway WAF policies. (autogenerated)
text: |
az network application-gateway waf-policy list --resource-group MyResourceGroup
crafted: true
"""
helps['network application-gateway waf-policy policy-setting'] = """
type: group
short-summary: Defines contents of a web application firewall global configuration.
"""
helps['network application-gateway waf-policy policy-setting update'] = """
type: command
short-summary: Update properties of a web application firewall global configuration.
examples:
- name: Update properties of a web application firewall global configuration. (autogenerated)
text: |
az network application-gateway waf-policy policy-setting update --mode Prevention --policy-name MyPolicy --resource-group MyResourceGroup
crafted: true
"""
helps['network application-gateway waf-policy policy-setting list'] = """
type: command
short-summary: List properties of a web application firewall global configuration.
examples:
- name: List properties of a web application firewall global configuration. (autogenerated)
text: |
az network application-gateway waf-policy policy-setting list --policy-name MyPolicy --resource-group MyResourceGroup
crafted: true
"""
helps['network application-gateway waf-policy custom-rule'] = """
type: group
short-summary: Manage application gateway web application firewall (WAF) policy custom rules.
"""
helps['network application-gateway waf-policy custom-rule create'] = """
type: command
short-summary: Create an application gateway WAF policy custom rule.
"""
helps['network application-gateway waf-policy custom-rule delete'] = """
type: command
short-summary: Delete an application gateway WAF policy custom rule.
"""
helps['network application-gateway waf-policy custom-rule list'] = """
type: command
short-summary: List application gateway WAF policy custom rules.
examples:
- name: List application gateway WAF policy custom rules. (autogenerated)
text: |
az network application-gateway waf-policy custom-rule list --policy-name MyPolicy --resource-group MyResourceGroup
crafted: true
"""
# Fix copy-pasted group summary: this group manages custom-rule match
# conditions, not WAF policies themselves.
helps['network application-gateway waf-policy custom-rule match-condition'] = """
type: group
short-summary: Manage application gateway web application firewall (WAF) policy custom rule match conditions.
"""
# Fix summary missing its verb; sibling add/remove commands all start with a verb.
helps['network application-gateway waf-policy custom-rule match-condition add'] = """
type: command
short-summary: Add a match condition to an application gateway WAF policy custom rule.
"""
helps['network application-gateway waf-policy custom-rule match-condition list'] = """
type: command
short-summary: List application gateway WAF policy custom rule match conditions.
examples:
- name: List application gateway WAF policy custom rule match conditions. (autogenerated)
text: |
az network application-gateway waf-policy custom-rule match-condition list --name MyWAFPolicyRule --policy-name MyPolicy --resource-group MyResourceGroup --subscription MySubscription
crafted: true
"""
helps['network application-gateway waf-policy custom-rule match-condition remove'] = """
type: command
short-summary: Remove a match condition from an application gateway WAF policy custom rule.
"""
helps['network application-gateway waf-policy custom-rule show'] = """
type: command
short-summary: Get the details of an application gateway WAF policy custom rule.
examples:
- name: Get the details of an application gateway WAF policy custom rule. (autogenerated)
text: |
az network application-gateway waf-policy custom-rule show --name MyWAFPolicyRule --policy-name MyPolicy --resource-group MyResourceGroup
crafted: true
"""
helps['network application-gateway waf-policy custom-rule update'] = """
type: command
short-summary: Update an application gateway WAF policy custom rule.
examples:
- name: Update an application gateway WAF policy custom rule. (autogenerated)
text: |
az network application-gateway waf-policy custom-rule update --name MyWAFPolicyRule --policy-name MyPolicy --resource-group MyResourceGroup --set useRemoteGateways=true
crafted: true
"""
# This command group manages Application Gateway WAF managed rules, so link the
# Application Gateway (ag) WAF overview rather than the Front Door (afds) one;
# also align "WAF policy" casing with sibling summaries.
helps['network application-gateway waf-policy managed-rule'] = """
type: group
short-summary: >
    Manage managed rules of a WAF policy.
    Visit: https://docs.microsoft.com/en-us/azure/web-application-firewall/ag/ag-overview
"""
helps['network application-gateway waf-policy managed-rule rule-set'] = """
type: group
short-summary: Manage managed rule set of managed rules of a WAF policy.
"""
helps['network application-gateway waf-policy managed-rule rule-set add'] = """
type: command
short-summary: >
Add managed rule set to the WAF policy managed rules. For rule set and rules, please visit:
https://docs.microsoft.com/en-us/azure/web-application-firewall/ag/application-gateway-crs-rulegroups-rules
examples:
- name: Disable an attack protection rule
text: |
az network application-gateway waf-policy managed-rule rule-set add --policy-name MyPolicy -g MyResourceGroup --type OWASP --version 3.1 --group-name REQUEST-921-PROTOCOL-ATTACK --rules 921110
"""
# Fix typo in the example name: the OWASP CRS group is REQUEST-921-PROTOCOL-ATTACK
# (as used in the example command itself and in the `rule-set add` example).
helps['network application-gateway waf-policy managed-rule rule-set update'] = """
type: command
short-summary: >
    Manage rules of a WAF policy.
    If --group-name and --rules are provided, override existing rules. If --group-name is provided, clear all rules under a certain rule group. If neither of them are provided, update rule set and clear all rules under itself.
    For rule set and rules, please visit: https://docs.microsoft.com/en-us/azure/web-application-firewall/ag/application-gateway-crs-rulegroups-rules
examples:
  - name: Override rules under rule group REQUEST-921-PROTOCOL-ATTACK
    text: |
        az network application-gateway waf-policy managed-rule rule-set update --policy-name MyPolicy -g MyResourceGroup --type OWASP --version 3.1 --group-name REQUEST-921-PROTOCOL-ATTACK --rules 921130 921160
  - name: Update the OWASP protocol version from 3.1 to 3.0 which will clear the old rules
    text: |
        az network application-gateway waf-policy managed-rule rule-set update --policy-name MyPolicy -g MyResourceGroup --type OWASP --version 3.0
"""
# Grammar fix: "remove all rule set" -> "remove all rule sets" (summary and example name).
helps['network application-gateway waf-policy managed-rule rule-set remove'] = """
type: command
short-summary: >
    Remove a managed rule set by rule set group name if rule_group_name is specified. Otherwise, remove all rule sets.
examples:
  - name: Remove a managed rule set by rule set group name if rule_group_name is specified. Otherwise, remove all rule sets.
    text: |
        az network application-gateway waf-policy managed-rule rule-set remove --policy-name MyPolicy --resource-group MyResourceGroup --type OWASP --version 3.1
"""
# Grammar fix: "List all managed rule set." -> "List all managed rule sets."
helps['network application-gateway waf-policy managed-rule rule-set list'] = """
type: command
short-summary: List all managed rule sets.
examples:
  - name: List all managed rule sets. (autogenerated)
    text: |
        az network application-gateway waf-policy managed-rule rule-set list --policy-name MyPolicy --resource-group MyResourceGroup
    crafted: true
"""
helps['network application-gateway waf-policy managed-rule exclusion'] = """
type: group
short-summary: Manage OWASP CRS exclusions that are applied on a WAF policy managed rules.
"""
helps['network application-gateway waf-policy managed-rule exclusion add'] = """
type: command
short-summary: Add an OWASP CRS exclusion rule to the WAF policy managed rules.
"""
# Fix copy-pasted summary: this is the remove command, but its summary duplicated
# the list command's text ("List all OWASP CRS exclusion rules..."). Mirror the
# phrasing of the `exclusion add` summary.
helps['network application-gateway waf-policy managed-rule exclusion remove'] = """
type: command
short-summary: Remove an OWASP CRS exclusion rule from the WAF policy managed rules.
"""
helps['network application-gateway waf-policy managed-rule exclusion list'] = """
type: command
short-summary: List all OWASP CRS exclusion rules that are applied on a Waf policy managed rules.
"""
helps['network application-gateway waf-policy show'] = """
type: command
short-summary: Get the details of an application gateway WAF policy.
examples:
- name: Get the details of an application gateway WAF policy. (autogenerated)
text: |
az network application-gateway waf-policy show --name MyApplicationGatewayWAFPolicy --resource-group MyResourceGroup
crafted: true
"""
helps['network application-gateway waf-policy update'] = """
type: command
short-summary: Update an application gateway WAF policy.
examples:
- name: Update an application gateway WAF policy. (autogenerated)
text: |
az network application-gateway waf-policy update --add communities='12076:5010' --name MyApplicationGatewayWAFPolicy --resource-group MyResourceGroup
crafted: true
- name: Update an application gateway WAF policy. (autogenerated)
text: |
az network application-gateway waf-policy update --name MyApplicationGatewayWAFPolicy --remove tags.no_80 --resource-group MyResourceGroup
crafted: true
"""
helps['network application-gateway waf-policy wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a condition of the application gateway WAF policy is met.
"""
helps['network application-gateway wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a condition of the application gateway is met.
examples:
- name: Place the CLI in a waiting state until the application gateway is created.
text: az network application-gateway wait -g MyResourceGroup -n MyAppGateway --created
"""
helps['network asg'] = """
type: group
short-summary: Manage application security groups (ASGs).
long-summary: >
You can configure network security as a natural extension of an application's structure, ASG allows
you to group virtual machines and define network security policies based on those groups. You can specify an
application security group as the source and destination in a NSG security rule. For more information
visit https://docs.microsoft.com/azure/virtual-network/create-network-security-group-preview
"""
helps['network asg create'] = """
type: command
short-summary: Create an application security group.
parameters:
- name: --name -n
short-summary: Name of the new application security group resource.
examples:
- name: Create an application security group.
text: az network asg create -g MyResourceGroup -n MyAsg --tags MyWebApp, CostCenter=Marketing
"""
helps['network asg delete'] = """
type: command
short-summary: Delete an application security group.
examples:
- name: Delete an application security group.
text: az network asg delete -g MyResourceGroup -n MyAsg
"""
helps['network asg list'] = """
type: command
short-summary: List all application security groups in a subscription.
examples:
- name: List all application security groups in a subscription.
text: az network asg list
"""
helps['network asg show'] = """
type: command
short-summary: Get details of an application security group.
examples:
- name: Get details of an application security group.
text: az network asg show -g MyResourceGroup -n MyAsg
"""
helps['network asg update'] = """
type: command
short-summary: Update an application security group.
long-summary: >
This command can only be used to update the tags for an application security group.
Name and resource group are immutable and cannot be updated.
examples:
- name: Update an application security group with a modified tag value.
text: az network asg update -g MyResourceGroup -n MyAsg --set tags.CostCenter=MyBusinessGroup
"""
helps['network ddos-protection'] = """
type: group
short-summary: Manage DDoS Protection Plans.
"""
helps['network ddos-protection create'] = """
type: command
short-summary: Create a DDoS protection plan.
parameters:
- name: --vnets
long-summary: >
This parameter can only be used if all the VNets are within the same subscription as
the DDoS protection plan. If this is not the case, set the protection plan on the VNet
directly using the `az network vnet update` command.
examples:
- name: Create a DDoS protection plan.
text: az network ddos-protection create -g MyResourceGroup -n MyDdosPlan
- name: Create a DDoS protection plan. (autogenerated)
text: |
az network ddos-protection create --location westus2 --name MyDdosPlan --resource-group MyResourceGroup
crafted: true
"""
helps['network ddos-protection delete'] = """
type: command
short-summary: Delete a DDoS protection plan.
examples:
- name: Delete a DDoS protection plan.
text: az network ddos-protection delete -g MyResourceGroup -n MyDdosPlan
"""
helps['network ddos-protection list'] = """
type: command
short-summary: List DDoS protection plans.
examples:
- name: List DDoS protection plans
text: az network ddos-protection list
"""
helps['network ddos-protection show'] = """
type: command
short-summary: Show details of a DDoS protection plan.
examples:
- name: Show details of a DDoS protection plan.
text: az network ddos-protection show -g MyResourceGroup -n MyDdosPlan
"""
helps['network ddos-protection update'] = """
type: command
short-summary: Update a DDoS protection plan.
parameters:
- name: --vnets
long-summary: >
This parameter can only be used if all the VNets are within the same subscription as
the DDoS protection plan. If this is not the case, set the protection plan on the VNet
directly using the `az network vnet update` command.
examples:
- name: Add a Vnet to a DDoS protection plan in the same subscription.
text: az network ddos-protection update -g MyResourceGroup -n MyDdosPlan --vnets MyVnet
- name: Update a DDoS protection plan. (autogenerated)
text: |
az network ddos-protection update --name MyDdosPlan --remove tags.no_80 --resource-group MyResourceGroup
crafted: true
"""
helps['network dns'] = """
type: group
short-summary: Manage DNS domains in Azure.
"""
helps['network dns record-set'] = """
type: group
short-summary: Manage DNS records and record sets.
"""
helps['network dns record-set a'] = """
type: group
short-summary: Manage DNS A records.
"""
helps['network dns record-set a add-record'] = """
type: command
short-summary: Add an A record.
examples:
- name: Add an A record.
text: |
az network dns record-set a add-record -g MyResourceGroup -z www.mysite.com \\
-n MyRecordSet -a MyIpv4Address
"""
helps['network dns record-set a create'] = """
type: command
short-summary: Create an empty A record set.
examples:
- name: Create an empty A record set.
text: az network dns record-set a create -g MyResourceGroup -z www.mysite.com -n MyRecordSet
- name: Create an empty A record set. (autogenerated)
text: |
az network dns record-set a create --name MyRecordSet --resource-group MyResourceGroup --ttl 30 --zone-name www.mysite.com
crafted: true
"""
helps['network dns record-set a delete'] = """
type: command
short-summary: Delete an A record set and all associated records.
examples:
- name: Delete an A record set and all associated records.
text: az network dns record-set a delete -g MyResourceGroup -z www.mysite.com -n MyRecordSet
"""
helps['network dns record-set a list'] = """
type: command
short-summary: List all A record sets in a zone.
examples:
- name: List all A record sets in a zone.
text: az network dns record-set a list -g MyResourceGroup -z www.mysite.com
"""
helps['network dns record-set a remove-record'] = """
type: command
short-summary: Remove an A record from its record set.
long-summary: >
By default, if the last record in a set is removed, the record set is deleted.
To retain the empty record set, include --keep-empty-record-set.
examples:
- name: Remove an A record from its record set.
text: |
az network dns record-set a remove-record -g MyResourceGroup -z www.mysite.com \\
-n MyRecordSet -a MyIpv4Address
"""
helps['network dns record-set a show'] = """
type: command
short-summary: Get the details of an A record set.
examples:
- name: Get the details of an A record set.
text: az network dns record-set a show -g MyResourceGroup -n MyRecordSet -z www.mysite.com
"""
helps['network dns record-set a update'] = """
type: command
short-summary: Update an A record set.
examples:
- name: Update an A record set.
text: |
az network dns record-set a update -g MyResourceGroup -n MyRecordSet \\
-z www.mysite.com --metadata owner=WebTeam
- name: Update an A record set. (autogenerated)
text: |
az network dns record-set a update --name MyRecordSet --resource-group MyResourceGroup --set tags.CostCenter=MyBusinessGroup --zone-name www.mysite.com
crafted: true
"""
helps['network dns record-set aaaa'] = """
type: group
short-summary: Manage DNS AAAA records.
"""
helps['network dns record-set aaaa add-record'] = """
type: command
short-summary: Add an AAAA record.
examples:
- name: Add an AAAA record.
text: |
az network dns record-set aaaa add-record -g MyResourceGroup -z www.mysite.com \\
-n MyRecordSet -a MyIpv6Address
"""
helps['network dns record-set aaaa create'] = """
type: command
short-summary: Create an empty AAAA record set.
examples:
- name: Create an empty AAAA record set.
text: az network dns record-set aaaa create -g MyResourceGroup -z www.mysite.com -n MyRecordSet
"""
helps['network dns record-set aaaa delete'] = """
type: command
short-summary: Delete an AAAA record set and all associated records.
examples:
- name: Delete an AAAA record set and all associated records.
text: az network dns record-set aaaa delete -g MyResourceGroup -z www.mysite.com -n MyRecordSet
"""
helps['network dns record-set aaaa list'] = """
type: command
short-summary: List all AAAA record sets in a zone.
examples:
- name: List all AAAA record sets in a zone.
text: az network dns record-set aaaa list -g MyResourceGroup -z www.mysite.com
- name: List all AAAA record sets in a zone. (autogenerated)
text: |
az network dns record-set aaaa list --resource-group MyResourceGroup --subscription MySubscription --zone-name www.mysite.com
crafted: true
"""
# Consistency fix: "Remove AAAA record" -> "Remove an AAAA record", matching every
# sibling remove-record summary (a, caa, cname, mx, ns, ptr).
helps['network dns record-set aaaa remove-record'] = """
type: command
short-summary: Remove an AAAA record from its record set.
long-summary: >
    By default, if the last record in a set is removed, the record set is deleted.
    To retain the empty record set, include --keep-empty-record-set.
examples:
  - name: Remove an AAAA record from its record set.
    text: |
        az network dns record-set aaaa remove-record -g MyResourceGroup -z www.mysite.com \\
          -n MyRecordSet -a MyIpv6Address
"""
helps['network dns record-set aaaa show'] = """
type: command
short-summary: Get the details of an AAAA record set.
examples:
- name: Get the details of an AAAA record set.
text: az network dns record-set aaaa show -g MyResourceGroup -z www.mysite.com -n MyRecordSet
"""
helps['network dns record-set aaaa update'] = """
type: command
short-summary: Update an AAAA record set.
examples:
- name: Update an AAAA record set.
text: |
az network dns record-set aaaa update -g MyResourceGroup -z www.mysite.com \\
-n MyRecordSet --metadata owner=WebTeam
"""
helps['network dns record-set caa'] = """
type: group
short-summary: Manage DNS CAA records.
"""
helps['network dns record-set caa add-record'] = """
type: command
short-summary: Add a CAA record.
examples:
- name: Add a CAA record.
text: |
az network dns record-set caa add-record -g MyResourceGroup -z www.mysite.com \\
-n MyRecordSet --flags 0 --tag "issue" --value "ca.contoso.com"
"""
helps['network dns record-set caa create'] = """
type: command
short-summary: Create an empty CAA record set.
examples:
- name: Create an empty CAA record set.
text: az network dns record-set caa create -g MyResourceGroup -z www.mysite.com -n MyRecordSet
- name: Create an empty CAA record set. (autogenerated)
text: |
az network dns record-set caa create --name MyRecordSet --resource-group MyResourceGroup --subscription MySubscription --zone-name www.mysite.com
crafted: true
"""
helps['network dns record-set caa delete'] = """
type: command
short-summary: Delete a CAA record set and all associated records.
examples:
- name: Delete a CAA record set and all associated records.
text: az network dns record-set caa delete -g MyResourceGroup -z www.mysite.com -n MyRecordSet
- name: Delete a CAA record set and all associated records. (autogenerated)
text: |
az network dns record-set caa delete --name MyRecordSet --resource-group MyResourceGroup --subscription MySubscription --zone-name www.mysite.com
crafted: true
"""
helps['network dns record-set caa list'] = """
type: command
short-summary: List all CAA record sets in a zone.
examples:
- name: List all CAA record sets in a zone.
text: az network dns record-set caa list -g MyResourceGroup -z www.mysite.com
"""
helps['network dns record-set caa remove-record'] = """
type: command
short-summary: Remove a CAA record from its record set.
long-summary: >
By default, if the last record in a set is removed, the record set is deleted.
To retain the empty record set, include --keep-empty-record-set.
examples:
- name: Remove a CAA record from its record set.
text: |
az network dns record-set caa remove-record -g MyResourceGroup -z www.mysite.com \\
-n MyRecordSet --flags 0 --tag "issue" --value "ca.contoso.com"
"""
helps['network dns record-set caa show'] = """
type: command
short-summary: Get the details of a CAA record set.
examples:
- name: Get the details of a CAA record set.
text: az network dns record-set caa show -g MyResourceGroup -z www.mysite.com -n MyRecordSet
"""
helps['network dns record-set caa update'] = """
type: command
short-summary: Update a CAA record set.
examples:
- name: Update a CAA record set.
text: |
az network dns record-set caa update -g MyResourceGroup -z www.mysite.com \\
-n MyRecordSet --metadata owner=WebTeam
"""
helps['network dns record-set cname'] = """
type: group
short-summary: Manage DNS CNAME records.
"""
helps['network dns record-set cname create'] = """
type: command
short-summary: Create an empty CNAME record set.
examples:
- name: Create an empty CNAME record set.
text: az network dns record-set cname create -g MyResourceGroup -z www.mysite.com -n MyRecordSet
- name: Create an empty CNAME record set. (autogenerated)
text: |
az network dns record-set cname create --name MyRecordSet --resource-group MyResourceGroup --ttl 30 --zone-name www.mysite.com
crafted: true
"""
helps['network dns record-set cname delete'] = """
type: command
short-summary: Delete a CNAME record set and its associated record.
examples:
- name: Delete a CNAME record set and its associated record.
text: az network dns record-set cname delete -g MyResourceGroup -z www.mysite.com -n MyRecordSet
"""
helps['network dns record-set cname list'] = """
type: command
short-summary: List the CNAME record set in a zone.
examples:
- name: List the CNAME record set in a zone.
text: az network dns record-set cname list -g MyResourceGroup -z www.mysite.com
"""
helps['network dns record-set cname remove-record'] = """
type: command
short-summary: Remove a CNAME record from its record set.
long-summary: >
By default, if the last record in a set is removed, the record set is deleted.
To retain the empty record set, include --keep-empty-record-set.
examples:
- name: Remove a CNAME record from its record set.
text: |
az network dns record-set cname remove-record -g MyResourceGroup -z www.mysite.com \\
-n MyRecordSet -c www.contoso.com
"""
helps['network dns record-set cname set-record'] = """
type: command
short-summary: Set the value of a CNAME record.
examples:
- name: Set the value of a CNAME record.
text: |
az network dns record-set cname set-record -g MyResourceGroup -z www.mysite.com \\
-n MyRecordSet -c www.contoso.com
"""
helps['network dns record-set cname show'] = """
type: command
short-summary: Get the details of a CNAME record set.
examples:
- name: Get the details of a CNAME record set.
text: az network dns record-set cname show -g MyResourceGroup -z www.mysite.com -n MyRecordSet
"""
helps['network dns record-set list'] = """
type: command
short-summary: List all record sets within a DNS zone.
examples:
- name: List all "@" record sets within this zone.
text: az network dns record-set list -g MyResourceGroup -z www.mysite.com --query "[?name=='@']"
"""
helps['network dns record-set mx'] = """
type: group
short-summary: Manage DNS MX records.
"""
helps['network dns record-set mx add-record'] = """
type: command
short-summary: Add an MX record.
examples:
- name: Add an MX record.
text: |
az network dns record-set mx add-record -g MyResourceGroup -z www.mysite.com \\
-n MyRecordSet -e mail.mysite.com -p 10
"""
helps['network dns record-set mx create'] = """
type: command
short-summary: Create an empty MX record set.
examples:
- name: Create an empty MX record set.
text: az network dns record-set mx create -g MyResourceGroup -z www.mysite.com -n MyRecordSet
- name: Create an empty MX record set. (autogenerated)
text: |
az network dns record-set mx create --name MyRecordSet --resource-group MyResourceGroup --ttl 30 --zone-name www.mysite.com
crafted: true
"""
helps['network dns record-set mx delete'] = """
type: command
short-summary: Delete an MX record set and all associated records.
examples:
- name: Delete an MX record set and all associated records.
text: az network dns record-set mx delete -g MyResourceGroup -z www.mysite.com -n MyRecordSet
"""
helps['network dns record-set mx list'] = """
type: command
short-summary: List all MX record sets in a zone.
examples:
- name: List all MX record sets in a zone.
text: az network dns record-set mx list -g MyResourceGroup -z www.mysite.com
- name: List all MX record sets in a zone (autogenerated)
text: |
az network dns record-set mx list --resource-group MyResourceGroup --subscription MySubscription --zone-name www.mysite.com
crafted: true
"""
helps['network dns record-set mx remove-record'] = """
type: command
short-summary: Remove an MX record from its record set.
long-summary: >
By default, if the last record in a set is removed, the record set is deleted.
To retain the empty record set, include --keep-empty-record-set.
examples:
- name: Remove an MX record from its record set.
text: |
az network dns record-set mx remove-record -g MyResourceGroup -z www.mysite.com \\
-n MyRecordSet -e mail.mysite.com -p 10
"""
helps['network dns record-set mx show'] = """
type: command
short-summary: Get the details of an MX record set.
examples:
- name: Get the details of an MX record set.
text: az network dns record-set mx show -g MyResourceGroup -z www.mysite.com -n MyRecordSet
"""
helps['network dns record-set mx update'] = """
type: command
short-summary: Update an MX record set.
examples:
- name: Update an MX record set.
text: |
az network dns record-set mx update -g MyResourceGroup -z www.mysite.com \\
-n MyRecordSet --metadata owner=WebTeam
- name: Update an MX record set. (autogenerated)
text: |
az network dns record-set mx update --name MyRecordSet --resource-group MyResourceGroup --set tags.CostCenter=MyBusinessGroup --zone-name www.mysite.com
crafted: true
"""
helps['network dns record-set ns'] = """
type: group
short-summary: Manage DNS NS records.
"""
helps['network dns record-set ns add-record'] = """
type: command
short-summary: Add an NS record.
examples:
- name: Add an NS record.
text: |
az network dns record-set ns add-record -g MyResourceGroup -z www.mysite.com \\
-n MyRecordSet -d ns.mysite.com
"""
helps['network dns record-set ns create'] = """
type: command
short-summary: Create an empty NS record set.
examples:
- name: Create an empty NS record set.
text: az network dns record-set ns create -g MyResourceGroup -z www.mysite.com -n MyRecordSet
- name: Create an empty NS record set. (autogenerated)
text: |
az network dns record-set ns create --name MyRecordSet --resource-group MyResourceGroup --ttl 30 --zone-name www.mysite.com
crafted: true
"""
helps['network dns record-set ns delete'] = """
type: command
short-summary: Delete an NS record set and all associated records.
examples:
- name: Delete an NS record set and all associated records.
text: az network dns record-set ns delete -g MyResourceGroup -z www.mysite.com -n MyRecordSet
- name: Delete an NS record set and all associated records. (autogenerated)
text: |
az network dns record-set ns delete --name MyRecordSet --resource-group MyResourceGroup --subscription MySubscription --yes --zone-name www.mysite.com
crafted: true
"""
helps['network dns record-set ns list'] = """
type: command
short-summary: List all NS record sets in a zone.
examples:
- name: List all NS record sets in a zone.
text: az network dns record-set ns list -g MyResourceGroup -z www.mysite.com
"""
helps['network dns record-set ns remove-record'] = """
type: command
short-summary: Remove an NS record from its record set.
long-summary: >
By default, if the last record in a set is removed, the record set is deleted.
To retain the empty record set, include --keep-empty-record-set.
examples:
- name: Remove an NS record from its record set.
text: |
az network dns record-set ns remove-record -g MyResourceGroup -z www.mysite.com \\
-n MyRecordSet -d ns.mysite.com
- name: Remove an NS record from its record set. (autogenerated)
text: |
az network dns record-set ns remove-record --keep-empty-record-set --nsdname ns.mysite.com --record-set-name MyRecordSet --resource-group MyResourceGroup --subscription MySubscription --zone-name www.mysite.com
crafted: true
"""
helps['network dns record-set ns show'] = """
type: command
short-summary: Get the details of an NS record set.
examples:
- name: Get the details of an NS record set.
text: az network dns record-set ns show -g MyResourceGroup -z www.mysite.com -n MyRecordSet
"""
helps['network dns record-set ns update'] = """
type: command
short-summary: Update an NS record set.
examples:
- name: Update an NS record set.
text: |
az network dns record-set ns update -g MyResourceGroup -z www.mysite.com \\
-n MyRecordSet --metadata owner=WebTeam
- name: Update an NS record set. (autogenerated)
text: |
az network dns record-set ns update --name MyRecordSet --resource-group MyResourceGroup --set tags.CostCenter=MyBusinessGroup --zone-name www.mysite.com
crafted: true
"""
helps['network dns record-set ptr'] = """
type: group
short-summary: Manage DNS PTR records.
"""
helps['network dns record-set ptr add-record'] = """
type: command
short-summary: Add a PTR record.
examples:
- name: Add a PTR record.
text: |
az network dns record-set ptr add-record -g MyResourceGroup -z www.mysite.com \\
-n MyRecordSet -d another.site.com
"""
helps['network dns record-set ptr create'] = """
type: command
short-summary: Create an empty PTR record set.
examples:
- name: Create an empty PTR record set.
text: az network dns record-set ptr create -g MyResourceGroup -z www.mysite.com -n MyRecordSet
"""
helps['network dns record-set ptr delete'] = """
type: command
short-summary: Delete a PTR record set and all associated records.
examples:
- name: Delete a PTR record set and all associated records.
text: az network dns record-set ptr delete -g MyResourceGroup -z www.mysite.com -n MyRecordSet
"""
helps['network dns record-set ptr list'] = """
type: command
short-summary: List all PTR record sets in a zone.
examples:
- name: List all PTR record sets in a zone.
text: az network dns record-set ptr list -g MyResourceGroup -z www.mysite.com
- name: List all PTR record sets in a zone. (autogenerated)
text: |
az network dns record-set ptr list --resource-group MyResourceGroup --subscription MySubscription --zone-name www.mysite.com
crafted: true
"""
helps['network dns record-set ptr remove-record'] = """
type: command
short-summary: Remove a PTR record from its record set.
long-summary: >
By default, if the last record in a set is removed, the record set is deleted.
To retain the empty record set, include --keep-empty-record-set.
examples:
- name: Remove a PTR record from its record set.
text: |
az network dns record-set ptr remove-record -g MyResourceGroup -z www.mysite.com \\
-n MyRecordSet -d another.site.com
"""
helps['network dns record-set ptr show'] = """
type: command
short-summary: Get the details of a PTR record set.
examples:
- name: Get the details of a PTR record set.
text: az network dns record-set ptr show -g MyResourceGroup -z www.mysite.com -n MyRecordSet
"""
helps['network dns record-set ptr update'] = """
type: command
short-summary: Update a PTR record set.
examples:
- name: Update a PTR record set.
text: |
az network dns record-set ptr update -g MyResourceGroup -z www.mysite.com \\
-n MyRecordSet --metadata owner=WebTeam
"""
helps['network dns record-set soa'] = """
type: group
short-summary: Manage a DNS SOA record.
"""
helps['network dns record-set soa show'] = """
type: command
short-summary: Get the details of an SOA record.
examples:
- name: Get the details of an SOA record.
text: az network dns record-set soa show -g MyResourceGroup -z www.mysite.com
- name: Get the details of an SOA record (autogenerated)
text: |
az network dns record-set soa show --resource-group MyResourceGroup --subscription MySubscription --zone-name www.mysite.com
crafted: true
"""
helps['network dns record-set soa update'] = """
type: command
short-summary: Update properties of an SOA record.
examples:
- name: Update properties of an SOA record.
text: |
az network dns record-set soa update -g MyResourceGroup -z www.mysite.com \\
-e myhostmaster.mysite.com
"""
helps['network dns record-set srv'] = """
type: group
short-summary: Manage DNS SRV records.
"""
helps['network dns record-set srv add-record'] = """
type: command
short-summary: Add an SRV record.
examples:
- name: Add an SRV record.
text: |
az network dns record-set srv add-record -g MyResourceGroup -z www.mysite.com \\
-n MyRecordSet -t webserver.mysite.com -r 8081 -p 10 -w 10
"""
helps['network dns record-set srv create'] = """
type: command
short-summary: Create an empty SRV record set.
examples:
- name: Create an empty SRV record set.
text: |
az network dns record-set srv create -g MyResourceGroup -z www.mysite.com \\
-n MyRecordSet
- name: Create an empty SRV record set. (autogenerated)
text: |
az network dns record-set srv create --metadata owner=WebTeam --name MyRecordSet --resource-group MyResourceGroup --ttl 30 --zone-name www.mysite.com
crafted: true
"""
helps['network dns record-set srv delete'] = """
type: command
short-summary: Delete an SRV record set and all associated records.
examples:
- name: Delete an SRV record set and all associated records.
text: az network dns record-set srv delete -g MyResourceGroup -z www.mysite.com -n MyRecordSet
"""
helps['network dns record-set srv list'] = """
type: command
short-summary: List all SRV record sets in a zone.
examples:
- name: List all SRV record sets in a zone.
text: az network dns record-set srv list -g MyResourceGroup -z www.mysite.com
"""
helps['network dns record-set srv remove-record'] = """
type: command
short-summary: Remove an SRV record from its record set.
long-summary: >
By default, if the last record in a set is removed, the record set is deleted.
To retain the empty record set, include --keep-empty-record-set.
examples:
- name: Remove an SRV record from its record set.
text: |
az network dns record-set srv remove-record -g MyResourceGroup -z www.mysite.com \\
-n MyRecordSet -t webserver.mysite.com -r 8081 -p 10 -w 10
"""
helps['network dns record-set srv show'] = """
type: command
short-summary: Get the details of an SRV record set.
examples:
- name: Get the details of an SRV record set.
text: az network dns record-set srv show -g MyResourceGroup -z www.mysite.com -n MyRecordSet
"""
helps['network dns record-set srv update'] = """
type: command
short-summary: Update an SRV record set.
examples:
- name: Update an SRV record set.
text: |
az network dns record-set srv update -g MyResourceGroup -z www.mysite.com \\
-n MyRecordSet --metadata owner=WebTeam
"""
helps['network dns record-set txt'] = """
type: group
short-summary: Manage DNS TXT records.
"""
helps['network dns record-set txt add-record'] = """
type: command
short-summary: Add a TXT record.
examples:
- name: Add a TXT record.
text: |
az network dns record-set txt add-record -g MyResourceGroup -z www.mysite.com \\
-n MyRecordSet -v Owner=WebTeam
"""
helps['network dns record-set txt create'] = """
type: command
short-summary: Create an empty TXT record set.
examples:
- name: Create an empty TXT record set.
text: az network dns record-set txt create -g MyResourceGroup -z www.mysite.com -n MyRecordSet
- name: Create an empty TXT record set. (autogenerated)
text: |
az network dns record-set txt create --name MyRecordSet --resource-group MyResourceGroup --ttl 30 --zone-name www.mysite.com
crafted: true
"""
helps['network dns record-set txt delete'] = """
type: command
short-summary: Delete a TXT record set and all associated records.
examples:
- name: Delete a TXT record set and all associated records.
text: az network dns record-set txt delete -g MyResourceGroup -z www.mysite.com -n MyRecordSet
"""
helps['network dns record-set txt list'] = """
type: command
short-summary: List all TXT record sets in a zone.
examples:
- name: List all TXT record sets in a zone.
text: az network dns record-set txt list -g MyResourceGroup -z www.mysite.com
"""
helps['network dns record-set txt remove-record'] = """
type: command
short-summary: Remove a TXT record from its record set.
long-summary: >
By default, if the last record in a set is removed, the record set is deleted.
To retain the empty record set, include --keep-empty-record-set.
examples:
- name: Remove a TXT record from its record set.
text: |
az network dns record-set txt remove-record -g MyResourceGroup -z www.mysite.com \\
-n MyRecordSet -v Owner=WebTeam
"""
helps['network dns record-set txt show'] = """
type: command
short-summary: Get the details of a TXT record set.
examples:
- name: Get the details of a TXT record set.
text: az network dns record-set txt show -g MyResourceGroup -z www.mysite.com -n MyRecordSet
- name: Get the details of a TXT record set. (autogenerated)
text: |
az network dns record-set txt show --name MyRecordSet --resource-group MyResourceGroup --subscription MySubscription --zone-name www.mysite.com
crafted: true
"""
helps['network dns record-set txt update'] = """
type: command
short-summary: Update a TXT record set.
examples:
- name: Update a TXT record set.
text: |
az network dns record-set txt update -g MyResourceGroup -z www.mysite.com \\
-n MyRecordSet --metadata owner=WebTeam
- name: Update a TXT record set. (autogenerated)
text: |
az network dns record-set txt update --name MyRecordSet --resource-group MyResourceGroup --set tags.CostCenter=MyBusinessGroup --zone-name www.mysite.com
crafted: true
"""
helps['network dns zone'] = """
type: group
short-summary: Manage DNS zones.
"""
helps['network dns zone create'] = """
type: command
short-summary: Create a DNS zone.
parameters:
- name: --if-none-match
short-summary: Only create a DNS zone if one doesn't exist that matches the given name.
examples:
- name: Create a DNS zone using a fully qualified domain name.
text: >
az network dns zone create -g MyResourceGroup -n www.mysite.com
- name: Create a DNS zone with delegation in the parent within the same subscription and resource group
text: >
az network dns zone create -g MyResourceGroup -n books.mysite.com -p mysite.com
- name: Create a DNS zone with delegation in the parent in different subscription
text: >
az network dns zone create -g MyResourceGroup -n books.mysite.com -p "/subscriptions/**67e2/resourceGroups/OtherRg/providers/Microsoft.Network/dnszones/mysite.com"
"""
helps['network dns zone delete'] = """
type: command
short-summary: Delete a DNS zone and all associated records.
examples:
- name: Delete a DNS zone using a fully qualified domain name.
text: >
az network dns zone delete -g MyResourceGroup -n www.mysite.com
"""
helps['network dns zone export'] = """
type: command
short-summary: Export a DNS zone as a DNS zone file.
examples:
- name: Export a DNS zone as a DNS zone file.
text: >
az network dns zone export -g MyResourceGroup -n www.mysite.com -f mysite_com_zone.txt
"""
helps['network dns zone import'] = """
type: command
short-summary: Create a DNS zone using a DNS zone file.
examples:
- name: Import a local zone file into a DNS zone resource.
text: >
az network dns zone import -g MyResourceGroup -n MyZone -f /path/to/zone/file
"""
helps['network dns zone list'] = """
type: command
short-summary: List DNS zones.
examples:
- name: List DNS zones in a resource group.
text: >
az network dns zone list -g MyResourceGroup
"""
helps['network dns zone show'] = """
type: command
short-summary: Get the parameters of a DNS zone. Does not show DNS records within the zone.
examples:
  - name: Get the details of a DNS zone.
text: >
az network dns zone show -g MyResourceGroup -n www.mysite.com
"""
helps['network dns zone update'] = """
type: command
short-summary: Update the properties of a DNS zone. Does not modify DNS records within the zone.
parameters:
- name: --if-match
short-summary: Update only if the resource with the same ETAG exists.
examples:
- name: Update a DNS zone properties to change the user-defined value of a previously set tag.
text: >
az network dns zone update -g MyResourceGroup -n www.mysite.com --tags CostCenter=Marketing
- name: Update a DNS zone properties (autogenerated)
text: |
az network dns zone update --name www.mysite.com --remove tags.no_80 --resource-group MyResourceGroup
crafted: true
"""
helps['network express-route'] = """
type: group
short-summary: Manage dedicated private network fiber connections to Azure.
long-summary: >
To learn more about ExpressRoute circuits visit
https://docs.microsoft.com/azure/expressroute/howto-circuit-cli
"""
helps['network express-route auth'] = """
type: group
short-summary: Manage authentication of an ExpressRoute circuit.
long-summary: >
To learn more about ExpressRoute circuit authentication visit
https://docs.microsoft.com/azure/expressroute/howto-linkvnet-cli#connect-a-virtual-network-in-a-different-subscription-to-a-circuit
"""
helps['network express-route auth create'] = """
type: command
short-summary: Create a new link authorization for an ExpressRoute circuit.
examples:
- name: Create a new link authorization for an ExpressRoute circuit.
text: >
az network express-route auth create --circuit-name MyCircuit -g MyResourceGroup -n MyAuthorization
"""
helps['network express-route auth delete'] = """
type: command
short-summary: Delete a link authorization of an ExpressRoute circuit.
examples:
- name: Delete a link authorization of an ExpressRoute circuit.
text: >
az network express-route auth delete --circuit-name MyCircuit -g MyResourceGroup -n MyAuthorization
"""
helps['network express-route auth list'] = """
type: command
short-summary: List link authorizations of an ExpressRoute circuit.
examples:
- name: List link authorizations of an ExpressRoute circuit.
text: >
az network express-route auth list -g MyResourceGroup --circuit-name MyCircuit
- name: List link authorizations of an ExpressRoute circuit. (autogenerated)
text: |
az network express-route auth list --circuit-name MyCircuit --resource-group MyResourceGroup --subscription MySubscription
crafted: true
"""
helps['network express-route auth show'] = """
type: command
short-summary: Get the details of a link authorization of an ExpressRoute circuit.
examples:
- name: Get the details of a link authorization of an ExpressRoute circuit.
text: >
az network express-route auth show -g MyResourceGroup --circuit-name MyCircuit -n MyAuthorization
"""
helps['network express-route create'] = """
type: command
short-summary: Create an ExpressRoute circuit.
parameters:
- name: --bandwidth
populator-commands:
- az network express-route list-service-providers
- name: --peering-location
populator-commands:
- az network express-route list-service-providers
- name: --provider
populator-commands:
- az network express-route list-service-providers
examples:
- name: Create an ExpressRoute circuit.
text: >
az network express-route create --bandwidth 200 -n MyCircuit --peering-location "Silicon Valley" -g MyResourceGroup --provider "Equinix" -l "West US" --sku-family MeteredData --sku-tier Standard
"""
helps['network express-route delete'] = """
type: command
short-summary: Delete an ExpressRoute circuit.
examples:
- name: Delete an ExpressRoute circuit.
text: >
az network express-route delete -n MyCircuit -g MyResourceGroup
"""
helps['network express-route gateway'] = """
type: group
short-summary: Manage ExpressRoute gateways.
"""
helps['network express-route gateway connection'] = """
type: group
short-summary: Manage ExpressRoute gateway connections.
"""
helps['network express-route gateway connection create'] = """
type: command
short-summary: Create an ExpressRoute gateway connection.
examples:
- name: Create an ExpressRoute gateway connection.
text: |
az network express-route gateway connection create --gateway-name MyGateway -n MyExpressRouteConnection -g MyResourceGroup --peering /subscriptions/MySub/resourceGroups/MyResourceGroup/providers/Microsoft.Network/expressRouteCircuits/MyCircuit/peerings/AzurePrivatePeering --associated-route-table /MySub/resourceGroups/MyResourceGroup/providers/Microsoft.Network/virtualHubs/MyHub/hubRouteTables/MyRouteTable1 --propagated-route-tables /MySub/resourceGroups/MyResourceGroup/providers/Microsoft.Network/virtualHubs/MyHub/hubRouteTables/MyRouteTable1 /MySub/resourceGroups/MyResourceGroup/providers/Microsoft.Network/virtualHubs/MyHub/hubRouteTables/MyRouteTable2 --labels label1 label2
"""
helps['network express-route gateway connection delete'] = """
type: command
short-summary: Delete an ExpressRoute gateway connection.
examples:
- name: Delete an ExpressRoute gateway connection. (autogenerated)
text: |
az network express-route gateway connection delete --gateway-name MyGateway --name MyExpressRouteConnection --resource-group MyResourceGroup
crafted: true
"""
helps['network express-route gateway connection list'] = """
type: command
short-summary: List ExpressRoute gateway connections.
examples:
- name: List ExpressRoute gateway connections. (autogenerated)
text: |
az network express-route gateway connection list --gateway-name MyGateway --resource-group MyResourceGroup
crafted: true
"""
helps['network express-route gateway connection show'] = """
type: command
short-summary: Get the details of an ExpressRoute gateway connection.
examples:
- name: Get the details of an ExpressRoute gateway connection. (autogenerated)
text: |
az network express-route gateway connection show --gateway-name MyGateway --name MyExpressRouteConnection --resource-group MyResourceGroup
crafted: true
"""
helps['network express-route gateway connection update'] = """
type: command
short-summary: Update an ExpressRoute gateway connection.
examples:
- name: Update an ExpressRoute gateway connection.
text: |
az network express-route gateway connection update --gateway-name MyGateway -n MyExpressRouteConnection -g MyResourceGroup --peering /subscriptions/MySub/resourceGroups/MyResourceGroup/providers/Microsoft.Network/expressRouteCircuits/MyCircuit/peerings/AzurePrivatePeering --associated-route-table /MySub/resourceGroups/MyResourceGroup/providers/Microsoft.Network/virtualHubs/MyHub/hubRouteTables/MyRouteTable1 --propagated-route-tables /MySub/resourceGroups/MyResourceGroup/providers/Microsoft.Network/virtualHubs/MyHub/hubRouteTables/MyRouteTable1 /MySub/resourceGroups/MyResourceGroup/providers/Microsoft.Network/virtualHubs/MyHub/hubRouteTables/MyRouteTable2 --labels label1 label2
"""
helps['network express-route gateway create'] = """
type: command
short-summary: Create an ExpressRoute gateway.
"""
helps['network express-route gateway delete'] = """
type: command
short-summary: Delete an ExpressRoute gateway.
examples:
- name: Delete an ExpressRoute gateway. (autogenerated)
text: |
az network express-route gateway delete --name MyExpressRouteGateway --resource-group MyResourceGroup
crafted: true
"""
helps['network express-route gateway list'] = """
type: command
short-summary: List ExpressRoute gateways.
examples:
- name: List ExpressRoute gateways. (autogenerated)
text: |
az network express-route gateway list --resource-group MyResourceGroup
crafted: true
"""
helps['network express-route gateway show'] = """
type: command
short-summary: Get the details of an ExpressRoute gateway.
examples:
- name: Get the details of an ExpressRoute gateway. (autogenerated)
text: |
az network express-route gateway show --name MyExpressRouteGateway --resource-group MyResourceGroup
crafted: true
"""
helps['network express-route gateway update'] = """
type: command
short-summary: Update settings of an ExpressRoute gateway.
"""
helps['network express-route get-stats'] = """
type: command
short-summary: Get the statistics of an ExpressRoute circuit.
examples:
- name: Get the statistics of an ExpressRoute circuit.
text: >
az network express-route get-stats -g MyResourceGroup -n MyCircuit
"""
helps['network express-route list'] = """
type: command
short-summary: List all ExpressRoute circuits for the current subscription.
examples:
- name: List all ExpressRoute circuits for the current subscription.
text: >
az network express-route list -g MyResourceGroup
"""
helps['network express-route list-arp-tables'] = """
type: command
short-summary: Show the current Address Resolution Protocol (ARP) table of an ExpressRoute circuit.
examples:
- name: Show the current Address Resolution Protocol (ARP) table of an ExpressRoute circuit.
text: |
az network express-route list-arp-tables -g MyResourceGroup -n MyCircuit \\
--path primary --peering-name AzurePrivatePeering
"""
helps['network express-route list-route-tables'] = """
type: command
short-summary: Show the current routing table of an ExpressRoute circuit peering.
examples:
- name: Show the current routing table of an ExpressRoute circuit peering.
text: |
az network express-route list-route-tables -g MyResourceGroup -n MyCircuit \\
--path primary --peering-name AzurePrivatePeering
"""
helps['network express-route list-service-providers'] = """
type: command
short-summary: List available ExpressRoute service providers.
examples:
- name: List available ExpressRoute service providers.
text: az network express-route list-service-providers
"""
helps['network express-route peering'] = """
type: group
short-summary: Manage ExpressRoute peering of an ExpressRoute circuit.
"""
helps['network express-route peering connection'] = """
type: group
short-summary: Manage ExpressRoute circuit connections.
"""
helps['network express-route peering connection create'] = """
type: command
short-summary: Create connections between two ExpressRoute circuits.
examples:
- name: Create connection between two ExpressRoute circuits with AzurePrivatePeering settings.
text: |
az network express-route peering connection create -g MyResourceGroup --circuit-name \\
MyCircuit --peering-name AzurePrivatePeering -n myConnection --peer-circuit \\
MyOtherCircuit --address-prefix 104.0.0.0/29
"""
helps['network express-route peering connection delete'] = """
type: command
short-summary: Delete an ExpressRoute circuit connection.
examples:
- name: Delete an ExpressRoute circuit connection. (autogenerated)
text: |
az network express-route peering connection delete --circuit-name MyCircuit --name MyPeeringConnection --peering-name MyPeering --resource-group MyResourceGroup
crafted: true
"""
helps['network express-route peering connection show'] = """
type: command
short-summary: Get the details of an ExpressRoute circuit connection.
"""
helps['network express-route peering create'] = """
type: command
short-summary: Create peering settings for an ExpressRoute circuit.
examples:
- name: Create Microsoft Peering settings with IPv4 configuration.
text: |
az network express-route peering create -g MyResourceGroup --circuit-name MyCircuit \\
--peering-type MicrosoftPeering --peer-asn 10002 --vlan-id 103 \\
--primary-peer-subnet 101.0.0.0/30 --secondary-peer-subnet 102.0.0.0/30 \\
--advertised-public-prefixes 101.0.0.0/30
"""
helps['network express-route peering delete'] = """
type: command
short-summary: Delete peering settings.
examples:
- name: Delete private peering.
text: >
az network express-route peering delete -g MyResourceGroup --circuit-name MyCircuit -n AzurePrivatePeering
"""
helps['network express-route peering list'] = """
type: command
short-summary: List peering settings of an ExpressRoute circuit.
examples:
- name: List peering settings of an ExpressRoute circuit.
text: >
az network express-route peering list -g MyResourceGroup --circuit-name MyCircuit
"""
helps['network express-route peering peer-connection'] = """
type: group
short-summary: Manage ExpressRoute circuit peer connections.
"""
helps['network express-route peering show'] = """
type: command
short-summary: Get the details of an ExpressRoute peering.
examples:
- name: Get private peering details of an ExpressRoute circuit.
text: >
az network express-route peering show -g MyResourceGroup --circuit-name MyCircuit -n AzurePrivatePeering
"""
helps['network express-route peering update'] = """
type: command
short-summary: Update peering settings of an ExpressRoute circuit.
examples:
- name: Add IPv6 Microsoft Peering settings to existing IPv4 config.
text: |
az network express-route peering update -g MyResourceGroup --circuit-name MyCircuit \\
--ip-version ipv6 --primary-peer-subnet 2002:db00::/126 \\
--secondary-peer-subnet 2003:db00::/126 --advertised-public-prefixes 2002:db00::/126
supported-profiles: latest
- name: Update peering settings of an ExpressRoute circuit. (autogenerated)
text: |
az network express-route peering update --circuit-name MyCircuit --name MyPeering --peer-asn 10002 --primary-peer-subnet 2002:db00::/126 --resource-group MyResourceGroup --secondary-peer-subnet 2003:db00::/126 --shared-key Abc123 --vlan-id 103
crafted: true
"""
helps['network express-route port'] = """
type: group
short-summary: Manage ExpressRoute ports.
"""
helps['network express-route port create'] = """
type: command
short-summary: Create an ExpressRoute port.
examples:
- name: Create an ExpressRoute port. (autogenerated)
text: |
az network express-route port create --bandwidth 200 --encapsulation Dot1Q --location westus2 --name MyExpressRoutePort --peering-location westus --resource-group MyResourceGroup
crafted: true
"""
helps['network express-route port delete'] = """
type: command
short-summary: Delete an ExpressRoute port.
examples:
- name: Delete an ExpressRoute port. (autogenerated)
text: |
az network express-route port delete --name MyExpressRoutePort --resource-group MyResourceGroup
crafted: true
"""
helps['network express-route port link'] = """
type: group
short-summary: View ExpressRoute links.
"""
helps['network express-route port link list'] = """
type: command
short-summary: List ExpressRoute links.
examples:
- name: List ExpressRoute links. (autogenerated)
text: |
az network express-route port link list --port-name MyPort --resource-group MyResourceGroup
crafted: true
"""
helps['network express-route port link show'] = """
type: command
short-summary: Get the details of an ExpressRoute link.
examples:
- name: Get the details of an ExpressRoute link. (autogenerated)
text: |
az network express-route port link show --name MyLinkExpressRoutePort --port-name MyPort --resource-group MyResourceGroup
crafted: true
"""
helps['network express-route port link update'] = """
type: command
short-summary: Manage MACsec configuration and administrative state of an ExpressRoute Link.
examples:
  - name: Enable MACsec on ExpressRoute Direct Ports one at a time.
text: |-
az network express-route port link update \\
--resource-group MyResourceGroup \\
--port-name MyExpressRoutePort \\
--name link1 \\
--macsec-ckn-secret-identifier MacSecCKNSecretID \\
--macsec-cak-secret-identifier MacSecCAKSecretID \\
--macsec-cipher gcm-aes-128
- name: Enable administrative state of an ExpressRoute Link.
text: |-
az network express-route port link update \\
--resource-group MyResourceGroup \\
--port-name MyExpressRoutePort \\
--name link2 \\
--admin-state Enabled
"""
helps['network express-route port list'] = """
type: command
short-summary: List ExpressRoute ports.
"""
helps['network express-route port location'] = """
type: group
short-summary: View ExpressRoute port location information.
"""
helps['network express-route port location list'] = """
type: command
short-summary: List ExpressRoute port locations.
"""
helps['network express-route port location show'] = """
type: command
short-summary: Get the details of an ExpressRoute port location.
examples:
- name: Get the details of an ExpressRoute port location. (autogenerated)
text: |
az network express-route port location show --location westus2
crafted: true
"""
helps['network express-route port show'] = """
type: command
short-summary: Get the details of an ExpressRoute port.
examples:
- name: Get the details of an ExpressRoute port. (autogenerated)
text: |
az network express-route port show --name MyExpressRoutePort --resource-group MyResourceGroup
crafted: true
"""
helps['network express-route port update'] = """
type: command
short-summary: Update settings of an ExpressRoute port.
examples:
- name: Update settings of an ExpressRoute port (autogenerated)
text: |
az network express-route port update --name MyExpressRoutePort --resource-group MyResourceGroup
crafted: true
"""
helps['network express-route port identity'] = """
type: group
short-summary: Manage the managed service identity of an ExpressRoute Port
"""
helps['network express-route port identity assign'] = """
type: command
short-summary: Assign a managed service identity to an ExpressRoute Port
examples:
- name: Assign an identity to the ExpressRoute Port
text: |-
az network express-route port identity assign \\
          --resource-group MyResourceGroup \\
--name MyExpressRoutePort \\
--identity MyUserAssignedManagedServiceIdentity
"""
helps['network express-route port identity remove'] = """
type: command
short-summary: Remove the managed service identity of an ExpressRoute Port
examples:
- name: Remove an identity of the ExpressRoute Port
text: az network express-route port identity remove -g MyResourceGroup --name MyExpressRoutePort
"""
helps['network express-route port identity show'] = """
type: command
short-summary: Show the managed service identity of an ExpressRoute Port
examples:
- name: Show an identity of the ExpressRoute Port
text: az network express-route port identity show -g MyResourceGroup --name MyExpressRoutePort
"""
helps['network express-route show'] = """
type: command
short-summary: Get the details of an ExpressRoute circuit.
examples:
- name: Get the details of an ExpressRoute circuit.
text: >
az network express-route show -n MyCircuit -g MyResourceGroup
"""
helps['network express-route update'] = """
type: command
short-summary: Update settings of an ExpressRoute circuit.
examples:
- name: Change the SKU of an ExpressRoute circuit from Standard to Premium.
text: >
az network express-route update -n MyCircuit -g MyResourceGroup --sku-tier Premium
"""
helps['network express-route wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a condition of the ExpressRoute is met.
examples:
- name: Pause executing next line of CLI script until the ExpressRoute circuit is successfully provisioned.
text: az network express-route wait -n MyCircuit -g MyResourceGroup --created
"""
helps['network lb'] = """
type: group
short-summary: Manage and configure load balancers.
long-summary: To learn more about Azure Load Balancer visit https://docs.microsoft.com/azure/load-balancer/load-balancer-get-started-internet-arm-cli
"""
helps['network lb address-pool'] = """
type: group
short-summary: Manage address pools of a load balancer.
"""
helps['network lb address-pool create'] = """
type: command
short-summary: Create an address pool.
parameters:
- name: --backend-address
short-summary: Backend addresses information for backend address pool. If it's used, --vnet is also required.
long-summary: |
Usage: --backend-address name=addr1 ip-address=10.0.0.1 --vnet MyVnet
name: Required. The name of the backend address.
ip-address: Required. Ip Address within the Virtual Network.
Multiple backend addresses can be specified by using more than one `--backend-address` argument.
- name: --backend-addresses-config-file
short-summary: A config file used to set backend addresses. This argument is for experienced users. You may encounter parse errors if the json file is invalid.
long-summary: |
Usage: --backend-addresses-config-file @"{config_file.json}"
        An example config file is
[
{
"name": "address1",
"virtualNetwork": "clitestvnet",
"ipAddress": "10.0.0.4"
},
{
"name": "address2",
"virtualNetwork": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_lb_address_pool_addresses000001/providers/Microsoft.Network/virtualNetworks/clitestvnet",
"ipAddress": "10.0.0.5"
}
]
examples:
- name: Create an address pool.
text: az network lb address-pool create -g MyResourceGroup --lb-name MyLb -n MyAddressPool
- name: Create an address pool with several backend addresses using key-value arguments.
text: az network lb address-pool create -g MyResourceGroup --lb-name MyLb -n MyAddressPool --vnet {VnetResourceId} --backend-address name=addr1 ip-address=10.0.0.1 --backend-address name=addr2 ip-address=10.0.0.3
- name: Create an address pool with several backend addresses using config file
text: az network lb address-pool create -g MyResourceGroup --lb-name MyLb -n MyAddressPool --backend-addresses-config-file @config_file.json
"""
helps['network lb address-pool delete'] = """
type: command
short-summary: Delete an address pool.
examples:
- name: Delete an address pool.
text: az network lb address-pool delete -g MyResourceGroup --lb-name MyLb -n MyAddressPool
"""
helps['network lb address-pool list'] = """
type: command
short-summary: List address pools.
examples:
- name: List address pools.
text: az network lb address-pool list -g MyResourceGroup --lb-name MyLb -o table
"""
helps['network lb address-pool show'] = """
type: command
short-summary: Get the details of an address pool.
examples:
- name: Get the details of an address pool.
text: az network lb address-pool show -g MyResourceGroup --lb-name MyLb -n MyAddressPool
"""
helps['network lb address-pool address'] = """
type: group
short-summary: Manage backend addresses of the load balancer backend address pool.
"""
helps['network lb address-pool address add'] = """
type: command
short-summary: Add one backend address into the load balancer backend address pool.
examples:
  - name: Add one backend address into the load balancer backend address pool.
text: az network lb address-pool address add -g MyResourceGroup --lb-name MyLb --pool-name MyAddressPool -n MyAddress --vnet MyVnet --ip-address 10.0.0.1
"""
helps['network lb address-pool address remove'] = """
type: command
short-summary: Remove one backend address from the load balancer backend address pool.
examples:
  - name: Remove one backend address from the load balancer backend address pool.
text: az network lb address-pool address remove -g MyResourceGroup --lb-name MyLb --pool-name MyAddressPool -n MyAddress
"""
helps['network lb address-pool address list'] = """
type: command
short-summary: List all backend addresses of the load balancer backend address pool.
examples:
  - name: List all backend addresses of the load balancer backend address pool.
text: az network lb address-pool address list -g MyResourceGroup --lb-name MyLb --pool-name MyAddressPool
"""
helps['network lb create'] = """
type: command
short-summary: Create a load balancer.
examples:
- name: Create a basic load balancer.
text: >
az network lb create -g MyResourceGroup -n MyLb --sku Basic
- name: Create a basic load balancer on a specific virtual network and subnet. If a virtual network with the same name is found in the same resource group, the load balancer will utilize this virtual network. If one is not found a new one will be created.
text: >
az network lb create -g MyResourceGroup -n MyLb --sku Basic --vnet-name MyVnet --subnet MySubnet
  - name: Create a basic load balancer on a subnet of a pre-existing virtual network. The subnet can be in an arbitrary resource group or subscription by providing the ID of the subnet.
text: >
az network lb create -g MyResourceGroup -n MyLb --sku Basic --subnet {subnetID}
- name: Create a basic zone flavored internal load balancer, through provisioning a zonal public ip.
text: >
az network lb create -g MyResourceGroup -n MyLb --sku Basic --public-ip-zone 2
- name: >
Create a standard zone flavored public-facing load balancer, through provisioning a zonal frontend ip configuration and Vnet.
text: >
az network lb create -g MyResourceGroup -n MyLb --sku Standard --frontend-ip-zone 1 --vnet-name MyVnet --subnet MySubnet
"""
helps['network lb delete'] = """
type: command
short-summary: Delete a load balancer.
examples:
- name: Delete a load balancer.
text: az network lb delete -g MyResourceGroup -n MyLb
"""
helps['network lb frontend-ip'] = """
type: group
short-summary: Manage frontend IP addresses of a load balancer.
"""
helps['network lb frontend-ip create'] = """
type: command
short-summary: Create a frontend IP address.
examples:
- name: Create a frontend ip address for a public load balancer.
text: az network lb frontend-ip create -g MyResourceGroup -n MyFrontendIp --lb-name MyLb --public-ip-address MyFrontendIp
- name: Create a frontend ip address for an internal load balancer.
text: |
az network lb frontend-ip create -g MyResourceGroup -n MyFrontendIp --lb-name MyLb \\
--private-ip-address 10.10.10.100 --subnet MySubnet --vnet-name MyVnet
"""
helps['network lb frontend-ip delete'] = """
type: command
short-summary: Delete a frontend IP address.
examples:
- name: Delete a frontend IP address.
text: az network lb frontend-ip delete -g MyResourceGroup --lb-name MyLb -n MyFrontendIp
"""
helps['network lb frontend-ip list'] = """
type: command
short-summary: List frontend IP addresses.
examples:
- name: List frontend IP addresses.
text: az network lb frontend-ip list -g MyResourceGroup --lb-name MyLb
"""
helps['network lb frontend-ip show'] = """
type: command
short-summary: Get the details of a frontend IP address.
examples:
- name: Get the details of a frontend IP address.
text: az network lb frontend-ip show -g MyResourceGroup --lb-name MyLb -n MyFrontendIp
- name: Get the details of a frontend IP address (autogenerated)
text: |
az network lb frontend-ip show --lb-name MyLb --name MyFrontendIp --resource-group MyResourceGroup --subscription MySubscription
crafted: true
"""
helps['network lb frontend-ip update'] = """
type: command
short-summary: Update a frontend IP address.
examples:
- name: Update the frontend IP address of a public load balancer.
text: az network lb frontend-ip update -g MyResourceGroup --lb-name MyLb -n MyFrontendIp --public-ip-address MyNewPublicIp
- name: Update the frontend IP address of an internal load balancer.
text: az network lb frontend-ip update -g MyResourceGroup --lb-name MyLb -n MyFrontendIp --private-ip-address 10.10.10.50
- name: Update a frontend IP address. (autogenerated)
text: |
az network lb frontend-ip update --lb-name MyLb --name MyFrontendIp --resource-group MyResourceGroup --set tags.CostCenter=MyBusinessGroup
crafted: true
"""
helps['network lb inbound-nat-pool'] = """
type: group
short-summary: Manage inbound NAT address pools of a load balancer.
"""
helps['network lb inbound-nat-pool create'] = """
type: command
short-summary: Create an inbound NAT address pool.
examples:
- name: Create an inbound NAT address pool.
text: |
az network lb inbound-nat-pool create -g MyResourceGroup --lb-name MyLb \\
-n MyNatPool --protocol Tcp --frontend-port-range-start 80 --frontend-port-range-end 89 \\
--backend-port 80 --frontend-ip-name MyFrontendIp
"""
helps['network lb inbound-nat-pool delete'] = """
type: command
short-summary: Delete an inbound NAT address pool.
examples:
- name: Delete an inbound NAT address pool.
text: az network lb inbound-nat-pool delete -g MyResourceGroup --lb-name MyLb -n MyNatPool
"""
helps['network lb inbound-nat-pool list'] = """
type: command
short-summary: List inbound NAT address pools.
examples:
- name: List inbound NAT address pools.
text: az network lb inbound-nat-pool list -g MyResourceGroup --lb-name MyLb -o table
"""
helps['network lb inbound-nat-pool show'] = """
type: command
short-summary: Get the details of an inbound NAT address pool.
examples:
- name: Get the details of an inbound NAT address pool.
text: az network lb inbound-nat-pool show -g MyResourceGroup --lb-name MyLb -n MyNatPool
"""
helps['network lb inbound-nat-pool update'] = """
type: command
short-summary: Update an inbound NAT address pool.
examples:
- name: Update an inbound NAT address pool to a different backend port.
text: |
az network lb inbound-nat-pool update -g MyResourceGroup --lb-name MyLb -n MyNatPool \\
--protocol Tcp --backend-port 8080
- name: Update an inbound NAT address pool. (autogenerated)
text: |
az network lb inbound-nat-pool update --backend-port 8080 --enable-tcp-reset true --frontend-port-range-end 89 --frontend-port-range-start 80 --lb-name MyLb --name MyNatPool --resource-group MyResourceGroup
crafted: true
- name: Update an inbound NAT address pool. (autogenerated)
text: |
az network lb inbound-nat-pool update --enable-tcp-reset true --lb-name MyLb --name MyNatPool --protocol Udp --resource-group MyResourceGroup
crafted: true
- name: Update an inbound NAT address pool. (autogenerated)
text: |
az network lb inbound-nat-pool update --backend-port 8080 --frontend-port-range-end 89 --frontend-port-range-start 80 --lb-name MyLb --name MyNatPool --protocol Udp --resource-group MyResourceGroup
crafted: true
"""
helps['network lb inbound-nat-rule'] = """
type: group
short-summary: Manage inbound NAT rules of a load balancer.
"""
helps['network lb inbound-nat-rule create'] = """
type: command
short-summary: Create an inbound NAT rule.
examples:
- name: Create a basic inbound NAT rule for port 80.
text: |
az network lb inbound-nat-rule create -g MyResourceGroup --lb-name MyLb -n MyNatRule \\
--protocol Tcp --frontend-port 80 --backend-port 80
- name: Create a basic inbound NAT rule for a specific frontend IP and enable floating IP for NAT Rule.
text: |
az network lb inbound-nat-rule create -g MyResourceGroup --lb-name MyLb -n MyNatRule --protocol Tcp \\
--frontend-port 5432 --backend-port 3389 --frontend-ip-name MyFrontendIp --floating-ip true
"""
helps['network lb inbound-nat-rule delete'] = """
type: command
short-summary: Delete an inbound NAT rule.
examples:
- name: Delete an inbound NAT rule.
text: az network lb inbound-nat-rule delete -g MyResourceGroup --lb-name MyLb -n MyNatRule
"""
helps['network lb inbound-nat-rule list'] = """
type: command
short-summary: List inbound NAT rules.
examples:
- name: List inbound NAT rules.
text: az network lb inbound-nat-rule list -g MyResourceGroup --lb-name MyLb -o table
"""
helps['network lb inbound-nat-rule show'] = """
type: command
short-summary: Get the details of an inbound NAT rule.
examples:
- name: Get the details of an inbound NAT rule.
text: az network lb inbound-nat-rule show -g MyResourceGroup --lb-name MyLb -n MyNatRule
"""
helps['network lb inbound-nat-rule update'] = """
type: command
short-summary: Update an inbound NAT rule.
examples:
- name: Update an inbound NAT rule to disable floating IP and modify idle timeout duration.
text: |
az network lb inbound-nat-rule update -g MyResourceGroup --lb-name MyLb -n MyNatRule \\
--floating-ip false --idle-timeout 5
- name: Update an inbound NAT rule. (autogenerated)
text: |
az network lb inbound-nat-rule update --backend-port 3389 --frontend-port 5432 --lb-name MyLb --name MyNatRule --protocol Udp --resource-group MyResourceGroup
crafted: true
- name: Update an inbound NAT rule. (autogenerated)
text: |
az network lb inbound-nat-rule update --lb-name MyLb --name MyNatRule --resource-group MyResourceGroup --set tags.CostCenter=MyBusinessGroup
crafted: true
"""
helps['network lb list'] = """
type: command
short-summary: List load balancers.
examples:
- name: List load balancers.
text: az network lb list -g MyResourceGroup
"""
helps['network lb outbound-rule'] = """
type: group
short-summary: Manage outbound rules of a load balancer.
"""
helps['network lb outbound-rule create'] = """
type: command
short-summary: Create an outbound-rule.
examples:
- name: Create an outbound-rule. (autogenerated)
text: |
az network lb outbound-rule create --address-pool MyAddressPool --frontend-ip-configs myfrontendoutbound --idle-timeout 5 --lb-name MyLb --name MyOutboundRule --outbound-ports 10000 --protocol Udp --resource-group MyResourceGroup
crafted: true
"""
helps['network lb outbound-rule delete'] = """
type: command
short-summary: Delete an outbound-rule.
examples:
- name: Delete an outbound-rule. (autogenerated)
text: |
az network lb outbound-rule delete --lb-name MyLb --name MyOutboundRule --resource-group MyResourceGroup
crafted: true
"""
helps['network lb outbound-rule list'] = """
type: command
short-summary: List outbound rules.
examples:
- name: List outbound rules. (autogenerated)
text: |
az network lb outbound-rule list --lb-name MyLb --resource-group MyResourceGroup
crafted: true
"""
helps['network lb outbound-rule show'] = """
type: command
short-summary: Get the details of an outbound rule.
examples:
- name: Get the details of an outbound rule. (autogenerated)
text: |
az network lb outbound-rule show --lb-name MyLb --name MyOutboundRule --resource-group MyResourceGroup
crafted: true
"""
helps['network lb outbound-rule update'] = """
type: command
short-summary: Update an outbound-rule.
examples:
- name: Update an outbound-rule. (autogenerated)
text: |
az network lb outbound-rule update --enable-tcp-reset true --lb-name MyLb --name MyOutboundRule --outbound-ports 10000 --resource-group MyResourceGroup
crafted: true
"""
helps['network lb probe'] = """
type: group
short-summary: Evaluate probe information and define routing rules.
"""
helps['network lb probe create'] = """
type: command
short-summary: Create a probe.
examples:
- name: Create a probe on a load balancer over HTTP and port 80.
text: |
az network lb probe create -g MyResourceGroup --lb-name MyLb -n MyProbe \\
--protocol http --port 80 --path /
- name: Create a probe on a load balancer over TCP on port 443.
text: |
az network lb probe create -g MyResourceGroup --lb-name MyLb -n MyProbe \\
--protocol tcp --port 443
"""
helps['network lb probe delete'] = """
type: command
short-summary: Delete a probe.
examples:
- name: Delete a probe.
text: az network lb probe delete -g MyResourceGroup --lb-name MyLb -n MyProbe
"""
helps['network lb probe list'] = """
type: command
short-summary: List probes.
examples:
- name: List probes.
text: az network lb probe list -g MyResourceGroup --lb-name MyLb -o table
"""
helps['network lb probe show'] = """
type: command
short-summary: Get the details of a probe.
examples:
- name: Get the details of a probe.
text: az network lb probe show -g MyResourceGroup --lb-name MyLb -n MyProbe
"""
helps['network lb probe update'] = """
type: command
short-summary: Update a probe.
examples:
- name: Update a probe with a different port and interval.
text: az network lb probe update -g MyResourceGroup --lb-name MyLb -n MyProbe --port 81 --interval 10
- name: Update a probe. (autogenerated)
text: |
az network lb probe update --lb-name MyLb --name MyProbe --port 81 --resource-group MyResourceGroup --subscription MySubscription
crafted: true
"""
helps['network lb rule'] = """
type: group
short-summary: Manage load balancing rules.
"""
helps['network lb rule create'] = """
type: command
short-summary: Create a load balancing rule.
examples:
- name: >
Create a load balancing rule that assigns a front-facing IP configuration and port to an address pool and port.
text: |
az network lb rule create -g MyResourceGroup --lb-name MyLb -n MyLbRule --protocol Tcp \\
--frontend-ip-name MyFrontEndIp --frontend-port 80 \\
--backend-pool-name MyAddressPool --backend-port 80
- name: >
Create a load balancing rule that assigns a front-facing IP configuration and port to an address pool and port with the floating ip feature.
text: |
az network lb rule create -g MyResourceGroup --lb-name MyLb -n MyLbRule --protocol Tcp \\
--frontend-ip-name MyFrontEndIp --backend-pool-name MyAddressPool \\
--floating-ip true --frontend-port 80 --backend-port 80
- name: >
Create an HA ports load balancing rule that assigns a frontend IP and port to use all available backend IPs in a pool on the same port.
text: |
az network lb rule create -g MyResourceGroup --lb-name MyLb -n MyHAPortsRule \\
--protocol All --frontend-port 0 --backend-port 0 --frontend-ip-name MyFrontendIp \\
--backend-pool-name MyAddressPool
"""
helps['network lb rule delete'] = """
type: command
short-summary: Delete a load balancing rule.
examples:
- name: Delete a load balancing rule.
text: az network lb rule delete -g MyResourceGroup --lb-name MyLb -n MyLbRule
"""
helps['network lb rule list'] = """
type: command
short-summary: List load balancing rules.
examples:
- name: List load balancing rules.
text: az network lb rule list -g MyResourceGroup --lb-name MyLb -o table
"""
helps['network lb rule show'] = """
type: command
short-summary: Get the details of a load balancing rule.
examples:
- name: Get the details of a load balancing rule.
text: az network lb rule show -g MyResourceGroup --lb-name MyLb -n MyLbRule
"""
helps['network lb rule update'] = """
type: command
short-summary: Update a load balancing rule.
examples:
  - name: Update a load balancing rule to change the protocol to UDP.
    text: az network lb rule update -g MyResourceGroup --lb-name MyLb -n MyLbRule --protocol Udp
  - name: Update a load balancing rule to support HA ports.
    text: |
      az network lb rule update -g MyResourceGroup --lb-name MyLb -n MyLbRule \\
        --protocol All --frontend-port 0 --backend-port 0
- name: Update a load balancing rule. (autogenerated)
text: |
az network lb rule update --disable-outbound-snat true --lb-name MyLb --name MyLbRule --resource-group MyResourceGroup
crafted: true
"""
helps['network lb show'] = """
type: command
short-summary: Get the details of a load balancer.
examples:
- name: Get the details of a load balancer.
text: az network lb show -g MyResourceGroup -n MyLb
"""
helps['network lb update'] = """
type: command
short-summary: Update a load balancer.
long-summary: >
This command can only be used to update the tags for a load balancer. Name and resource group are immutable and cannot be updated.
examples:
- name: Update the tags of a load balancer.
text: az network lb update -g MyResourceGroup -n MyLb --set tags.CostCenter=MyBusinessGroup
"""
helps['network list-service-tags'] = """
type: command
short-summary: List all service tags which belong to different resources
long-summary: >
A service tag represents a group of IP address prefixes to help minimize complexity for security rule creation.
To learn more about list-service-tags, visit https://docs.microsoft.com/en-us/azure/virtual-network/security-overview#service-tags. \\
Note that the location parameter is used as a reference for version (not as a filter based on location).
For example, even if you specify --location eastus2 you will get the list of service tags with prefix details across all regions but limited to the cloud that your subscription belongs to (i.e. Public, US government, China or Germany).
examples:
- name: Gets a list of service tag information resources. (autogenerated)
text: |
az network list-service-tags --location westus2
crafted: true
"""
helps['network list-usages'] = """
type: command
short-summary: List the number of network resources in a region that are used against a subscription quota.
examples:
- name: List the provisioned network resources in East US region within a subscription.
text: az network list-usages --location eastus -o table
"""
helps['network local-gateway'] = """
type: group
short-summary: Manage local gateways.
long-summary: >
For more information on local gateways, visit: https://docs.microsoft.com/azure/vpn-gateway/vpn-gateway-howto-site-to-site-resource-manager-cli#localnet
"""
helps['network local-gateway create'] = """
type: command
short-summary: Create a local VPN gateway.
examples:
- name: Create a Local Network Gateway to represent your on-premises site.
text: |
az network local-gateway create -g MyResourceGroup -n MyLocalGateway \\
--gateway-ip-address 23.99.221.164 --local-address-prefixes 10.0.0.0/24 20.0.0.0/24
"""
helps['network local-gateway delete'] = """
type: command
short-summary: Delete a local VPN gateway.
long-summary: >
In order to delete a Local Network Gateway, you must first delete ALL Connection objects in Azure
that are connected to the Gateway. After deleting the Gateway, proceed to delete other resources now not in use.
For more information, follow the order of instructions on this page: https://docs.microsoft.com/azure/vpn-gateway/vpn-gateway-delete-vnet-gateway-portal
examples:
  - name: Delete a local network gateway.
text: az network local-gateway delete -g MyResourceGroup -n MyLocalGateway
- name: Delete a local VPN gateway. (autogenerated)
text: |
az network local-gateway delete --name MyLocalGateway --resource-group MyResourceGroup --subscription MySubscription
crafted: true
"""
helps['network local-gateway list'] = """
type: command
short-summary: List all local VPN gateways in a resource group.
examples:
- name: List all local VPN gateways in a resource group.
text: az network local-gateway list -g MyResourceGroup
"""
helps['network local-gateway show'] = """
type: command
short-summary: Get the details of a local VPN gateway.
examples:
- name: Get the details of a local VPN gateway.
text: az network local-gateway show -g MyResourceGroup -n MyLocalGateway
"""
helps['network local-gateway update'] = """
type: command
short-summary: Update a local VPN gateway.
examples:
- name: Update a Local Network Gateway provisioned with a 10.0.0.0/24 address prefix with additional prefixes.
text: |
az network local-gateway update -g MyResourceGroup -n MyLocalGateway \\
--local-address-prefixes 10.0.0.0/24 20.0.0.0/24 30.0.0.0/24
- name: Update a local VPN gateway. (autogenerated)
text: |
az network local-gateway update --gateway-ip-address 23.99.221.164 --name MyLocalGateway --resource-group MyResourceGroup
crafted: true
"""
helps['network local-gateway wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a condition of the local gateway is met.
examples:
- name: Wait for Local Network Gateway to return as created.
text: |
az network local-gateway wait -g MyResourceGroup -n MyLocalGateway --created
"""
helps['network nic'] = """
type: group
short-summary: Manage network interfaces.
long-summary: >
To learn more about network interfaces in Azure visit https://docs.microsoft.com/azure/virtual-network/virtual-network-network-interface
"""
helps['network nic create'] = """
type: command
short-summary: Create a network interface.
examples:
- name: Create a network interface for a specified subnet on a specified virtual network.
text: >
az network nic create -g MyResourceGroup --vnet-name MyVnet --subnet MySubnet -n MyNic
- name: >
Create a network interface for a specified subnet on a virtual network which allows
IP forwarding subject to a network security group.
text: |
az network nic create -g MyResourceGroup --vnet-name MyVnet --subnet MySubnet -n MyNic \\
--ip-forwarding --network-security-group MyNsg
- name: >
Create a network interface for a specified subnet on a virtual network with network security group and application security groups.
text: |
az network nic create -g MyResourceGroup --vnet-name MyVnet --subnet MySubnet -n MyNic \\
--network-security-group MyNsg --application-security-groups Web App
"""
helps['network nic delete'] = """
type: command
short-summary: Delete a network interface.
examples:
- name: Delete a network interface.
text: >
az network nic delete -g MyResourceGroup -n MyNic
"""
helps['network nic ip-config'] = """
type: group
short-summary: Manage IP configurations of a network interface.
"""
helps['network nic ip-config address-pool'] = """
type: group
short-summary: Manage address pools in an IP configuration.
"""
helps['network nic ip-config address-pool add'] = """
type: command
short-summary: Add an address pool to an IP configuration.
examples:
- name: Add an address pool to an IP configuration.
text: |
az network nic ip-config address-pool add -g MyResourceGroup --nic-name MyNic \\
-n MyIpConfig --address-pool MyAddressPool
- name: Add an address pool to an IP configuration. (autogenerated)
text: |
az network nic ip-config address-pool add --address-pool MyAddressPool --ip-config-name MyIpConfig --lb-name MyLb --nic-name MyNic --resource-group MyResourceGroup
crafted: true
"""
helps['network nic ip-config address-pool remove'] = """
type: command
short-summary: Remove an address pool of an IP configuration.
examples:
- name: Remove an address pool of an IP configuration.
text: |
az network nic ip-config address-pool remove -g MyResourceGroup --nic-name MyNic \\
-n MyIpConfig --address-pool MyAddressPool
- name: Remove an address pool of an IP configuration. (autogenerated)
text: |
az network nic ip-config address-pool remove --address-pool MyAddressPool --ip-config-name MyIpConfig --lb-name MyLb --nic-name MyNic --resource-group MyResourceGroup
crafted: true
"""
helps['network nic ip-config create'] = """
type: command
short-summary: Create an IP configuration.
long-summary: >
You must have the Microsoft.Network/AllowMultipleIpConfigurationsPerNic feature enabled for your subscription.
Only one configuration may be designated as the primary IP configuration per NIC, using the `--make-primary` flag.
examples:
- name: Create a primary IP configuration for a NIC.
text: az network nic ip-config create -g MyResourceGroup -n MyIpConfig --nic-name MyNic --make-primary
- name: Create an IP configuration. (autogenerated)
text: |
az network nic ip-config create --name MyIpConfig --nic-name MyNic --private-ip-address 10.0.0.9 --resource-group MyResourceGroup
crafted: true
"""
helps['network nic ip-config delete'] = """
type: command
short-summary: Delete an IP configuration.
long-summary: A NIC must have at least one IP configuration.
examples:
- name: Delete an IP configuration.
text: az network nic ip-config delete -g MyResourceGroup -n MyIpConfig --nic-name MyNic
"""
helps['network nic ip-config inbound-nat-rule'] = """
type: group
short-summary: Manage inbound NAT rules of an IP configuration.
"""
helps['network nic ip-config inbound-nat-rule add'] = """
type: command
short-summary: Add an inbound NAT rule to an IP configuration.
examples:
- name: Add an inbound NAT rule to an IP configuration.
text: |
az network nic ip-config inbound-nat-rule add -g MyResourceGroup --nic-name MyNic \\
-n MyIpConfig --inbound-nat-rule MyNatRule
- name: Add an inbound NAT rule to an IP configuration. (autogenerated)
text: |
az network nic ip-config inbound-nat-rule add --inbound-nat-rule MyNatRule --ip-config-name MyIpConfig --lb-name MyLb --nic-name MyNic --resource-group MyResourceGroup
crafted: true
"""
helps['network nic ip-config inbound-nat-rule remove'] = """
type: command
short-summary: Remove an inbound NAT rule of an IP configuration.
examples:
- name: Remove an inbound NAT rule of an IP configuration.
text: |
az network nic ip-config inbound-nat-rule remove -g MyResourceGroup --nic-name MyNic \\
-n MyIpConfig --inbound-nat-rule MyNatRule
- name: Remove an inbound NAT rule of an IP configuration. (autogenerated)
text: |
az network nic ip-config inbound-nat-rule remove --inbound-nat-rule MyNatRule --ip-config-name MyIpConfig --lb-name MyLb --nic-name MyNic --resource-group MyResourceGroup
crafted: true
"""
helps['network nic ip-config list'] = """
type: command
short-summary: List the IP configurations of a NIC.
examples:
- name: List the IP configurations of a NIC.
text: az network nic ip-config list -g MyResourceGroup --nic-name MyNic
"""
helps['network nic ip-config show'] = """
type: command
short-summary: Show the details of an IP configuration.
examples:
- name: Show the details of an IP configuration of a NIC.
text: az network nic ip-config show -g MyResourceGroup -n MyIpConfig --nic-name MyNic
"""
helps['network nic ip-config update'] = """
type: command
short-summary: Update an IP configuration.
examples:
- name: Update a NIC to use a new private IP address.
text: |
az network nic ip-config update -g MyResourceGroup --nic-name MyNic \\
-n MyIpConfig --private-ip-address 10.0.0.9
- name: Make an IP configuration the default for the supplied NIC.
text: |
az network nic ip-config update -g MyResourceGroup --nic-name MyNic \\
-n MyIpConfig --make-primary
- name: Update an IP configuration. (autogenerated)
text: |
az network nic ip-config update --name MyIpConfig --nic-name MyNic --public-ip-address MyAppGatewayPublicIp --resource-group MyResourceGroup
crafted: true
"""
helps['network nic list'] = """
type: command
short-summary: List network interfaces.
long-summary: >
To list network interfaces attached to VMs in VM scale sets use 'az vmss nic list' or 'az vmss nic list-vm-nics'.
examples:
- name: List all NICs by internal DNS suffix.
text: >
az network nic list --query "[?dnsSettings.internalDomainNameSuffix=`{dnsSuffix}`]"
"""
helps['network nic list-effective-nsg'] = """
type: command
short-summary: List all effective network security groups applied to a network interface.
long-summary: >
To learn more about troubleshooting using effective security rules visit https://docs.microsoft.com/azure/virtual-network/virtual-network-nsg-troubleshoot-portal
examples:
- name: List the effective security groups associated with a NIC.
text: az network nic list-effective-nsg -g MyResourceGroup -n MyNic
"""
helps['network nic show'] = """
type: command
short-summary: Get the details of a network interface.
examples:
- name: Get the internal domain name suffix of a NIC.
text: az network nic show -g MyResourceGroup -n MyNic --query "dnsSettings.internalDomainNameSuffix"
"""
helps['network nic show-effective-route-table'] = """
type: command
short-summary: Show the effective route table applied to a network interface.
long-summary: >
To learn more about troubleshooting using the effective route tables visit
https://docs.microsoft.com/azure/virtual-network/virtual-network-routes-troubleshoot-portal#using-effective-routes-to-troubleshoot-vm-traffic-flow
examples:
- name: Show the effective routes applied to a network interface.
text: az network nic show-effective-route-table -g MyResourceGroup -n MyNic
"""
helps['network nic update'] = """
type: command
short-summary: Update a network interface.
examples:
- name: Update a network interface to use a different network security group.
text: az network nic update -g MyResourceGroup -n MyNic --network-security-group MyNewNsg
- name: Update a network interface. (autogenerated)
text: |
az network nic update --accelerated-networking true --name MyNic --resource-group MyResourceGroup
crafted: true
"""
helps['network nic wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a condition of the network interface is met.
examples:
- name: Pause CLI until the network interface is created.
text: az network nic wait -g MyResourceGroup -n MyNic --created
"""
helps['network nsg'] = """
type: group
short-summary: Manage Azure Network Security Groups (NSGs).
long-summary: >
You can control network traffic to resources in a virtual network using a network security group.
A network security group contains a list of security rules that allow or deny inbound or
outbound network traffic based on source or destination IP addresses, Application Security
Groups, ports, and protocols. For more information visit https://docs.microsoft.com/azure/virtual-network/virtual-networks-create-nsg-arm-cli
"""
helps['network nsg create'] = """
type: command
short-summary: Create a network security group.
examples:
- name: Create an NSG in a resource group within a region with tags.
text: az network nsg create -g MyResourceGroup -n MyNsg --tags super_secure no_80 no_22
"""
helps['network nsg delete'] = """
type: command
short-summary: Delete a network security group.
examples:
- name: Delete an NSG in a resource group.
text: az network nsg delete -g MyResourceGroup -n MyNsg
"""
helps['network nsg list'] = """
type: command
short-summary: List network security groups.
examples:
- name: List all NSGs in the 'westus' region.
text: az network nsg list --query "[?location=='westus']"
"""
helps['network nsg rule'] = """
type: group
short-summary: Manage network security group rules.
"""
helps['network nsg rule create'] = """
type: command
short-summary: Create a network security group rule.
examples:
- name: Create a basic "Allow" NSG rule with the highest priority.
text: >
az network nsg rule create -g MyResourceGroup --nsg-name MyNsg -n MyNsgRule --priority 100
- name: Create a "Deny" rule over TCP for a specific IP address range with the lowest priority.
text: |
az network nsg rule create -g MyResourceGroup --nsg-name MyNsg -n MyNsgRule --priority 4096 \\
--source-address-prefixes 208.130.28/24 --source-port-ranges 80 \\
--destination-address-prefixes '*' --destination-port-ranges 80 8080 --access Deny \\
--protocol Tcp --description "Deny from specific IP address ranges on 80 and 8080."
- name: Create a security rule using service tags. For more details visit https://aka.ms/servicetags
text: |
az network nsg rule create -g MyResourceGroup --nsg-name MyNsg -n MyNsgRuleWithTags \\
--priority 400 --source-address-prefixes VirtualNetwork --destination-address-prefixes Storage \\
--destination-port-ranges '*' --direction Outbound --access Allow --protocol Tcp --description "Allow VirtualNetwork to Storage."
- name: Create a security rule using application security groups. https://aka.ms/applicationsecuritygroups
text: |
az network nsg rule create -g MyResourceGroup --nsg-name MyNsg -n MyNsgRuleWithAsg \\
--priority 500 --source-address-prefixes Internet --destination-port-ranges 80 8080 \\
--destination-asgs Web --access Allow --protocol Tcp --description "Allow Internet to Web ASG on ports 80,8080."
"""
helps['network nsg rule delete'] = """
type: command
short-summary: Delete a network security group rule.
examples:
- name: Delete a network security group rule.
text: az network nsg rule delete -g MyResourceGroup --nsg-name MyNsg -n MyNsgRule
"""
helps['network nsg rule list'] = """
type: command
short-summary: List all rules in a network security group.
examples:
- name: List all rules in a network security group.
text: az network nsg rule list -g MyResourceGroup --nsg-name MyNsg
"""
# Help metadata (knack YAML) for `az network nsg` rule/show/update and
# `az network private-endpoint` commands. Each entry maps a command path to a
# YAML document with type, short-summary, and CLI examples.
helps['network nsg rule show'] = """
type: command
short-summary: Get the details of a network security group rule.
examples:
- name: Get the details of a network security group rule.
text: az network nsg rule show -g MyResourceGroup --nsg-name MyNsg -n MyNsgRule
"""
helps['network nsg rule update'] = """
type: command
short-summary: Update a network security group rule.
examples:
- name: Update an NSG rule with a new wildcard destination address prefix.
text: az network nsg rule update -g MyResourceGroup --nsg-name MyNsg -n MyNsgRule --destination-address-prefix '*'
- name: Update a network security group rule. (autogenerated)
text: |
az network nsg rule update --name MyNsgRule --nsg-name MyNsg --resource-group MyResourceGroup --source-address-prefixes 208.130.28/24
crafted: true
"""
helps['network nsg show'] = """
type: command
short-summary: Get information about a network security group.
examples:
- name: Get basic information about an NSG.
text: az network nsg show -g MyResourceGroup -n MyNsg
- name: Get the default security rules of an NSG and format the output as a table.
text: az network nsg show -g MyResourceGroup -n MyNsg --query "defaultSecurityRules[]" -o table
- name: Get all default NSG rules with "Allow" access and format the output as a table.
text: az network nsg show -g MyResourceGroup -n MyNsg --query "defaultSecurityRules[?access=='Allow']" -o table
"""
helps['network nsg update'] = """
type: command
short-summary: Update a network security group.
long-summary: >
This command can only be used to update the tags of an NSG. Name and resource group are immutable and cannot be updated.
examples:
- name: Remove a tag of an NSG.
text: az network nsg update -g MyResourceGroup -n MyNsg --remove tags.no_80
- name: Update a network security group. (autogenerated)
text: |
az network nsg update --name MyNsg --resource-group MyResourceGroup --set tags.CostCenter=MyBusinessGroup
crafted: true
"""
helps['network private-endpoint'] = """
type: group
short-summary: Manage private endpoints.
"""
helps['network private-endpoint create'] = """
type: command
short-summary: Create a private endpoint.
examples:
- name: Create a private endpoint.
text: az network private-endpoint create -g MyResourceGroup -n MyPE --vnet-name MyVnetName --subnet MySubnet --private-connection-resource-id "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/MyResourceGroup/providers/Microsoft.Network/privateLinkServices/MyPLS" --connection-name tttt -l centralus
"""
helps['network private-endpoint delete'] = """
type: command
short-summary: Delete a private endpoint.
examples:
- name: Delete a private endpoint. (autogenerated)
text: |
az network private-endpoint delete --name MyPrivateEndpoint --resource-group MyResourceGroup
crafted: true
"""
helps['network private-endpoint list'] = """
type: command
short-summary: List private endpoints.
"""
helps['network private-endpoint show'] = """
type: command
short-summary: Get the details of a private endpoint.
examples:
- name: Get the details of a private endpoint (autogenerated)
text: |
az network private-endpoint show --name MyPrivateEndpoint --resource-group MyResourceGroup
crafted: true
"""
helps['network private-endpoint update'] = """
type: command
short-summary: Update a private endpoint.
examples:
- name: Update a private endpoint.
text: az network private-endpoint update -g MyResourceGroup -n MyPE --request-message "test" --tags mytag=hello
- name: Update a private endpoint. (autogenerated)
text: |
az network private-endpoint update --name MyPE --resource-group MyResourceGroup --set useRemoteGateways=true
crafted: true
"""
# Help metadata (knack YAML) for `az network private-endpoint dns-zone-group`.
# Fix: the `remove` short-summary said "Remove ... into a dns zone group";
# corrected to "from", matching the command's semantics.
helps['network private-endpoint dns-zone-group'] = """
type: group
short-summary: Manage private endpoint dns zone group.
"""
helps['network private-endpoint dns-zone-group create'] = """
type: command
short-summary: Create a private endpoint dns zone group.
examples:
- name: Create a private endpoint dns zone group.
text: az network private-endpoint dns-zone-group create --endpoint-name MyPE -g MyRG -n MyZoneGroup --zone-name Zone1 --private-dns-zone PrivateDNSZone1
"""
helps['network private-endpoint dns-zone-group add'] = """
type: command
short-summary: Add a private endpoint dns zone into a dns zone group.
examples:
- name: Add a private endpoint dns zone group.
text: az network private-endpoint dns-zone-group add --endpoint-name MyPE -g MyRG -n MyZoneGroup --zone-name Zone1 --private-dns-zone PrivateDNSZone1
"""
helps['network private-endpoint dns-zone-group remove'] = """
type: command
short-summary: Remove a private endpoint dns zone from a dns zone group.
examples:
- name: Remove a private endpoint dns zone group.
text: az network private-endpoint dns-zone-group remove --endpoint-name MyPE -g MyRG -n MyZoneGroup --zone-name Zone1
"""
helps['network private-endpoint dns-zone-group delete'] = """
type: command
short-summary: Delete a private endpoint dns zone group.
examples:
- name: Delete a private endpoint dns zone group. (autogenerated)
text: |
az network private-endpoint dns-zone-group delete --endpoint-name MyEndpoint --name MyPrivateDnsZoneGroup --resource-group MyResourceGroup
crafted: true
"""
helps['network private-endpoint dns-zone-group list'] = """
type: command
short-summary: List all private endpoint dns zone groups.
examples:
- name: List all private endpoint dns zone groups. (autogenerated)
text: |
az network private-endpoint dns-zone-group list --endpoint-name MyEndpoint --resource-group MyResourceGroup
crafted: true
"""
helps['network private-endpoint dns-zone-group show'] = """
type: command
short-summary: Show a private endpoint dns zone group.
examples:
- name: Show a private endpoint dns zone group. (autogenerated)
text: |
az network private-endpoint dns-zone-group show --endpoint-name MyEndpoint --name MyPrivateDnsZoneGroup --resource-group MyResourceGroup
crafted: true
"""
# Help metadata (knack YAML) for `az network private-link-service`.
# Fix: typo "filed" -> "field" in the `connection update` long-summary.
helps['network private-link-service'] = """
type: group
short-summary: Manage private link services.
"""
helps['network private-link-service connection'] = """
type: group
short-summary: Manage private link service endpoint connections.
"""
helps['network private-link-service connection delete'] = """
type: command
short-summary: Delete a private link service endpoint connection.
examples:
- name: Delete a private link service endpoint connection. (autogenerated)
text: |
az network private-link-service connection delete --name MyPrivateEndpointConnection --resource-group MyResourceGroup --service-name MyService
crafted: true
"""
helps['network private-link-service connection update'] = """
type: command
short-summary: Update a private link service endpoint connection.
long-summary: >
To update the connection status, the name of the connection should be provided.
Please obtain this name by running 'az network private-link-service show -g MyResourceGroup -n MyPLSName'.
The connection name is under the 'privateEndpointConnections' field.
examples:
- name: Update the endpoint connections status of private link service
text: az network private-link-service connection update -g MyResourceGroup -n MyEndpointName.f072a430-2d82-4470-ab30-d23fcfee58d1 --service-name MyPLSName --connection-status Rejected
"""
helps['network private-link-service create'] = """
type: command
short-summary: Create a private link service.
examples:
- name: Create a private link service
text: az network private-link-service create -g MyResourceGroup -n MyPLSName --vnet-name MyVnetName --subnet MySubnet --lb-name MyLBName --lb-frontend-ip-configs LoadBalancerFrontEnd -l centralus
"""
helps['network private-link-service delete'] = """
type: command
short-summary: Delete a private link service.
examples:
- name: Delete a private link service. (autogenerated)
text: |
az network private-link-service delete --name MyPrivateLinkService --resource-group MyResourceGroup
crafted: true
"""
helps['network private-link-service list'] = """
type: command
short-summary: List private link services.
"""
helps['network private-link-service show'] = """
type: command
short-summary: Get the details of a private link service.
examples:
- name: Get the details of a private link service. (autogenerated)
text: |
az network private-link-service show --name MyPrivateLinkService --resource-group MyResourceGroup
crafted: true
"""
helps['network private-link-service update'] = """
type: command
short-summary: Update a private link service.
examples:
- name: Update a private link service
text: az network private-link-service update -g MyResourceGroup -n MyPLSName --visibility SubId1 SubId2 --auto-approval SubId1 SubId2
"""
# Help metadata (knack YAML) for `az network private-endpoint-connection`,
# `az network private-link-resource`, and `az network profile` commands.
helps['network private-endpoint-connection'] = """
type: group
short-summary: Manage private endpoint connections.
"""
helps['network private-endpoint-connection approve'] = """
type: command
short-summary: Approve a private endpoint connection.
examples:
- name: Approve a private endpoint connection for a storage account.
text: az network private-endpoint-connection approve -g MyResourceGroup -n MyPrivateEndpoint --resource-name MySA --type Microsoft.Storage/storageAccounts --description "Approved"
- name: Approve a private endpoint connection for a keyvault.
text: az network private-endpoint-connection approve -g MyResourceGroup -n MyPrivateEndpoint --resource-name MyKV --type Microsoft.Keyvault/vaults --description "Approved"
- name: Approve a private endpoint connection for an ACR.
text: az network private-endpoint-connection approve --id /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/clitest.rg000001/providers/Microsoft.ContainerRegistry/registries/testreg000002/privateEndpointConnections/testreg000002.6e6bf72bc59d41cc89c698d4cc5ee79d --description "Approved"
"""
helps['network private-endpoint-connection reject'] = """
type: command
short-summary: Reject a private endpoint connection.
examples:
- name: Reject a private endpoint connection for a storage account.
text: az network private-endpoint-connection reject -g MyResourceGroup -n MyPrivateEndpoint --resource-name MySA --type Microsoft.Storage/storageAccounts --description "Rejected"
- name: Reject a private endpoint connection for a keyvault.
text: az network private-endpoint-connection reject -g MyResourceGroup -n MyPrivateEndpoint --resource-name MyKV --type Microsoft.Keyvault/vaults --description "Rejected"
- name: Reject a private endpoint connection for an ACR.
text: az network private-endpoint-connection reject --id /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/clitest.rg000001/providers/Microsoft.ContainerRegistry/registries/testreg000002/privateEndpointConnections/testreg000002.6e6bf72bc59d41cc89c698d4cc5ee79d --description "Rejected"
"""
helps['network private-endpoint-connection delete'] = """
type: command
short-summary: Delete a private endpoint connection.
examples:
- name: Delete a private endpoint connection for a storage account.
text: az network private-endpoint-connection delete -g MyResourceGroup -n MyPrivateEndpoint --resource-name MySA --type Microsoft.Storage/storageAccounts
- name: Delete a private endpoint connection for a keyvault.
text: az network private-endpoint-connection delete -g MyResourceGroup -n MyPrivateEndpoint --resource-name MyKV --type Microsoft.Keyvault/vaults
- name: Delete a private endpoint connection for an ACR.
text: az network private-endpoint-connection delete --id /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/clitest.rg000001/providers/Microsoft.ContainerRegistry/registries/testreg000002/privateEndpointConnections/testreg000002.6e6bf72bc59d41cc89c698d4cc5ee79d
"""
helps['network private-endpoint-connection show'] = """
type: command
short-summary: Show a private endpoint connection.
examples:
- name: Show a private endpoint connection for a storage account.
text: az network private-endpoint-connection show -g MyResourceGroup -n MyPrivateEndpoint --resource-name MySA --type Microsoft.Storage/storageAccounts
- name: Show a private endpoint connection for a keyvault.
text: az network private-endpoint-connection show -g MyResourceGroup -n MyPrivateEndpoint --resource-name MyKV --type Microsoft.Keyvault/vaults
- name: Show a private endpoint connection for an ACR.
text: az network private-endpoint-connection show --id /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/clitest.rg000001/providers/Microsoft.ContainerRegistry/registries/testreg000002/privateEndpointConnections/testreg000002.6e6bf72bc59d41cc89c698d4cc5ee79d
"""
helps['network private-endpoint-connection list'] = """
type: command
short-summary: List all private endpoint connections.
examples:
- name: List all private endpoint connections for a storage account.
text: az network private-endpoint-connection list -g MyResourceGroup -n MySA --type Microsoft.Storage/storageAccounts
- name: List all private endpoint connections for a keyvault.
text: az network private-endpoint-connection list -g MyResourceGroup -n MyKV --type Microsoft.Keyvault/vaults
- name: List all private endpoint connections for an ACR.
text: az network private-endpoint-connection list --id /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/clitest.rg000001/providers/Microsoft.ContainerRegistry/registries/testreg000002
"""
helps['network private-link-resource'] = """
type: group
short-summary: Manage private link resources.
"""
helps['network private-link-resource list'] = """
type: command
short-summary: List all private link resources.
examples:
- name: List all private link resources for a storage account.
text: az network private-link-resource list -g MyResourceGroup -n MySA --type Microsoft.Storage/storageAccounts
- name: List all private link resources for a keyvault.
text: az network private-link-resource list -g MyResourceGroup -n MyKV --type Microsoft.Keyvault/vaults
- name: List all private link resources for an ACR.
text: az network private-link-resource list --id /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/clitest.rg000001/providers/Microsoft.ContainerRegistry/registries/testreg000002
"""
helps['network profile'] = """
type: group
short-summary: Manage network profiles.
long-summary: >
To create a network profile, see the create command for the relevant resource. Currently,
only Azure Container Instances are supported.
"""
helps['network profile delete'] = """
type: command
short-summary: Delete a network profile.
examples:
- name: Delete a network profile. (autogenerated)
text: |
az network profile delete --name MyNetworkProfile --resource-group MyResourceGroup
crafted: true
"""
helps['network profile list'] = """
type: command
short-summary: List network profiles.
examples:
- name: List network profiles (autogenerated)
text: |
az network profile list --resource-group MyResourceGroup
crafted: true
"""
helps['network profile show'] = """
type: command
short-summary: Get the details of a network profile.
examples:
- name: Get the details of a network profile. (autogenerated)
text: |
az network profile show --name MyNetworkProfile --resource-group MyResourceGroup
crafted: true
"""
# Help metadata (knack YAML) for `az network public-ip` and its `prefix`
# subgroup: create/delete/list/show/update of public IP addresses and prefixes.
helps['network public-ip'] = """
type: group
short-summary: Manage public IP addresses.
long-summary: >
To learn more about public IP addresses visit https://docs.microsoft.com/azure/virtual-network/virtual-network-public-ip-address
"""
helps['network public-ip create'] = """
type: command
short-summary: Create a public IP address.
long-summary: >
[Coming breaking change] In the coming release, the default behavior will be changed as follows when sku is Standard and zone is not provided: For zonal regions, you will get a zone-redundant IP indicated by zones:["1","2","3"]; For non-zonal regions, you will get a non zone-redundant IP indicated by zones:[].
examples:
- name: Create a basic public IP resource.
text: az network public-ip create -g MyResourceGroup -n MyIp
- name: Create a static public IP resource for a DNS name label.
text: az network public-ip create -g MyResourceGroup -n MyIp --dns-name MyLabel --allocation-method Static
- name: Create a public IP resource in an availability zone in the current resource group region.
text: az network public-ip create -g MyResourceGroup -n MyIp --zone 2
"""
helps['network public-ip delete'] = """
type: command
short-summary: Delete a public IP address.
examples:
- name: Delete a public IP address.
text: az network public-ip delete -g MyResourceGroup -n MyIp
"""
helps['network public-ip list'] = """
type: command
short-summary: List public IP addresses.
examples:
- name: List all public IPs in a subscription.
text: az network public-ip list
- name: List all public IPs in a resource group.
text: az network public-ip list -g MyResourceGroup
- name: List all public IPs of a domain name label.
text: az network public-ip list -g MyResourceGroup --query "[?dnsSettings.domainNameLabel=='MyLabel']"
"""
helps['network public-ip prefix'] = """
type: group
short-summary: Manage public IP prefix resources.
"""
helps['network public-ip prefix create'] = """
type: command
short-summary: Create a public IP prefix resource.
examples:
- name: Create a public IP prefix resource. (autogenerated)
text: |
az network public-ip prefix create --length 28 --location westus2 --name MyPublicIPPrefix --resource-group MyResourceGroup
crafted: true
"""
helps['network public-ip prefix delete'] = """
type: command
short-summary: Delete a public IP prefix resource.
examples:
- name: Delete a public IP prefix resource. (autogenerated)
text: |
az network public-ip prefix delete --name MyPublicIPPrefix --resource-group MyResourceGroup
crafted: true
"""
helps['network public-ip prefix list'] = """
type: command
short-summary: List public IP prefix resources.
"""
helps['network public-ip prefix show'] = """
type: command
short-summary: Get the details of a public IP prefix resource.
examples:
- name: Get the details of a public IP prefix resource. (autogenerated)
text: |
az network public-ip prefix show --name MyPublicIPPrefix --resource-group MyResourceGroup --subscription MySubscription
crafted: true
"""
helps['network public-ip prefix update'] = """
type: command
short-summary: Update a public IP prefix resource.
examples:
- name: Update a public IP prefix resource. (autogenerated)
text: |
az network public-ip prefix update --name MyPublicIPPrefix --resource-group MyResourceGroup --set useRemoteGateways=true
crafted: true
"""
helps['network public-ip show'] = """
type: command
short-summary: Get the details of a public IP address.
examples:
- name: Get information about a public IP resource.
text: az network public-ip show -g MyResourceGroup -n MyIp
- name: Get the FQDN and IP address of a public IP resource.
text: >
az network public-ip show -g MyResourceGroup -n MyIp --query "{fqdn: dnsSettings.fqdn, address: ipAddress}"
"""
helps['network public-ip update'] = """
type: command
short-summary: Update a public IP address.
examples:
- name: Update a public IP resource with a DNS name label and static allocation.
text: az network public-ip update -g MyResourceGroup -n MyIp --dns-name MyLabel --allocation-method Static
"""
# Help metadata (knack YAML) for `az network route-filter` and its `rule`
# subgroup. Fix: "Gets all the available BGP service communities." used a
# non-imperative verb, inconsistent with every other summary in this file;
# changed to "Get all ..." (summary and matching example name).
helps['network route-filter'] = """
type: group
short-summary: Manage route filters.
long-summary: >
To learn more about route filters with Microsoft peering with ExpressRoute, visit https://docs.microsoft.com/azure/expressroute/how-to-routefilter-cli
"""
helps['network route-filter create'] = """
type: command
short-summary: Create a route filter.
examples:
- name: Create a route filter.
text: az network route-filter create -g MyResourceGroup -n MyRouteFilter
- name: Create a route filter. (autogenerated)
text: |
az network route-filter create --location westus2 --name MyRouteFilter --resource-group MyResourceGroup
crafted: true
"""
helps['network route-filter delete'] = """
type: command
short-summary: Delete a route filter.
examples:
- name: Delete a route filter.
text: az network route-filter delete -g MyResourceGroup -n MyRouteFilter
"""
helps['network route-filter list'] = """
type: command
short-summary: List route filters.
examples:
- name: List route filters in a resource group.
text: az network route-filter list -g MyResourceGroup
"""
helps['network route-filter rule'] = """
type: group
short-summary: Manage rules in a route filter.
long-summary: >
To learn more about route filters with Microsoft peering with ExpressRoute, visit https://docs.microsoft.com/azure/expressroute/how-to-routefilter-cli
"""
helps['network route-filter rule create'] = """
type: command
short-summary: Create a rule in a route filter.
parameters:
- name: --communities
short-summary: Space-separated list of border gateway protocol (BGP) community values to filter on.
populator-commands:
- az network route-filter rule list-service-communities
examples:
- name: Create a rule in a route filter to allow Dynamics 365.
text: |
az network route-filter rule create -g MyResourceGroup --filter-name MyRouteFilter \\
-n MyRouteFilterRule --communities 12076:5040 --access Allow
"""
helps['network route-filter rule delete'] = """
type: command
short-summary: Delete a rule from a route filter.
examples:
- name: Delete a rule from a route filter.
text: az network route-filter rule delete -g MyResourceGroup --filter-name MyRouteFilter -n MyRouteFilterRule
"""
helps['network route-filter rule list'] = """
type: command
short-summary: List rules in a route filter.
examples:
- name: List rules in a route filter.
text: az network route-filter rule list -g MyResourceGroup --filter-name MyRouteFilter
"""
helps['network route-filter rule list-service-communities'] = """
type: command
short-summary: Get all the available BGP service communities.
examples:
- name: Get all the available BGP service communities.
text: az network route-filter rule list-service-communities -o table
- name: Get the community value for Exchange.
text: |
az network route-filter rule list-service-communities \\
--query '[].bgpCommunities[?communityName==`Exchange`].[communityValue][][]' -o tsv
"""
helps['network route-filter rule show'] = """
type: command
short-summary: Get the details of a rule in a route filter.
examples:
- name: Get the details of a rule in a route filter.
text: az network route-filter rule show -g MyResourceGroup --filter-name MyRouteFilter -n MyRouteFilterRule
"""
helps['network route-filter rule update'] = """
type: command
short-summary: Update a rule in a route filter.
examples:
- name: Update a rule in a route filter to add Exchange to rule list.
text: |
az network route-filter rule update -g MyResourceGroup --filter-name MyRouteFilter \\
-n MyRouteFilterRule --add communities='12076:5010'
"""
helps['network route-filter show'] = """
type: command
short-summary: Get the details of a route filter.
examples:
- name: Get the details of a route filter.
text: az network route-filter show -g MyResourceGroup -n MyRouteFilter
- name: Get the details of a route filter. (autogenerated)
text: |
az network route-filter show --expand peerings --name MyRouteFilter --resource-group MyResourceGroup
crafted: true
"""
helps['network route-filter update'] = """
type: command
short-summary: Update a route filter.
long-summary: >
This command can only be used to update the tags for a route filter. Name and resource group are immutable and cannot be updated.
examples:
- name: Update the tags on a route filter.
text: az network route-filter update -g MyResourceGroup -n MyRouteFilter --set tags.CostCenter=MyBusinessGroup
"""
# Help metadata (knack YAML) for `az network route-table` and its `route`
# subgroup. Fixes: (1) removed a stray literal `\\` embedded in the middle of
# a single-line `route update` example, which rendered as a spurious backslash
# in help output; (2) typo "propogation" -> "propagation".
helps['network route-table'] = """
type: group
short-summary: Manage route tables.
"""
helps['network route-table create'] = """
type: command
short-summary: Create a route table.
examples:
- name: Create a route table.
text: az network route-table create -g MyResourceGroup -n MyRouteTable
"""
helps['network route-table delete'] = """
type: command
short-summary: Delete a route table.
examples:
- name: Delete a route table.
text: az network route-table delete -g MyResourceGroup -n MyRouteTable
"""
helps['network route-table list'] = """
type: command
short-summary: List route tables.
examples:
- name: List all route tables in a subscription.
text: az network route-table list -g MyResourceGroup
"""
helps['network route-table route'] = """
type: group
short-summary: Manage routes in a route table.
"""
helps['network route-table route create'] = """
type: command
short-summary: Create a route in a route table.
examples:
- name: Create a route that forces all inbound traffic to a Network Virtual Appliance.
text: |
az network route-table route create -g MyResourceGroup --route-table-name MyRouteTable -n MyRoute \\
--next-hop-type VirtualAppliance --address-prefix 10.0.0.0/16 --next-hop-ip-address 10.0.100.4
"""
helps['network route-table route delete'] = """
type: command
short-summary: Delete a route from a route table.
examples:
- name: Delete a route from a route table.
text: az network route-table route delete -g MyResourceGroup --route-table-name MyRouteTable -n MyRoute
"""
helps['network route-table route list'] = """
type: command
short-summary: List routes in a route table.
examples:
- name: List routes in a route table.
text: az network route-table route list -g MyResourceGroup --route-table-name MyRouteTable
"""
helps['network route-table route show'] = """
type: command
short-summary: Get the details of a route in a route table.
examples:
- name: Get the details of a route in a route table.
text: az network route-table route show -g MyResourceGroup --route-table-name MyRouteTable -n MyRoute -o table
"""
helps['network route-table route update'] = """
type: command
short-summary: Update a route in a route table.
examples:
- name: Update a route in a route table to change the next hop ip address.
text: az network route-table route update -g MyResourceGroup --route-table-name MyRouteTable -n MyRoute --next-hop-ip-address 10.0.100.5
- name: Update a route in a route table. (autogenerated)
text: |
az network route-table route update --address-prefix 10.0.0.0/16 --name MyRoute --next-hop-ip-address 10.0.100.5 --next-hop-type VirtualNetworkGateway --route-table-name MyRouteTable
crafted: true
"""
helps['network route-table show'] = """
type: command
short-summary: Get the details of a route table.
examples:
- name: Get the details of a route table.
text: az network route-table show -g MyResourceGroup -n MyRouteTable
"""
helps['network route-table update'] = """
type: command
short-summary: Update a route table.
examples:
- name: Update a route table to disable BGP route propagation.
text: az network route-table update -g MyResourceGroup -n MyRouteTable --disable-bgp-route-propagation true
"""
# Help metadata (knack YAML) for `az network service-endpoint` policy and
# policy-definition commands.
helps['network service-endpoint'] = """
type: group
short-summary: Manage policies related to service endpoints.
"""
helps['network service-endpoint policy'] = """
type: group
short-summary: Manage service endpoint policies.
"""
helps['network service-endpoint policy create'] = """
type: command
short-summary: Create a service endpoint policy.
examples:
- name: Create a service endpoint policy. (autogenerated)
text: |
az network service-endpoint policy create --name MyServiceEndpointPolicy --resource-group MyResourceGroup
crafted: true
"""
helps['network service-endpoint policy delete'] = """
type: command
short-summary: Delete a service endpoint policy.
"""
helps['network service-endpoint policy list'] = """
type: command
short-summary: List service endpoint policies.
examples:
- name: List service endpoint policies. (autogenerated)
text: |
az network service-endpoint policy list --resource-group MyResourceGroup
crafted: true
"""
helps['network service-endpoint policy show'] = """
type: command
short-summary: Get the details of a service endpoint policy.
examples:
- name: Get the details of a service endpoint policy. (autogenerated)
text: |
az network service-endpoint policy show --name MyServiceEndpointPolicy --resource-group MyResourceGroup
crafted: true
"""
helps['network service-endpoint policy update'] = """
type: command
short-summary: Update a service endpoint policy.
"""
helps['network service-endpoint policy-definition'] = """
type: group
short-summary: Manage service endpoint policy definitions.
"""
helps['network service-endpoint policy-definition create'] = """
type: command
short-summary: Create a service endpoint policy definition.
parameters:
- name: --service
populator-commands:
- az network service-endpoint list
"""
helps['network service-endpoint policy-definition delete'] = """
type: command
short-summary: Delete a service endpoint policy definition.
"""
helps['network service-endpoint policy-definition list'] = """
type: command
short-summary: List service endpoint policy definitions.
examples:
- name: List service endpoint policy definitions. (autogenerated)
text: |
az network service-endpoint policy-definition list --policy-name MyPolicy --resource-group MyResourceGroup --subscription MySubscription
crafted: true
"""
helps['network service-endpoint policy-definition show'] = """
type: command
short-summary: Get the details of a service endpoint policy definition.
"""
helps['network service-endpoint policy-definition update'] = """
type: command
short-summary: Update a service endpoint policy definition.
examples:
- name: Update a service endpoint policy definition. (autogenerated)
text: |
az network service-endpoint policy-definition update --add communities='12076:5010' --name MyServiceEndpointPolicyDefinition --policy-name MyPolicy --resource-group MyResourceGroup --subscription MySubscription
crafted: true
"""
# Help metadata (knack YAML) for `az network traffic-manager` and its
# `endpoint` subgroup. Fix: removed stray literal `\\` markers embedded in the
# middle of the single-line `endpoint delete` and `endpoint update` examples;
# they rendered as spurious backslashes in help output.
helps['network traffic-manager'] = """
type: group
short-summary: Manage the routing of incoming traffic.
"""
helps['network traffic-manager endpoint'] = """
type: group
short-summary: Manage Azure Traffic Manager end points.
"""
helps['network traffic-manager endpoint create'] = """
type: command
short-summary: Create a traffic manager endpoint.
parameters:
- name: --geo-mapping
populator-commands:
- az network traffic-manager endpoint show-geographic-hierarchy
examples:
- name: Create an endpoint for a performance profile to point to an Azure Web App endpoint.
text: |
az network traffic-manager endpoint create -g MyResourceGroup --profile-name MyTmProfile \\
-n MyEndpoint --type azureEndpoints --target-resource-id $MyWebApp1Id --endpoint-status enabled
"""
helps['network traffic-manager endpoint delete'] = """
type: command
short-summary: Delete a traffic manager endpoint.
examples:
- name: Delete a traffic manager endpoint.
text: az network traffic-manager endpoint delete -g MyResourceGroup --profile-name MyTmProfile -n MyEndpoint --type azureEndpoints
- name: Delete a traffic manager endpoint. (autogenerated)
text: |
az network traffic-manager endpoint delete --name MyEndpoint --profile-name MyTmProfile --resource-group MyResourceGroup --subscription MySubscription --type azureEndpoints
crafted: true
"""
helps['network traffic-manager endpoint list'] = """
type: command
short-summary: List traffic manager endpoints.
examples:
- name: List traffic manager endpoints.
text: az network traffic-manager endpoint list -g MyResourceGroup --profile-name MyTmProfile
"""
helps['network traffic-manager endpoint show'] = """
type: command
short-summary: Get the details of a traffic manager endpoint.
examples:
- name: Get the details of a traffic manager endpoint.
text: |
az network traffic-manager endpoint show -g MyResourceGroup \\
--profile-name MyTmProfile -n MyEndpoint --type azureEndpoints
"""
helps['network traffic-manager endpoint show-geographic-hierarchy'] = """
type: command
short-summary: Get the default geographic hierarchy used by the geographic traffic routing method.
examples:
- name: Get the default geographic hierarchy used by the geographic traffic routing method.
text: az network traffic-manager endpoint show-geographic-hierarchy
"""
helps['network traffic-manager endpoint update'] = """
type: command
short-summary: Update a traffic manager endpoint.
examples:
- name: Update a traffic manager endpoint to change its weight.
text: az network traffic-manager endpoint update -g MyResourceGroup --profile-name MyTmProfile -n MyEndpoint --weight 20 --type azureEndpoints
- name: Update a traffic manager endpoint. (autogenerated)
text: |
az network traffic-manager endpoint update --name MyEndpoint --profile-name MyTmProfile --resource-group MyResourceGroup --target webserver.mysite.com --type azureEndpoints
crafted: true
- name: Update a traffic manager endpoint. (autogenerated)
text: |
az network traffic-manager endpoint update --endpoint-status Enabled --name MyEndpoint --profile-name MyTmProfile --resource-group MyResourceGroup --type azureEndpoints
crafted: true
"""
# Help metadata (knack YAML) for `az network traffic-manager profile`.
# Fix: typo "avabilility" -> "availability" in the check-dns long-summary.
helps['network traffic-manager profile'] = """
type: group
short-summary: Manage Azure Traffic Manager profiles.
"""
helps['network traffic-manager profile check-dns'] = """
type: command
short-summary: Check the availability of a relative DNS name.
long-summary: This checks for the availability of dns prefixes for trafficmanager.net.
examples:
- name: Check the availability of 'mywebapp.trafficmanager.net' in Azure.
text: az network traffic-manager profile check-dns -n mywebapp
"""
helps['network traffic-manager profile create'] = """
type: command
short-summary: Create a traffic manager profile.
examples:
- name: Create a traffic manager profile with performance routing.
text: |
az network traffic-manager profile create -g MyResourceGroup -n MyTmProfile --routing-method Performance \\
--unique-dns-name mywebapp --ttl 30 --protocol HTTP --port 80 --path "/"
"""
helps['network traffic-manager profile delete'] = """
type: command
short-summary: Delete a traffic manager profile.
examples:
- name: Delete a traffic manager profile.
text: az network traffic-manager profile delete -g MyResourceGroup -n MyTmProfile
- name: Delete a traffic manager profile. (autogenerated)
text: |
az network traffic-manager profile delete --name MyTmProfile --resource-group MyResourceGroup --subscription MySubscription
crafted: true
"""
helps['network traffic-manager profile list'] = """
type: command
short-summary: List traffic manager profiles.
examples:
- name: List traffic manager profiles.
text: az network traffic-manager profile list -g MyResourceGroup
"""
helps['network traffic-manager profile show'] = """
type: command
short-summary: Get the details of a traffic manager profile.
examples:
- name: Get the details of a traffic manager profile.
text: az network traffic-manager profile show -g MyResourceGroup -n MyTmProfile
"""
helps['network traffic-manager profile update'] = """
type: command
short-summary: Update a traffic manager profile.
examples:
- name: Update a traffic manager profile to change the TTL to 300.
text: az network traffic-manager profile update -g MyResourceGroup -n MyTmProfile --ttl 300
- name: Update a traffic manager profile. (autogenerated)
text: |
az network traffic-manager profile update --name MyTmProfile --resource-group MyResourceGroup --status Enabled
crafted: true
- name: Update a traffic manager profile. (autogenerated)
text: |
az network traffic-manager profile update --name MyTmProfile --path "/" --routing-method Performance
crafted: true
"""
helps['network vnet'] = """
type: group
short-summary: Manage Azure Virtual Networks.
long-summary: To learn more about Virtual Networks visit https://docs.microsoft.com/azure/virtual-network/virtual-network-manage-network
"""
helps['network vnet check-ip-address'] = """
type: command
short-summary: Check if a private IP address is available for use within a virtual network.
examples:
- name: Check whether 10.0.0.4 is available within MyVnet.
text: az network vnet check-ip-address -g MyResourceGroup -n MyVnet --ip-address 10.0.0.4
"""
helps['network vnet create'] = """
type: command
short-summary: Create a virtual network.
long-summary: >
You may also create a subnet at the same time by specifying a subnet name and (optionally) an address prefix.
To learn about how to create a virtual network visit https://docs.microsoft.com/azure/virtual-network/manage-virtual-network#create-a-virtual-network
examples:
- name: Create a virtual network.
text: az network vnet create -g MyResourceGroup -n MyVnet
- name: Create a virtual network with a specific address prefix and one subnet.
text: |
az network vnet create -g MyResourceGroup -n MyVnet --address-prefix 10.0.0.0/16 \\
--subnet-name MySubnet --subnet-prefix 10.0.0.0/24
- name: Create a virtual network. (autogenerated)
text: |
az network vnet create --address-prefixes 10.0.0.0/16 --name MyVirtualNetwork --resource-group MyResourceGroup --subnet-name MyAseSubnet --subnet-prefixes 10.0.0.0/24
crafted: true
"""
helps['network vnet delete'] = """
type: command
short-summary: Delete a virtual network.
examples:
- name: Delete a virtual network.
text: az network vnet delete -g MyResourceGroup -n myVNet
"""
helps['network vnet list'] = """
type: command
short-summary: List virtual networks.
examples:
- name: List all virtual networks in a subscription.
text: az network vnet list
- name: List all virtual networks in a resource group.
text: az network vnet list -g MyResourceGroup
- name: List virtual networks in a subscription which specify a certain address prefix.
text: az network vnet list --query "[?contains(addressSpace.addressPrefixes, '10.0.0.0/16')]"
"""
helps['network vnet list-endpoint-services'] = """
type: command
short-summary: List which services support VNET service tunneling in a given region.
long-summary: To learn more about service endpoints visit https://docs.microsoft.com/azure/virtual-network/virtual-network-service-endpoints-configure#azure-cli
examples:
- name: List the endpoint services available for use in the West US region.
text: az network vnet list-endpoint-services -l westus -o table
"""
helps['network vnet peering'] = """
type: group
short-summary: Manage peering connections between Azure Virtual Networks.
long-summary: To learn more about virtual network peering visit https://docs.microsoft.com/azure/virtual-network/virtual-network-manage-peering
"""
helps['network vnet peering create'] = """
type: command
short-summary: Create a virtual network peering connection.
long-summary: >
To successfully peer two virtual networks this command must be called twice with
the values for --vnet-name and --remote-vnet reversed.
examples:
- name: Create a peering connection between two virtual networks.
text: |
az network vnet peering create -g MyResourceGroup -n MyVnet1ToMyVnet2 --vnet-name MyVnet1 \\
--remote-vnet MyVnet2Id --allow-vnet-access
"""
helps['network vnet peering delete'] = """
type: command
short-summary: Delete a peering.
examples:
- name: Delete a virtual network peering connection.
text: az network vnet peering delete -g MyResourceGroup -n MyVnet1ToMyVnet2 --vnet-name MyVnet1
"""
helps['network vnet peering list'] = """
type: command
short-summary: List peerings.
examples:
- name: List all peerings of a specified virtual network.
text: az network vnet peering list -g MyResourceGroup --vnet-name MyVnet1
"""
helps['network vnet peering show'] = """
type: command
short-summary: Show details of a peering.
examples:
- name: Show all details of the specified virtual network peering.
text: az network vnet peering show -g MyResourceGroup -n MyVnet1ToMyVnet2 --vnet-name MyVnet1
"""
helps['network vnet peering update'] = """
type: command
short-summary: Update a peering.
examples:
- name: Change forwarded traffic configuration of a virtual network peering.
text: >
az network vnet peering update -g MyResourceGroup -n MyVnet1ToMyVnet2 --vnet-name MyVnet1 --set allowForwardedTraffic=true
- name: Change virtual network access of a virtual network peering.
text: >
az network vnet peering update -g MyResourceGroup -n MyVnet1ToMyVnet2 --vnet-name MyVnet1 --set allowVirtualNetworkAccess=true
- name: Change gateway transit property configuration of a virtual network peering.
text: >
az network vnet peering update -g MyResourceGroup -n MyVnet1ToMyVnet2 --vnet-name MyVnet1 --set allowGatewayTransit=true
- name: Use remote gateways in virtual network peering.
text: >
az network vnet peering update -g MyResourceGroup -n MyVnet1ToMyVnet2 --vnet-name MyVnet1 --set useRemoteGateways=true
"""
helps['network vnet show'] = """
type: command
short-summary: Get the details of a virtual network.
examples:
- name: Get details for MyVNet.
text: az network vnet show -g MyResourceGroup -n MyVNet
"""
helps['network vnet list-available-ips'] = """
type: command
short-summary: List some available IPs in the vnet.
examples:
  - name: List some available IPs in the vnet.
    text: az network vnet list-available-ips -g MyResourceGroup -n MyVNet
"""
helps['network vnet subnet'] = """
type: group
short-summary: Manage subnets in an Azure Virtual Network.
long-summary: To learn more about subnets visit https://docs.microsoft.com/azure/virtual-network/virtual-network-manage-subnet
"""
helps['network vnet subnet create'] = """
type: command
short-summary: Create a subnet and associate an existing NSG and route table.
parameters:
- name: --service-endpoints
short-summary: Space-separated list of services allowed private access to this subnet.
populator-commands:
- az network vnet list-endpoint-services
- name: --nat-gateway
short-summary: Attach Nat Gateway to subnet
examples:
- name: Create new subnet attached to an NSG with a custom route table.
text: |
az network vnet subnet create -g MyResourceGroup --vnet-name MyVnet -n MySubnet \\
--address-prefixes 10.0.0.0/24 --network-security-group MyNsg --route-table MyRouteTable
- name: Create new subnet attached to a NAT gateway.
text: az network vnet subnet create -n MySubnet --vnet-name MyVnet -g MyResourceGroup --nat-gateway MyNatGateway --address-prefixes "10.0.0.0/21"
"""
helps['network vnet subnet delete'] = """
type: command
short-summary: Delete a subnet.
examples:
- name: Delete a subnet.
text: az network vnet subnet delete -g MyResourceGroup -n MySubnet
- name: Delete a subnet. (autogenerated)
text: |
az network vnet subnet delete --name MySubnet --resource-group MyResourceGroup --vnet-name MyVnet
crafted: true
"""
helps['network vnet subnet list'] = """
type: command
short-summary: List the subnets in a virtual network.
examples:
- name: List the subnets in a virtual network.
text: az network vnet subnet list -g MyResourceGroup --vnet-name MyVNet
"""
helps['network vnet subnet list-available-delegations'] = """
type: command
short-summary: List the services available for subnet delegation.
examples:
- name: Retrieve the service names for available delegations in the West US region.
text: az network vnet subnet list-available-delegations -l westus --query [].serviceName
- name: List the services available for subnet delegation. (autogenerated)
text: |
az network vnet subnet list-available-delegations --resource-group MyResourceGroup
crafted: true
"""
helps['network vnet subnet show'] = """
type: command
short-summary: Show details of a subnet.
examples:
- name: Show the details of a subnet associated with a virtual network.
text: az network vnet subnet show -g MyResourceGroup -n MySubnet --vnet-name MyVNet
"""
helps['network vnet subnet update'] = """
type: command
short-summary: Update a subnet.
parameters:
- name: --service-endpoints
short-summary: Space-separated list of services allowed private access to this subnet.
populator-commands:
- az network vnet list-endpoint-services
- name: --nat-gateway
short-summary: Attach Nat Gateway to subnet
examples:
- name: Associate a network security group to a subnet.
text: az network vnet subnet update -g MyResourceGroup -n MySubnet --vnet-name MyVNet --network-security-group MyNsg
- name: Update subnet with NAT gateway.
text: az network vnet subnet update -n MySubnet --vnet-name MyVnet -g MyResourceGroup --nat-gateway MyNatGateway --address-prefixes "10.0.0.0/21"
- name: Disable the private endpoint network policies
text: az network vnet subnet update -n MySubnet --vnet-name MyVnet -g MyResourceGroup --disable-private-endpoint-network-policies
"""
helps['network vnet update'] = """
type: command
short-summary: Update a virtual network.
examples:
- name: Update a virtual network with the IP address of a DNS server.
text: az network vnet update -g MyResourceGroup -n MyVNet --dns-servers 10.2.0.8
- name: Update a virtual network. (autogenerated)
text: |
az network vnet update --address-prefixes 40.1.0.0/24 --name MyVNet --resource-group MyResourceGroup
crafted: true
"""
helps['network vnet-gateway'] = """
type: group
short-summary: Use an Azure Virtual Network Gateway to establish secure, cross-premises connectivity.
long-summary: >
To learn more about Azure Virtual Network Gateways, visit https://docs.microsoft.com/azure/vpn-gateway/vpn-gateway-howto-site-to-site-resource-manager-cli
"""
helps['network vnet-gateway create'] = """
type: command
short-summary: Create a virtual network gateway.
examples:
- name: Create a basic virtual network gateway for site-to-site connectivity.
text: |
az network vnet-gateway create -g MyResourceGroup -n MyVnetGateway --public-ip-address MyGatewayIp \\
--vnet MyVnet --gateway-type Vpn --sku VpnGw1 --vpn-type RouteBased --no-wait
- name: >
Create a basic virtual network gateway that provides point-to-site connectivity with a RADIUS secret that matches what is configured on a RADIUS server.
text: |
az network vnet-gateway create -g MyResourceGroup -n MyVnetGateway --public-ip-address MyGatewayIp \\
--vnet MyVnet --gateway-type Vpn --sku VpnGw1 --vpn-type RouteBased --address-prefixes 40.1.0.0/24 \\
--client-protocol IkeV2 SSTP --radius-secret 111_aaa --radius-server 30.1.1.15 --vpn-gateway-generation Generation1
- name: Create a virtual network gateway. (autogenerated)
text: |
az network vnet-gateway create --gateway-type Vpn --location westus2 --name MyVnetGateway --no-wait --public-ip-addresses myVGPublicIPAddress --resource-group MyResourceGroup --sku Basic --vnet MyVnet --vpn-type PolicyBased
crafted: true
"""
helps['network vnet-gateway delete'] = """
type: command
short-summary: Delete a virtual network gateway.
long-summary: >
In order to delete a Virtual Network Gateway, you must first delete ALL Connection objects in Azure that are
connected to the Gateway. After deleting the Gateway, proceed to delete other resources now not in use.
For more information, follow the order of instructions on this page:
https://docs.microsoft.com/azure/vpn-gateway/vpn-gateway-delete-vnet-gateway-portal
examples:
- name: Delete a virtual network gateway.
text: az network vnet-gateway delete -g MyResourceGroup -n MyVnetGateway
"""
helps['network vnet-gateway ipsec-policy'] = """
type: group
short-summary: Manage virtual network gateway IPSec policies.
"""
helps['network vnet-gateway ipsec-policy add'] = """
type: command
short-summary: Add a virtual network gateway IPSec policy.
long-summary: Set all IPsec policies of a virtual network gateway. If you want to set any IPsec policy, you must set them all.
examples:
- name: Add specified IPsec policies to a gateway instead of relying on defaults.
text: |
az network vnet-gateway ipsec-policy add -g MyResourceGroup --gateway-name MyGateway \\
--dh-group DHGroup14 --ike-encryption AES256 --ike-integrity SHA384 --ipsec-encryption DES3 \\
--ipsec-integrity GCMAES256 --pfs-group PFS2048 --sa-lifetime 27000 --sa-max-size 102400000
"""
helps['network vnet-gateway ipsec-policy clear'] = """
type: command
short-summary: Delete all IPsec policies on a virtual network gateway.
examples:
- name: Remove all previously specified IPsec policies from a gateway.
text: az network vnet-gateway ipsec-policy clear -g MyResourceGroup --gateway-name MyConnection
"""
helps['network vnet-gateway ipsec-policy list'] = """
type: command
short-summary: List IPSec policies associated with a virtual network gateway.
examples:
- name: List the IPsec policies set on a gateway.
text: az network vnet-gateway ipsec-policy list -g MyResourceGroup --gateway-name MyConnection
"""
helps['network vnet-gateway list'] = """
type: command
short-summary: List virtual network gateways.
examples:
- name: List virtual network gateways in a resource group.
text: az network vnet-gateway list -g MyResourceGroup
"""
helps['network vnet-gateway list-advertised-routes'] = """
type: command
short-summary: List the routes of a virtual network gateway advertised to the specified peer.
examples:
- name: List the routes of a virtual network gateway advertised to the specified peer.
text: az network vnet-gateway list-advertised-routes -g MyResourceGroup -n MyVnetGateway --peer 23.10.10.9
"""
helps['network vnet-gateway list-bgp-peer-status'] = """
type: command
short-summary: Retrieve the status of BGP peers.
examples:
- name: Retrieve the status of a BGP peer.
text: az network vnet-gateway list-bgp-peer-status -g MyResourceGroup -n MyVnetGateway --peer 23.10.10.9
"""
helps['network vnet-gateway list-learned-routes'] = """
type: command
short-summary: Retrieve a list of routes the virtual network gateway has learned, including routes learned from BGP peers.
examples:
- name: Retrieve a list of learned routes.
text: az network vnet-gateway list-learned-routes -g MyResourceGroup -n MyVnetGateway
"""
helps['network vnet-gateway reset'] = """
type: command
short-summary: Reset a virtual network gateway.
examples:
- name: Reset a virtual network gateway.
text: az network vnet-gateway reset -g MyResourceGroup -n MyVnetGateway
- name: Reset a virtual network gateway with Active-Active feature enabled.
text: az network vnet-gateway reset -g MyResourceGroup -n MyVnetGateway --gateway-vip MyGatewayIP
"""
helps['network vnet-gateway revoked-cert'] = """
type: group
short-summary: Manage revoked certificates in a virtual network gateway.
long-summary: Prevent machines using this certificate from accessing Azure through this gateway.
"""
helps['network vnet-gateway revoked-cert create'] = """
type: command
short-summary: Revoke a certificate.
examples:
- name: Revoke a certificate.
text: |
az network vnet-gateway revoked-cert create -g MyResourceGroup -n MyRootCertificate \\
--gateway-name MyVnetGateway --thumbprint abc123
"""
helps['network vnet-gateway revoked-cert delete'] = """
type: command
short-summary: Delete a revoked certificate.
examples:
- name: Delete a revoked certificate.
text: az network vnet-gateway revoked-cert delete -g MyResourceGroup -n MyRootCertificate --gateway-name MyVnetGateway
- name: Delete a revoked certificate. (autogenerated)
text: |
az network vnet-gateway revoked-cert delete --gateway-name MyVnetGateway --name MyRootCertificate --resource-group MyResourceGroup --subscription MySubscription
crafted: true
"""
helps['network vnet-gateway root-cert'] = """
type: group
short-summary: Manage root certificates of a virtual network gateway.
"""
helps['network vnet-gateway root-cert create'] = """
type: command
short-summary: Upload a root certificate.
examples:
- name: Add a Root Certificate to the list of certs allowed to connect to this Gateway.
text: |
az network vnet-gateway root-cert create -g MyResourceGroup -n MyRootCertificate \\
--gateway-name MyVnetGateway --public-cert-data MyCertificateData
"""
helps['network vnet-gateway root-cert delete'] = """
type: command
short-summary: Delete a root certificate.
examples:
- name: Remove a certificate from the list of Root Certificates whose children are allowed to access this Gateway.
text: az network vnet-gateway root-cert delete -g MyResourceGroup -n MyRootCertificate --gateway-name MyVnetGateway
- name: Delete a root certificate. (autogenerated)
text: |
az network vnet-gateway root-cert delete --gateway-name MyVnetGateway --name MyRootCertificate --resource-group MyResourceGroup --subscription MySubscription
crafted: true
"""
helps['network vnet-gateway show'] = """
type: command
short-summary: Get the details of a virtual network gateway.
examples:
- name: Get the details of a virtual network gateway.
text: az network vnet-gateway show -g MyResourceGroup -n MyVnetGateway
"""
helps['network vnet-gateway update'] = """
type: command
short-summary: Update a virtual network gateway.
examples:
- name: Change the SKU of a virtual network gateway.
text: az network vnet-gateway update -g MyResourceGroup -n MyVnetGateway --sku VpnGw2
- name: Update a virtual network gateway. (autogenerated)
text: |
az network vnet-gateway update --address-prefixes 40.1.0.0/24 --client-protocol IkeV2 --name MyVnetGateway --resource-group MyResourceGroup
crafted: true
- name: Update a virtual network gateway. (autogenerated)
text: |
az network vnet-gateway update --name MyVnetGateway --remove tags.no_80 --resource-group MyResourceGroup
crafted: true
"""
helps['network vnet-gateway vpn-client'] = """
type: group
short-summary: Download a VPN client configuration required to connect to Azure via point-to-site.
"""
helps['network vnet-gateway vpn-client generate'] = """
type: command
short-summary: Generate VPN client configuration.
long-summary: The command outputs a URL to a zip file for the generated VPN client configuration.
examples:
- name: Create the VPN client configuration for RADIUS with EAP-MSCHAV2 authentication.
text: az network vnet-gateway vpn-client generate -g MyResourceGroup -n MyVnetGateway --authentication-method EAPMSCHAPv2
- name: Create the VPN client configuration for AMD64 architecture.
text: az network vnet-gateway vpn-client generate -g MyResourceGroup -n MyVnetGateway --processor-architecture Amd64
- name: Generate VPN client configuration. (autogenerated)
text: |
az network vnet-gateway vpn-client generate --name MyVnetGateway --processor-architecture Amd64 --resource-group MyResourceGroup --subscription MySubscription
crafted: true
"""
helps['network vnet-gateway vpn-client show-url'] = """
type: command
short-summary: Retrieve a pre-generated VPN client configuration.
long-summary: The profile needs to be generated first using the vpn-client generate command.
examples:
- name: Get the pre-generated point-to-site VPN client of the virtual network gateway.
text: az network vnet-gateway vpn-client show-url -g MyResourceGroup -n MyVnetGateway
"""
helps['network vnet-gateway wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a condition of the virtual network gateway is met.
examples:
- name: Pause CLI until the virtual network gateway is created.
text: az network vnet-gateway wait -g MyResourceGroup -n MyVnetGateway --created
- name: Place the CLI in a waiting state until a condition of the virtual network gateway is met. (autogenerated)
text: |
az network vnet-gateway wait --name MyVnetGateway --resource-group MyResourceGroup --updated
crafted: true
"""
helps['network vnet-gateway aad'] = """
type: group
short-summary: Manage AAD (Azure Active Directory) authentication of a virtual network gateway.
"""
helps['network vnet-gateway aad assign'] = """
type: command
short-summary: Assign/Update AAD (Azure Active Directory) authentication to a virtual network gateway.
examples:
- name: Assign AAD authentication to a virtual network gateway
text: |-
az network vnet-gateway aad assign \\
--resource-group MyResourceGroup \\
--gateway-name MyVnetGateway \\
--tenant MyAADTenantURI \\
--audience MyAADAudienceId \\
--issuer MyAADIssuerURI
"""
helps['network vnet-gateway aad show'] = """
type: command
short-summary: Show AAD (Azure Active Directory) authentication of a virtual network gateway.
examples:
- name: Show AAD information
text: az network vnet-gateway aad show --resource-group MyResourceGroup --gateway-name MyVnetGateway
"""
helps['network vnet-gateway aad remove'] = """
type: command
short-summary: Remove AAD (Azure Active Directory) authentication from a virtual network gateway.
examples:
- name: Remove AAD information
text: az network vnet-gateway aad remove --resource-group MyResourceGroup --gateway-name MyVnetGateway
"""
helps['network vpn-connection'] = """
type: group
short-summary: Manage VPN connections.
long-summary: >
For more information on site-to-site connections,
visit https://docs.microsoft.com/azure/vpn-gateway/vpn-gateway-howto-site-to-site-resource-manager-cli.
For more information on Vnet-to-Vnet connections, visit https://docs.microsoft.com/azure/vpn-gateway/vpn-gateway-howto-vnet-vnet-cli
"""
helps['network vpn-connection create'] = """
type: command
short-summary: Create a VPN connection.
long-summary: The VPN Gateway and Local Network Gateway must be provisioned before creating the connection between them.
parameters:
- name: --vnet-gateway1
short-summary: Name or ID of the source virtual network gateway.
- name: --vnet-gateway2
short-summary: Name or ID of the destination virtual network gateway to connect to using a 'Vnet2Vnet' connection.
- name: --local-gateway2
short-summary: Name or ID of the destination local network gateway to connect to using an 'IPSec' connection.
- name: --express-route-circuit2
short-summary: Name or ID of the destination ExpressRoute to connect to using an 'ExpressRoute' connection.
- name: --authorization-key
short-summary: The authorization key for the VPN connection.
- name: --enable-bgp
short-summary: Enable BGP for this VPN connection.
- name: --validate
short-summary: Display and validate the ARM template but do not create any resources.
examples:
- name: >
Create a site-to-site connection between an Azure virtual network and an on-premises local network gateway.
text: >
az network vpn-connection create -g MyResourceGroup -n MyConnection --vnet-gateway1 MyVnetGateway --local-gateway2 MyLocalGateway --shared-key Abc123
- name: Create a VPN connection. (autogenerated)
text: |
az network vpn-connection create --location westus2 --name MyConnection --resource-group MyResourceGroup --shared-key Abc123 --vnet-gateway1 MyVnetGateway --vnet-gateway2 /subscriptions/{subscriptionID}/resourceGroups/TestBGPRG1/providers/Microsoft.Network/virtualNetworkGateways/VNet1GW
crafted: true
- name: Create a VPN connection. (autogenerated)
text: |
az network vpn-connection create --local-gateway2 MyLocalGateway --location westus2 --name MyConnection --resource-group MyResourceGroup --shared-key Abc123 --vnet-gateway1 MyVnetGateway
crafted: true
"""
helps['network vpn-connection delete'] = """
type: command
short-summary: Delete a VPN connection.
examples:
- name: Delete a VPN connection.
text: az network vpn-connection delete -g MyResourceGroup -n MyConnection
"""
helps['network vpn-connection ipsec-policy'] = """
type: group
short-summary: Manage VPN connection IPSec policies.
"""
helps['network vpn-connection ipsec-policy add'] = """
type: command
short-summary: Add a VPN connection IPSec policy.
long-summary: Set all IPsec policies of a VPN connection. If you want to set any IPsec policy, you must set them all.
examples:
- name: Add specified IPsec policies to a connection instead of relying on defaults.
text: |
az network vpn-connection ipsec-policy add -g MyResourceGroup --connection-name MyConnection \\
--dh-group DHGroup14 --ike-encryption AES256 --ike-integrity SHA384 --ipsec-encryption DES3 \\
--ipsec-integrity GCMAES256 --pfs-group PFS2048 --sa-lifetime 27000 --sa-max-size 102400000
"""
helps['network vpn-connection ipsec-policy clear'] = """
type: command
short-summary: Delete all IPsec policies on a VPN connection.
examples:
- name: Remove all previously specified IPsec policies from a connection.
text: az network vpn-connection ipsec-policy clear -g MyResourceGroup --connection-name MyConnection
"""
helps['network vpn-connection ipsec-policy list'] = """
type: command
short-summary: List IPSec policies associated with a VPN connection.
examples:
- name: List the IPsec policies set on a connection.
text: az network vpn-connection ipsec-policy list -g MyResourceGroup --connection-name MyConnection
"""
helps['network vpn-connection list'] = """
type: command
short-summary: List all VPN connections in a resource group.
examples:
- name: List all VPN connections in a resource group.
text: az network vpn-connection list -g MyResourceGroup
"""
helps['network vpn-connection shared-key'] = """
type: group
short-summary: Manage VPN shared keys.
"""
helps['network vpn-connection shared-key reset'] = """
type: command
short-summary: Reset a VPN connection shared key.
examples:
- name: Reset the shared key on a connection.
text: az network vpn-connection shared-key reset -g MyResourceGroup --connection-name MyConnection --key-length 128
"""
helps['network vpn-connection shared-key show'] = """
type: command
short-summary: Retrieve a VPN connection shared key.
examples:
- name: View the shared key of a connection.
text: az network vpn-connection shared-key show -g MyResourceGroup --connection-name MyConnection
- name: Retrieve a VPN connection shared key. (autogenerated)
text: |
az network vpn-connection shared-key show --connection-name MyConnection --resource-group MyResourceGroup --subscription MySubscription
crafted: true
"""
helps['network vpn-connection shared-key update'] = """
type: command
short-summary: Update a VPN connection shared key.
examples:
- name: Change the shared key for the connection to "Abc123".
text: az network vpn-connection shared-key update -g MyResourceGroup --connection-name MyConnection --value Abc123
- name: Update a VPN connection shared key. (autogenerated)
text: |
az network vpn-connection shared-key update --connection-name MyConnection --resource-group MyResourceGroup --subscription MySubscription --value Abc123
crafted: true
"""
helps['network vpn-connection show'] = """
type: command
short-summary: Get the details of a VPN connection.
examples:
- name: View the details of a VPN connection.
text: az network vpn-connection show -g MyResourceGroup -n MyConnection
"""
helps['network vpn-connection update'] = """
type: command
short-summary: Update a VPN connection.
examples:
- name: Add BGP to an existing connection.
text: az network vpn-connection update -g MyResourceGroup -n MyConnection --enable-bgp True
- name: Update a VPN connection. (autogenerated)
text: |
az network vpn-connection update --name MyConnection --resource-group MyResourceGroup --use-policy-based-traffic-selectors true
crafted: true
"""
helps['network vrouter'] = """
type: group
short-summary: Manage the virtual router. This feature supports both VirtualHub and VirtualRouter. Because VirtualRouter is deprecated, we recommend creating a VirtualRouter with --hosted-subnet instead.
"""
helps['network vrouter create'] = """
type: command
short-summary: Create a virtual router.
"""
helps['network vrouter update'] = """
type: command
short-summary: Update a virtual router.
"""
helps['network vrouter show'] = """
type: command
short-summary: Show a virtual router.
"""
helps['network vrouter list'] = """
type: command
short-summary: List all virtual routers under a subscription or a resource group.
"""
helps['network vrouter delete'] = """
type: command
short-summary: Delete a virtual router under a resource group.
"""
helps['network vrouter peering'] = """
type: group
short-summary: Manage the virtual router peering.
"""
helps['network vrouter peering create'] = """
type: command
short-summary: Create a virtual router peering.
"""
helps['network vrouter peering update'] = """
type: command
short-summary: Update a virtual router peering.
"""
helps['network vrouter peering list'] = """
type: command
short-summary: List all virtual router peerings under a resource group.
"""
helps['network vrouter peering show'] = """
type: command
short-summary: Show a virtual router peering.
"""
helps['network vrouter peering delete'] = """
type: command
short-summary: Delete a virtual router peering.
"""
helps['network watcher'] = """
type: group
short-summary: Manage the Azure Network Watcher.
long-summary: >
Network Watcher assists with monitoring and diagnosing conditions at a network scenario level. To learn more visit https://docs.microsoft.com/azure/network-watcher/
"""
helps['network watcher configure'] = """
type: command
short-summary: Configure the Network Watcher service for different regions.
parameters:
- name: --enabled
short-summary: Enabled status of Network Watcher in the specified regions.
- name: --locations -l
short-summary: Space-separated list of locations to configure.
- name: --resource-group -g
short-summary: Name of resource group. Required when enabling new regions.
long-summary: >
When a previously disabled region is enabled to use Network Watcher, a
Network Watcher resource will be created in this resource group.
examples:
- name: Configure Network Watcher for the West US region.
text: az network watcher configure -g NetworkWatcherRG -l westus --enabled true
"""
helps['network watcher connection-monitor'] = """
type: group
short-summary: Manage connection monitoring between an Azure Virtual Machine and any IP resource.
long-summary: >
Connection monitor can be used to monitor network connectivity between an Azure virtual machine and an IP address.
The IP address can be assigned to another Azure resource or a resource on the Internet or on-premises. To learn
more visit https://aka.ms/connectionmonitordoc
"""
helps['network watcher connection-monitor create'] = """
type: command
short-summary: Create a connection monitor.
long-summary: |
This extension allow to create V1 and V2 version of connection monitor.
V1 connection monitor supports single source and destination endpoint which comes with V1 argument groups as usual.
V2 connection monitor supports multiple endpoints and several test protocol which comes with V2 argument groups.
parameters:
- name: --source-resource
short-summary: >
Currently only Virtual Machines are supported.
- name: --dest-resource
short-summary: >
Currently only Virtual Machines are supported.
examples:
- name: Create a connection monitor for a virtual machine.
text: |
az network watcher connection-monitor create -g MyResourceGroup -n MyConnectionMonitorName \\
--source-resource MyVM
- name: Create a V2 connection monitor
text: >
az network watcher connection-monitor create
--name MyV2ConnectionMonitor
--endpoint-source-name "vm01"
--endpoint-source-resource-id MyVM01ResourceID
--endpoint-dest-name bing
--endpoint-dest-address bing.com
--test-config-name TCPTestConfig
--protocol Tcp
--tcp-port 2048
- name: Create a connection monitor. (autogenerated)
text: |
az network watcher connection-monitor create --endpoint-dest-address bing.com --endpoint-dest-name bing --endpoint-source-name "vm01" --endpoint-source-resource-id MyVM01ResourceID --location westus2 --name MyConnectionMonitorName --protocol Tcp --tcp-port 2048 --test-config-name TCPTestConfig
crafted: true
"""
helps['network watcher connection-monitor delete'] = """
type: command
short-summary: Delete a connection monitor for the given region.
examples:
- name: Delete a connection monitor for the given region.
text: az network watcher connection-monitor delete -l westus -n MyConnectionMonitorName
"""
helps['network watcher connection-monitor list'] = """
type: command
short-summary: List connection monitors for the given region.
examples:
- name: List a connection monitor for the given region.
text: az network watcher connection-monitor list -l westus
- name: List connection monitors for the given region. (autogenerated)
text: |
az network watcher connection-monitor list --location westus --subscription MySubscription
crafted: true
"""
helps['network watcher connection-monitor query'] = """
type: command
short-summary: Query a snapshot of the most recent connection state of a connection monitor.
examples:
- name: List a connection monitor for the given region.
text: az network watcher connection-monitor query -l westus -n MyConnectionMonitorName
"""
helps['network watcher connection-monitor show'] = """
type: command
short-summary: Shows a connection monitor by name.
examples:
- name: Show a connection monitor for the given name.
text: az network watcher connection-monitor show -l westus -n MyConnectionMonitorName
"""
helps['network watcher connection-monitor start'] = """
type: command
short-summary: Start the specified connection monitor.
examples:
- name: Start the specified connection monitor.
text: az network watcher connection-monitor start -l westus -n MyConnectionMonitorName
"""
helps['network watcher connection-monitor stop'] = """
type: command
short-summary: Stop the specified connection monitor.
examples:
- name: Stop the specified connection monitor.
text: az network watcher connection-monitor stop -l westus -n MyConnectionMonitorName
"""
helps['network watcher connection-monitor endpoint'] = """
type: group
short-summary: Manage endpoint of a connection monitor
"""
helps['network watcher connection-monitor endpoint add'] = """
type: command
short-summary: Add an endpoint to a connection monitor
examples:
- name: Add an external address as a destination endpoint
text: >
az network watcher connection-monitor endpoint add
--connection-monitor MyConnectionMonitor
--location westus
--name MyExternalEndpoint
--address "bing.com"
--dest-test-groups DefaultTestGroup
--type ExternalAddress
- name: Add an Azure VM as a source endpoint
text: >
az network watcher connection-monitor endpoint add
--connection-monitor MyConnectionMonitor
--location westus
--name MyVMEndpoint
--resource-id MyVMResourceID
--source-test-groups DefaultTestGroup
--type AzureVM
- name: Add a Subnet as a source endpoint with addresses excluded
text: >
az network watcher connection-monitor endpoint add
--connection-monitor MyConnectionMonitor
--location westus
--name MySubnetEndpoint
--resource-id MySubnetID
--source-test-groups DefaultTestGroup
--type AzureSubnet
--address-exclude 10.0.0.25 10.0.0.30
--coverage-level BelowAverage
"""
helps['network watcher connection-monitor endpoint remove'] = """
type: command
short-summary: Remove an endpoint from a connection monitor
examples:
- name: Remove endpoint from all test groups of a connection monitor
text: >
az network watcher connection-monitor endpoint remove
--connection-monitor MyConnectionMonitor
--location westus
--name MyEndpoint
- name: Remove endpoint from two test groups of a connection monitor
text: >
az network watcher connection-monitor endpoint remove
--connection-monitor MyConnectionMonitor
--location westus
--name MyEndpoint
--test-groups DefaultTestGroup HealthCheckTestGroup
"""
helps['network watcher connection-monitor endpoint show'] = """
type: command
short-summary: Show an endpoint from a connection monitor
"""
helps['network watcher connection-monitor endpoint list'] = """
type: command
short-summary: List all endpoints form a connection monitor
"""
helps['network watcher connection-monitor test-configuration'] = """
type: group
short-summary: Manage test configuration of a connection monitor
"""
helps['network watcher connection-monitor test-configuration add'] = """
type: command
short-summary: Add a test configuration to a connection monitor
examples:
- name: Add a test configuration with HTTP supported
text: >
az network watcher connection-monitor test-configuration add
--connection-monitor MyConnectionMonitor
--location westus
--name MyHTTPTestConfiguration
--test-groups DefaultTestGroup
--protocol Http
--http-request-header name=Host value=bing.com
--http-request-header name=UserAgent value=Edge
- name: Add a test configuration with TCP supported
text: >
az network watcher connection-monitor test-configuration add
--connection-monitor MyConnectionMonitor
--location westus
--name MyHTTPTestConfiguration
--test-groups TCPTestGroup DefaultTestGroup
--protocol Tcp
--tcp-port 4096
"""
helps['network watcher connection-monitor test-configuration remove'] = """
type: command
short-summary: Remove a test configuration from a connection monitor
examples:
- name: Remove a test configuration from all test groups of a connection monitor
text: >
az network watcher connection-monitor test-configuration remove
--connection-monitor MyConnectionMonitor
--location westus
--name MyTCPTestConfiguration
- name: Remove a test configuration from two test groups of a connection monitor
text: >
az network watcher connection-monitor test-configuration remove
--connection-monitor MyConnectionMonitor
--location westus
--name MyHTTPTestConfiguration
--test-groups HTTPTestGroup DefaultTestGroup
"""
helps['network watcher connection-monitor test-configuration show'] = """
type: command
short-summary: Show a test configuration from a connection monitor
"""
helps['network watcher connection-monitor test-configuration list'] = """
type: command
short-summary: List all test configurations of a connection monitor
"""
helps['network watcher connection-monitor test-group'] = """
type: group
short-summary: Manage a test group of a connection monitor
"""
helps['network watcher connection-monitor test-group add'] = """
type: command
short-summary: Add a test group along with new-added/existing endpoint and test configuration to a connection monitor
examples:
- name: Add a test group along with existing endpoint and test configuration via their names
text: >
az network watcher connection-monitor test-group add
--connection-monitor MyConnectionMonitor
--location westus
--name MyHTTPTestGroup
--endpoint-source-name MySourceEndpoint
--endpoint-dest-name MyDestinationEndpoint
--test-config-name MyTestConfiguration
- name: Add a test group long with new-added source endpoint and existing test configuration via its name
text: >
az network watcher connection-monitor test-group add
--connection-monitor MyConnectionMonitor
--location westus
--name MyAccessibilityTestGroup
--endpoint-source-name MySourceEndpoint
--endpoint-source-resource-id MyLogAnalysisWorkspaceID
--endpoint-dest-name MyExistingDestinationEndpoint
--test-config-name MyExistingTestConfiguration
- name: Add a test group along with new-added endpoints and test configuration
text: >
az network watcher connection-monitor test-group add
--connection-monitor MyConnectionMonitor
--location westus
--name MyAccessibilityTestGroup
--endpoint-source-name MySourceEndpoint
--endpoint-source-resource-id MyVMResourceID
--endpoint-dest-name bing
--endpoint-dest-address bing.com
--test-config-name MyNewTestConfiguration
--protocol Tcp
--tcp-port 4096
"""
helps['network watcher connection-monitor test-group remove'] = """
type: command
short-summary: Remove test group from a connection monitor
"""
helps['network watcher connection-monitor test-group show'] = """
type: command
short-summary: Show a test group of a connection monitor
"""
helps['network watcher connection-monitor test-group list'] = """
type: command
short-summary: List all test groups of a connection monitor
"""
helps['network watcher connection-monitor output'] = """
type: group
short-summary: Manage output of connection monitor
"""
helps['network watcher connection-monitor output add'] = """
type: command
short-summary: Add an output to a connection monitor
"""
helps['network watcher connection-monitor output remove'] = """
type: command
short-summary: Remove all outputs from a connection monitor
"""
helps['network watcher connection-monitor output list'] = """
type: command
short-summary: List all output from a connection monitor
"""
helps['network watcher flow-log'] = """
type: group
short-summary: Manage network security group flow logging.
long-summary: >
For more information about configuring flow logs visit https://docs.microsoft.com/azure/network-watcher/network-watcher-nsg-flow-logging-cli
"""
helps['network watcher flow-log configure'] = """
type: command
short-summary: Configure flow logging on a network security group.
parameters:
- name: --nsg
short-summary: Name or ID of the Network Security Group to target.
- name: --enabled
short-summary: Enable logging.
- name: --retention
short-summary: Number of days to retain logs.
- name: --storage-account
short-summary: Name or ID of the storage account in which to save the flow logs.
examples:
- name: Enable NSG flow logs.
text: az network watcher flow-log configure -g MyResourceGroup --enabled true --nsg MyNsg --storage-account MyStorageAccount
- name: Disable NSG flow logs.
text: az network watcher flow-log configure -g MyResourceGroup --enabled false --nsg MyNsg
"""
helps['network watcher flow-log create'] = """
type: command
short-summary: Create a flow log on a network security group.
examples:
- name: Create a flow log with Network Security Group name
text: >
az network watcher flow-log create
--location westus
--resource-group MyResourceGroup
--name MyFlowLog
--nsg MyNetworkSecurityGroupName
--storage-account account
- name: Create a flow log with Network Security Group ID (could be in other resource group)
text: >
az network watcher flow-log create
--location westus
--name MyFlowLog
--nsg MyNetworkSecurityGroupID
--storage-account account
"""
helps['network watcher flow-log list'] = """
type: command
short-summary: List all flow log resources for the specified Network Watcher
examples:
- name: List all flow log resources for the specified Network Watcher. (autogenerated)
text: |
az network watcher flow-log list --location westus2
crafted: true
"""
helps['network watcher flow-log delete'] = """
type: command
short-summary: Delete the specified flow log resource.
examples:
- name: Delete the specified flow log resource. (autogenerated)
text: |
az network watcher flow-log delete --location westus2 --name MyFlowLogger --subscription MySubscription
crafted: true
"""
helps['network watcher flow-log show'] = """
type: command
short-summary: Get the flow log configuration of a network security group.
examples:
- name: Show NSG flow logs. (Deprecated)
text: az network watcher flow-log show -g MyResourceGroup --nsg MyNsg
- name: Show NSG flow logs with Azure Resource Management formatted.
text: az network watcher flow-log show --location MyNetworkWatcher --name MyFlowLog
"""
helps['network watcher flow-log update'] = """
type: command
short-summary: Update the flow log configuration of a network security group
examples:
- name: Update storage account with name to let resource group identify the storage account and network watcher
text: >
az network watcher flow-log update
--location westus
--resource-group MyResourceGroup
--name MyFlowLog
--storage-account accountname
- name: Update storage account with ID to let location identify the network watcher
text: >
az network watcher flow-log update
--location westus
--resource-group MyResourceGroup
--name MyFlowLog
--storage-account accountid
- name: Update Network Security Group on another resource group
text: >
az network watcher flow-log update
--location westus
--resource-group MyAnotherResourceGroup
--name MyFlowLog
--nsg MyNSG
- name: Update Workspace on another resource group
text: >
az network watcher flow-log update
--location westus
--resource-group MyAnotherResourceGroup
--name MyFlowLog
--workspace MyAnotherLogAnalyticWorkspace
"""
helps['network watcher list'] = """
type: command
short-summary: List Network Watchers.
examples:
- name: List all Network Watchers in a subscription.
text: az network watcher list
"""
helps['network watcher packet-capture'] = """
type: group
short-summary: Manage packet capture sessions on VMs.
long-summary: >
These commands require that both Azure Network Watcher is enabled for the VMs region and that AzureNetworkWatcherExtension is enabled on the VM.
For more information visit https://docs.microsoft.com/azure/network-watcher/network-watcher-packet-capture-manage-cli
"""
helps['network watcher packet-capture create'] = """
type: command
short-summary: Create and start a packet capture session.
parameters:
- name: --capture-limit
short-summary: The maximum size in bytes of the capture output.
- name: --capture-size
short-summary: Number of bytes captured per packet. Excess bytes are truncated.
- name: --time-limit
short-summary: Maximum duration of the capture session in seconds.
- name: --storage-account
short-summary: Name or ID of a storage account to save the packet capture to.
- name: --storage-path
short-summary: Fully qualified URI of an existing storage container in which to store the capture file.
long-summary: >
If not specified, the container 'network-watcher-logs' will be
created if it does not exist and the capture file will be stored there.
- name: --file-path
short-summary: >
Local path on the targeted VM at which to save the packet capture. For Linux VMs, the
path must start with /var/captures.
- name: --vm
short-summary: Name or ID of the VM to target.
- name: --filters
short-summary: JSON encoded list of packet filters. Use `@{path}` to load from file.
examples:
- name: Create a packet capture session on a VM.
text: az network watcher packet-capture create -g MyResourceGroup -n MyPacketCaptureName --vm MyVm --storage-account MyStorageAccount
- name: Create a packet capture session on a VM with optional filters for protocols, local IP address and remote IP address ranges and ports.
text: |
az network watcher packet-capture create -g MyResourceGroup -n MyPacketCaptureName --vm MyVm \\
--storage-account MyStorageAccount --filters '[ \\
{ \\
"protocol":"TCP", \\
"remoteIPAddress":"1.1.1.1-255.255.255", \\
"localIPAddress":"10.0.0.3", \\
"remotePort":"20" \\
}, \\
{ \\
"protocol":"TCP", \\
"remoteIPAddress":"1.1.1.1-255.255.255", \\
"localIPAddress":"10.0.0.3", \\
"remotePort":"80" \\
}, \\
{ \\
"protocol":"TCP", \\
"remoteIPAddress":"1.1.1.1-255.255.255", \\
"localIPAddress":"10.0.0.3", \\
"remotePort":"443" \\
}, \\
{ \\
"protocol":"UDP" \\
}]'
"""
helps['network watcher packet-capture delete'] = """
type: command
short-summary: Delete a packet capture session.
examples:
- name: Delete a packet capture session. This only deletes the session and not the capture file.
text: az network watcher packet-capture delete -n packetCaptureName -l westcentralus
- name: Delete a packet capture session. (autogenerated)
text: |
az network watcher packet-capture delete --location westcentralus --name packetCaptureName --subscription MySubscription
crafted: true
"""
helps['network watcher packet-capture list'] = """
type: command
short-summary: List all packet capture sessions within a resource group.
examples:
- name: List all packet capture sessions within a region.
text: az network watcher packet-capture list -l westus
- name: List all packet capture sessions within a resource group (autogenerated)
text: |
az network watcher packet-capture list --location westus --subscription MySubscription
crafted: true
"""
helps['network watcher packet-capture show'] = """
type: command
short-summary: Show details of a packet capture session.
examples:
- name: Show a packet capture session.
text: az network watcher packet-capture show -l westus -n MyPacketCapture
"""
helps['network watcher packet-capture show-status'] = """
type: command
short-summary: Show the status of a packet capture session.
examples:
- name: Show the status of a packet capture session.
text: az network watcher packet-capture show-status -l westus -n MyPacketCapture
"""
helps['network watcher packet-capture stop'] = """
type: command
short-summary: Stop a running packet capture session.
examples:
- name: Stop a running packet capture session.
text: az network watcher packet-capture stop -l westus -n MyPacketCapture
"""
helps['network watcher run-configuration-diagnostic'] = """
type: command
short-summary: Run a configuration diagnostic on a target resource.
long-summary: >
Requires that Network Watcher is enabled for the region in which the target is located.
examples:
- name: Run configuration diagnostic on a VM with a single query.
text: |
az network watcher run-configuration-diagnostic --resource {VM_ID}
--direction Inbound --protocol TCP --source 12.11.12.14 --destination 10.1.1.4 --port 12100
- name: Run configuration diagnostic on a VM with multiple queries.
text: |
az network watcher run-configuration-diagnostic --resource {VM_ID}
--queries '[
{
"direction": "Inbound", "protocol": "TCP", "source": "12.11.12.14",
"destination": "10.1.1.4", "destinationPort": "12100"
},
{
"direction": "Inbound", "protocol": "TCP", "source": "12.11.12.0/32",
"destination": "10.1.1.4", "destinationPort": "12100"
},
{
"direction": "Outbound", "protocol": "TCP", "source": "12.11.12.14",
"destination": "10.1.1.4", "destinationPort": "12100"
}]'
"""
helps['network watcher show-next-hop'] = """
type: command
short-summary: Get information on the 'next hop' of a VM.
long-summary: >
Requires that Network Watcher is enabled for the region in which the VM is located.
For more information about show-next-hop visit https://docs.microsoft.com/azure/network-watcher/network-watcher-check-next-hop-cli
examples:
- name: Get the next hop from a VMs assigned IP address to a destination at 10.1.0.4.
text: az network watcher show-next-hop -g MyResourceGroup --vm MyVm --source-ip 10.0.0.4 --dest-ip 10.1.0.4
"""
helps['network watcher show-security-group-view'] = """
type: command
short-summary: Get detailed security information on a VM for the currently configured network security group.
long-summary: >
For more information on using security group view visit https://docs.microsoft.com/azure/network-watcher/network-watcher-security-group-view-cli
examples:
- name: Get the network security group information for the specified VM.
text: az network watcher show-security-group-view -g MyResourceGroup --vm MyVm
"""
helps['network watcher show-topology'] = """
type: command
short-summary: Get the network topology of a resource group, virtual network or subnet.
long-summary: For more information about using network topology visit https://docs.microsoft.com/azure/network-watcher/network-watcher-topology-cli
parameters:
- name: --resource-group -g
short-summary: The name of the target resource group to perform topology on.
- name: --location -l
short-summary: Location. Defaults to the location of the target resource group.
long-summary: >
Topology information is only shown for resources within the target
resource group that are within the specified region.
examples:
- name: Use show-topology to get the topology of resources within a resource group.
text: az network watcher show-topology -g MyResourceGroup
"""
helps['network watcher test-connectivity'] = """
type: command
short-summary: Test if a connection can be established between a Virtual Machine and a given endpoint.
long-summary: >
To check connectivity between two VMs in different regions, use the VM ids instead of the VM names for the source and destination resource arguments.
To register for this feature or see additional examples visit https://docs.microsoft.com/azure/network-watcher/network-watcher-connectivity-cli
parameters:
- name: --source-resource
short-summary: Name or ID of the resource from which to originate traffic.
long-summary: Currently only Virtual Machines are supported.
- name: --source-port
short-summary: Port number from which to originate traffic.
- name: --dest-resource
short-summary: Name or ID of the resource to receive traffic.
long-summary: Currently only Virtual Machines are supported.
- name: --dest-port
short-summary: Port number on which to receive traffic.
- name: --dest-address
short-summary: The IP address or URI at which to receive traffic.
examples:
- name: Check connectivity between two virtual machines in the same resource group over port 80.
text: az network watcher test-connectivity -g MyResourceGroup --source-resource MyVmName1 --dest-resource MyVmName2 --dest-port 80
- name: Check connectivity between two virtual machines in the same subscription in two different resource groups over port 80.
text: az network watcher test-connectivity --source-resource MyVmId1 --dest-resource MyVmId2 --dest-port 80
"""
helps['network watcher test-ip-flow'] = """
type: command
short-summary: Test IP flow to/from a VM given the currently configured network security group rules.
long-summary: >
Requires that Network Watcher is enabled for the region in which the VM is located.
For more information visit https://docs.microsoft.com/azure/network-watcher/network-watcher-check-ip-flow-verify-cli
parameters:
- name: --local
short-summary: >
The private IPv4 address for the VMs NIC and the port of the packet in
X.X.X.X:PORT format. `*` can be used for port when direction is outbound.
- name: --remote
short-summary: >
The IPv4 address and port for the remote side of the packet
X.X.X.X:PORT format. `*` can be used for port when the direction is inbound.
- name: --direction
short-summary: Direction of the packet relative to the VM.
- name: --protocol
short-summary: Protocol to test.
examples:
- name: Run test-ip-flow verify to test logical connectivity from a VM to the specified destination IPv4 address and port.
text: |
az network watcher test-ip-flow -g MyResourceGroup --direction Outbound \\
--protocol TCP --local 10.0.0.4:* --remote 10.1.0.4:80 --vm MyVm
"""
helps['network watcher troubleshooting'] = """
type: group
short-summary: Manage Network Watcher troubleshooting sessions.
long-summary: >
For more information on configuring troubleshooting visit https://docs.microsoft.com/azure/network-watcher/network-watcher-troubleshoot-manage-cli
"""
helps['network watcher troubleshooting show'] = """
type: command
short-summary: Get the results of the last troubleshooting operation.
examples:
- name: Show the results or status of a troubleshooting operation for a Vnet Gateway.
text: az network watcher troubleshooting show -g MyResourceGroup --resource MyVnetGateway --resource-type vnetGateway
"""
helps['network watcher troubleshooting start'] = """
type: command
short-summary: Troubleshoot issues with VPN connections or gateway connectivity.
parameters:
- name: --resource-type -t
short-summary: The type of target resource to troubleshoot, if resource ID is not specified.
- name: --storage-account
short-summary: Name or ID of the storage account in which to store the troubleshooting results.
- name: --storage-path
short-summary: Fully qualified URI to the storage blob container in which to store the troubleshooting results.
examples:
- name: Start a troubleshooting operation on a VPN Connection.
text: |
az network watcher troubleshooting start -g MyResourceGroup --resource MyVPNConnection \\
--resource-type vpnConnection --storage-account MyStorageAccount \\
--storage-path https://{storageAccountName}.blob.core.windows.net/{containerName}
"""
helps['network list-service-aliases'] = """
type: command
short-summary: List available service aliases in the region which can be used for Service Endpoint Policies.
examples:
- name: List available service aliases in the region which can be used for Service Endpoint Policies. (autogenerated)
text: |
az network list-service-aliases --location westus2
crafted: true
"""
helps['network bastion'] = """
type: group
short-summary: Manage Azure bastion host.
"""
helps['network bastion create'] = """
type: command
short-summary: Create a Azure bastion host machine.
examples:
- name: Create a Azure bastion host machine. (autogenerated)
text: |
az network bastion create --location westus2 --name MyBastionHost --public-ip-address MyPublicIpAddress --resource-group MyResourceGroup --vnet-name MyVnet
crafted: true
"""
helps['network bastion delete'] = """
type: command
short-summary: Delete a Azure bastion host machine.
examples:
- name: Delete a Azure bastion host machine. (autogenerated)
text: |
az network bastion delete --name MyBastionHost --resource-group MyResourceGroup
crafted: true
"""
helps['network bastion list'] = """
type: command
short-summary: List all Azure bastion host machines.
"""
helps['network bastion show'] = """
type: command
short-summary: Show a Azure bastion host machine.
examples:
- name: Show a Azure bastion host machine (autogenerated)
text: |
az network bastion show --name MyBastionHost --resource-group MyResourceGroup
crafted: true
"""
helps['network security-partner-provider'] = """
type: group
short-summary: Manage Azure security partner provider.
"""
helps['network security-partner-provider create'] = """
type: command
short-summary: Create a Azure security partner provider.
"""
helps['network security-partner-provider update'] = """
type: command
short-summary: Update a Azure security partner provider.
"""
helps['network security-partner-provider delete'] = """
type: command
short-summary: Delete a Azure security partner provider.
"""
helps['network security-partner-provider list'] = """
type: command
short-summary: List all Azure security partner provider.
"""
helps['network security-partner-provider show'] = """
type: command
short-summary: Show a Azure security partner provider.
"""
helps['network virtual-appliance'] = """
type: group
short-summary: Manage Azure Network Virtual Appliance.
"""
helps['network virtual-appliance create'] = """
type: command
short-summary: Create an Azure network virtual appliance.
examples:
- name: Create an Azure network virtual appliance.
text: |
az network virtual-appliance create -n MyName -g MyRG --vhub {vhubID} --vendor "barracudasdwanrelease" --scale-unit 2 -v latest --asn 10000 --init-config "echo $hello" --boot-blobs {blobUrl1} {blobUrl2} --cloud-blobs {blobUrl3} {blobUrl4}
"""
helps['network virtual-appliance update'] = """
type: command
short-summary: Update an Azure network virtual appliance.
examples:
- name: Update an Azure network virtual appliance.
text: |
az network virtual-appliance update -n MyName -g MyRG --asn 20000 --init-config "echo $hello"
"""
helps['network virtual-appliance show'] = """
type: command
short-summary: Show the detail of an Azure network virtual appliance.
"""
helps['network virtual-appliance list'] = """
type: command
short-summary: List all Azure network virtual appliance.
"""
helps['network virtual-appliance delete'] = """
type: command
short-summary: Delete an Azure network virtual appliance.
"""
helps['network virtual-appliance site'] = """
type: group
short-summary: Manage Azure Network Virtual Appliance Site.
"""
helps['network virtual-appliance site create'] = """
type: command
short-summary: Create an Azure network virtual appliance site.
examples:
- name: Create an Azure network virtual appliance site.
text: |
az network virtual-appliance site create -n MyName -g MyRG --appliance-name MyAppliance --address-prefix 10.0.0.0/24 --allow --default --optimize
"""
helps['network virtual-appliance site update'] = """
type: command
short-summary: Update an Azure network virtual appliance site.
examples:
- name: Update an Azure network virtual appliance site.
text: |
az network virtual-appliance site update -n MyName -g MyRG --appliance-name MyAppliance --address-prefix 10.0.0.0/24 --allow false --default false --optimize false
"""
helps['network virtual-appliance site show'] = """
type: command
short-summary: Show the detail of an Azure network virtual appliance site.
"""
helps['network virtual-appliance site list'] = """
type: command
short-summary: List all Azure network virtual appliance site.
"""
helps['network virtual-appliance site delete'] = """
type: command
short-summary: Delete an Azure network virtual appliance site.
"""
helps['network virtual-appliance sku'] = """
type: group
short-summary: Manage Azure Network Virtual Appliance Sku.
"""
helps['network virtual-appliance sku show'] = """
type: command
short-summary: Show the detail of an Azure network virtual appliance sku.
"""
helps['network virtual-appliance sku list'] = """
type: command
short-summary: List all Azure network virtual appliance sku.
"""
| 39.193676 | 693 | 0.730932 |
3e6eb26bdee7b331141ff872bae71decca11e900 | 2,796 | py | Python | problems/speed_of_light.py | benallan/lovelace-problems | 3780d2bfc58fe0531d60a92ae0a6c45e9814f58f | [
"MIT"
] | 29 | 2019-07-23T16:51:36.000Z | 2022-03-08T21:42:05.000Z | problems/speed_of_light.py | benallan/lovelace-problems | 3780d2bfc58fe0531d60a92ae0a6c45e9814f58f | [
"MIT"
] | 44 | 2019-03-22T00:05:32.000Z | 2021-05-04T13:25:12.000Z | problems/speed_of_light.py | benallan/lovelace-problems | 3780d2bfc58fe0531d60a92ae0a6c45e9814f58f | [
"MIT"
] | 3 | 2019-08-04T13:06:21.000Z | 2021-04-20T07:41:42.000Z | import logging
from typing import Tuple
from numpy.random import uniform
from problems.test_case import TestCase, TestCaseTypeEnum
from problems.solutions.speed_of_light import light_time
logger = logging.getLogger(__name__)
FUNCTION_NAME = "light_time"
INPUT_VARS = ['distance']
OUTPUT_VARS = ['time']
STATIC_RESOURCES = []
PHYSICAL_CONSTANTS = {
# Source: https://en.wikipedia.org/wiki/Speed_of_light
'c': 299792458, # speed of light [m/s]
# All distances in meters.
# Time-averaged distance between the Earth and lunar surfaces. The average distance between the Earth and Moon is
# 384,400 km but the radius of the Earth is 6,371 km and the radius of the Moon is 1,737 km, so in this case light
# actually travels 384,400 - 6,371, - 1,737 = 376,292 km
# Source: https://en.wikipedia.org/wiki/Lunar_distance_(astronomy)
'd_Earth_Moon': 376292e3, # 376,292 km
# 1 Astronomical unit (almost equal to the average of Earth's aphelion and perihelion).
# Source: https://en.wikipedia.org/wiki/Astronomical_unit
'd_Sun_Earth': 149597870700.0, # ~150 million kilometres
# Maximum distance between the Earth and Mars.
# Source: https://www.space.com/14729-spacekids-distance-earth-mars.html
'd_Earth_Mars': 401e9, # ~401 million km
# Maximum distance between Earth and Jupiter.
# Source: https://www.universetoday.com/14514/how-far-is-jupiter-from-earth/
'd_Earth_Jupiter': 928e9, # ~928 million km
}
ATOL = {}
RTOL = {
'time': 1e-5
}
class TestCaseType(TestCaseTypeEnum):
EARTH_TO_MOON = ("Earth to moon", 1)
SUN_TO_EARTH = ("Sun to Earth", 1)
MAX_EARTH_TO_MARS = ("Earth to Mars", 1)
MAX_EARTH_TO_JUPITER = ("Earth to Jupiter", 1)
RANDOM = ("Random", 1)
class ProblemTestCase(TestCase):
def input_tuple(self) -> tuple:
return self.input['distance'],
def output_tuple(self) -> tuple:
return self.output['time'],
def generate_test_case(test_type: TestCaseType) -> ProblemTestCase:
    """Build a ProblemTestCase for the requested scenario.

    Fixed scenarios look up a known distance from PHYSICAL_CONSTANTS; the
    RANDOM scenario draws a distance uniformly between 1 and 60 light-seconds.

    Raises:
        ValueError: if test_type is not a recognized TestCaseType member.
    """
    test_case = ProblemTestCase(test_type)

    # Map each fixed scenario onto its PHYSICAL_CONSTANTS key.
    distance_key_by_type = {
        TestCaseType.EARTH_TO_MOON: 'd_Earth_Moon',
        TestCaseType.SUN_TO_EARTH: 'd_Sun_Earth',
        TestCaseType.MAX_EARTH_TO_MARS: 'd_Earth_Mars',
        TestCaseType.MAX_EARTH_TO_JUPITER: 'd_Earth_Jupiter',
    }

    if test_type is TestCaseType.RANDOM:
        c = PHYSICAL_CONSTANTS['c']
        distance = uniform(c, 60 * c)
    elif test_type in distance_key_by_type:
        distance = PHYSICAL_CONSTANTS[distance_key_by_type[test_type]]
    else:
        raise ValueError(f"Unrecognized test case: {test_type}")

    test_case.input['distance'] = distance
    test_case.output['time'] = light_time(distance)

    return test_case
| 31.41573 | 118 | 0.70887 |
36686dc5ddea0bd07453acb21bea157854ade437 | 18,530 | py | Python | tap_gmail_csv/gmail_client/client.py | food-spotter/tap-gmail-csv | e3f866b462b60ad87b885a7fdcd0945fdb68779a | [
"Apache-2.0"
] | null | null | null | tap_gmail_csv/gmail_client/client.py | food-spotter/tap-gmail-csv | e3f866b462b60ad87b885a7fdcd0945fdb68779a | [
"Apache-2.0"
] | null | null | null | tap_gmail_csv/gmail_client/client.py | food-spotter/tap-gmail-csv | e3f866b462b60ad87b885a7fdcd0945fdb68779a | [
"Apache-2.0"
] | null | null | null | import pickle
import base64
import os.path
import email
from io import BytesIO
from typing import List, Dict, Generator, Iterable, Optional, Union, Set
from googleapiclient.discovery import build, Resource
from google.oauth2.credentials import Credentials
from bs4 import BeautifulSoup
from tap_gmail_csv.gmail_client.models import Message, File, Attachment, Url
class GoogleAPICredentialsNotFound(Exception):
    """Raised when the pickled Google API credentials file does not exist."""

    pass
class GoogleAPICredentialsAreAnInvalidFormat(Exception):
    """Raised when the credentials file exists but cannot be unpickled."""

    pass
class GmailClient:
    """
    Simple GMail client wrapper that supports operations to search a GMail Inbox,
    retrieve message attachments and body content.

    Abstract out GMail library representations into more simple dataclasses
    for representations of a `Message`, `File` and `Attachment`

    Raises:
        GoogleAPICredentialsAreAnInvalidFormat: credentials file could not be unpickled.
        GoogleAPICredentialsNotFound: credentials file does not exist on disk.
    """

    DEFAULT_API_VERSION = "v1"

    def __init__(self, auth_token_path: str, user_id: str = "me", api_version: str = DEFAULT_API_VERSION):
        # "me" is the Gmail API alias for the authenticated user.
        self.user_id = user_id
        self.service = GmailClient._connect(auth_token_path, api_version)

    @staticmethod
    def _get_credentials(token_path: str) -> Credentials:
        """
        Load credentials from a pickle file

        Arguments:
            token_path {str} -- path to the pickle file

        Raises:
            GoogleAPICredentialsAreAnInvalidFormat: file exists but could not be unpickled.
            GoogleAPICredentialsNotFound: file does not exist.

        Returns:
            Credentials
        """
        creds = None
        if os.path.exists(token_path):
            try:
                with open(token_path, "rb") as token:
                    creds = pickle.load(token)
            except Exception as e:
                # NOTE(review): prints then re-wraps any unpickling error.
                print(e)
                raise GoogleAPICredentialsAreAnInvalidFormat(e)
        else:
            raise GoogleAPICredentialsNotFound(f"File {token_path} was not found.")
        return creds

    @staticmethod
    def _create_client(creds: Credentials, api_version: str, cache_discovery: bool = False) -> Resource:
        """
        Creates the resource to interact with the GMail API

        Arguments:
            creds {Credentials} -- Credentials loaded from a pickle file.
            api_version {str} -- which version of the GMail API to use.

        Keyword Arguments:
            cache_discovery {bool} -- whether or not to cache the discovery doc (default: {False})

        Returns:
            Resource -- the resource to interact with the GMail API.
        """
        return build("gmail", api_version, credentials=creds, cache_discovery=cache_discovery)

    @staticmethod
    def _connect(auth_token_path: str, api_version: str) -> Resource:
        """
        Authenticate and return a GMail API resource to interact with.
        The pickle file can be generated as per instructions:
        https://developers.google.com/gmail/api/quickstart/python

        Arguments:
            auth_token_path {str} -- path to a pickle file.
            api_version {str} -- which version of the GMail API to use.

        Returns:
            Resource -- the resource to interact with the GMail API.
        """
        creds = GmailClient._get_credentials(auth_token_path)
        service = GmailClient._create_client(creds, api_version)
        return service

    @staticmethod
    def _convert_to_attachment(message_id: str, part: dict) -> Union[Attachment, None]:
        """
        Convert the given `parts` portion of a raw GMail response to an `Attachment` object if
        it's a valid attachment.
        Returns `None` otherwise.

        Arguments:
            message_id {str} -- id of the GMail message.
            part {dict} -- parts section of a raw gmail response.

        Returns:
            Union[Attachment, None]
        """
        attachment_obj = None
        # A part is an attachment only when it carries both a filename and an
        # attachmentId (inline bodies have neither).
        if part["filename"] and part.get("body", {}).get("attachmentId"):
            attachment_obj = Attachment(message_id, part["body"]["attachmentId"], part["filename"])
        return attachment_obj

    @staticmethod
    def _convert_to_attachment_list(message: dict) -> List[Attachment]:
        """
        Gets a List of `Attachment`s for a given raw GMail response.

        Arguments:
            message {dict} -- the raw GMail response from the `messages` resource.

        Returns:
            List[Attachment]
        """
        attachments = []
        for part in message.get("payload", {}).get("parts", []):
            attachment_obj = GmailClient._convert_to_attachment(message["id"], part)
            if attachment_obj:
                attachments.append(attachment_obj)
        return attachments

    @staticmethod
    def _convert_to_url_list(message: dict) -> List[Url]:
        """
        Gets a unique List of `Url`s for a given raw GMail response.
        Searches for `"text/html"` and `"text/plain"` content and extracts the href
        attribute value of all html anchor tags.

        Arguments:
            message {dict} -- the raw GMail response from the `messages` resource.

        Returns:
            List[Url] - unique list of all href links inside the html portion of the message text
        """
        url_set = set()
        parts = message.get("payload", {}).get("parts", [])
        for part in parts:
            # NOTE(review): the two branches below are identical apart from the
            # mimeType tested; they could be merged into one membership check.
            if part.get("mimeType", "") == "text/html":
                raw_data = part.get("body", {}).get("data", b"")
                html_content = base64.urlsafe_b64decode(raw_data).decode("UTF-8")
                url_set.update(GmailClient._extract_href_from_html(html_content))
            if part.get("mimeType", "") == "text/plain":
                raw_data = part.get("body", {}).get("data", b"")
                html_content = base64.urlsafe_b64decode(raw_data).decode("UTF-8")
                url_set.update(GmailClient._extract_href_from_html(html_content))
        return [Url(message["id"], link) for link in url_set]

    @staticmethod
    def _extract_href_from_html(html_content: str) -> Set[str]:
        """
        Returns all unique href content from given HTML document.

        Arguments:
            html_content {str}

        Returns:
            Set[str] -- unique set of urls
        """
        url_set = set()
        soup = BeautifulSoup(html_content, "html.parser")
        # Only anchors that actually carry an href attribute.
        matches = soup.find_all("a", href=True)
        for m in matches:
            url_set.add(m.get("href"))
        return url_set

    @staticmethod
    def _find_in_header(message: dict, key: str) -> Optional[str]:
        """
        Extract the key value from the header data of a raw GMail api response of a `messages` resource.
        Lookup is case insensitive.

        Arguments:
            message {dict} -- the raw GMail response from the `messages` resource.
            key {str} -- the key to lookup

        Returns:
            Optional[str] -- the value for the given key. `None` if not found
        """
        value = None
        key = key.lower()
        for header in message.get("payload", {}).get("headers", []):
            if header.get("name").lower() == key:
                value = header.get("value")
                break
        return value

    @staticmethod
    def _convert_to_message_obj(message: dict) -> Message:
        # Flatten the raw API response into the local Message dataclass.
        message_id = message.get("id", "")
        label_ids = message.get("labelIds")
        internal_date = message.get("internalDate", 0)
        attachment_list = GmailClient._convert_to_attachment_list(message)
        url_list: List[Url] = GmailClient._convert_to_url_list(message)
        email_to = GmailClient._find_in_header(message, "To")
        email_from = GmailClient._find_in_header(message, "From")
        email_subject = GmailClient._find_in_header(message, "Subject")
        return Message(
            message_id, internal_date, label_ids, attachment_list, url_list, email_to, email_from, email_subject
        )

    def _get_messages(self, message_list: Iterable) -> Generator[Message, None, None]:
        """
        List of Message objects for a given iterable of message_ids, usually in the form of
        [{id=message_ids, thread_id=anotherIdThatWeDontCare}]

        Arguments:
            message_list {Iterable} -- An iterable that contains the low level google gmail message resource
                                       represented like:
                                       `{"id": "2715e11441a6d424", "threadId": "2715e11441a6d424"}`

        Yields:
            Generator[Message, None, None] -- one Message per id, fetched lazily.
        """
        # @TODO to support url link
        for m in message_list:
            # One extra API round-trip per message to fetch its full payload.
            message_dict = self._raw_gmail_message(m["id"])
            yield self._convert_to_message_obj(message_dict)

    def search(
        self,
        search_query: str = "",
        label_ids: List[str] = None,
        include_spam_trash: bool = False,
        results_per_page: int = None,
        max_search_results: int = None,
    ) -> Generator[Message, None, None]:
        """
        Search GMail inbox.

        Keyword Arguments:
            search_query {str} -- String used to filter messages returned. (default: {""})
            label_ids {List[str]} -- List of strings to specify which labels to query against. (default: {None})
            include_spam_trash {bool} -- To include messages in spam and trash. (default: {False})
            results_per_page {int} -- number of results to get back per page request. (default: {None})
            max_search_results {int} -- limit the number of results. (default: {None})

        Returns:
            Generator[Message, None, None] -- Generator of Message objects matching your search criteria.
        """
        message_ids = self._raw_search_gmail(
            search_query, label_ids, include_spam_trash, results_per_page, max_search_results
        )
        return self._get_messages(message_ids)

    def _raw_search_gmail(  # pragma: no cover
        self,
        search_query: str = "",
        label_ids: List[str] = None,
        include_spam_trash: bool = False,
        results_per_page: int = None,
        max_search_results: int = None,
    ) -> Generator[Dict[str, str], None, None]:
        """
        Searches GMail for given criteria.
        Yields an Iterable of a raw GMail `message` resource: (`{message_ids, thread_id}`)
        for the matching search query.

        Keyword Arguments:
            search_query {str} -- String used to filter messages returned. (default: {""})
            label_ids {List[str]} -- List of strings to specify which labels to query against. (default: {None})
            include_spam_trash {bool} -- To include messages in spam and trash. (default: {False})
            results_per_page {int} -- number of results to get back per page request. (default: {None})
            max_search_results {int} -- limit the number of results. (default: {None})

        Yields:
            Generator -- a low level google gmail message resource contains only an id and a threadId represented like:
                         `{"id": "2715e11441a6d424", "threadId": "2715e11441a6d424"}`
        """
        message_count = 0
        response = None
        page_token = None
        # Keep paging while the API reports another page; first iteration has
        # no response yet (response is None).
        while response is None or "nextPageToken" in response:
            if response:
                page_token = response["nextPageToken"]
            response = self._raw_gmail_list_messages(
                search_query=search_query,
                label_ids=label_ids,
                include_spam_trash=include_spam_trash,
                results_per_page=results_per_page,
                max_search_results=max_search_results,
                page_token=page_token,
            )
            if response.get("messages"):
                for message in response["messages"]:
                    # Stop early once the caller-imposed cap is reached.
                    if max_search_results and message_count >= max_search_results:
                        return
                    message_count += 1
                    yield message

    def _raw_gmail_list_messages(  # pragma: no cover
        self,
        search_query: str,
        label_ids: Optional[List[str]],
        include_spam_trash: Optional[bool],
        results_per_page: Optional[int],
        max_search_results: Optional[int],
        page_token: Optional[str],
    ) -> Dict:
        """
        Lists the messages in the GMail mailbox for given criteria. Example response format:
        https://developers.google.com/gmail/api/v1/reference/users/messages/list#response_1

        Arguments:
            search_query {str} -- String used to filter messages returned.
            label_ids {Optional[List[str]]} -- List of strings to specify which labels to query against.
            include_spam_trash {Optional[bool]} -- To include messages in spam and trash.
            results_per_page {Optional[int]} -- number of results to get back per page request.
            max_search_results {Optional[int]} -- limit the number of results.
            page_token {Optional[str]} -- Page token to retrieve a specific page of results in the list.

        Returns:
            Dict -- as is response from gmail api

        Reference:
            https://developers.google.com/gmail/api/v1/reference/users/messages/list
        """
        response = (
            self.service.users()
            .messages()
            .list(
                userId=self.user_id,
                q=search_query,
                labelIds=label_ids,
                includeSpamTrash=include_spam_trash,
                maxResults=results_per_page,
                pageToken=page_token,
            )
            .execute()
        )
        return response

    def _raw_gmail_message(self, message_id: str, return_format: str = "full") -> Dict:  # pragma: no cover
        """
        Get raw GMail `Users.messages` resource for the given message id. Example response format:
        https://developers.google.com/gmail/api/v1/reference/users/messages#resource

        Args:
            message_id: ID of message to get.
            return_format: The format to return the message in. Acceptable values are:
                "full": Returns the full email message data with body content parsed
                        in the payload field; the raw field is not used. (default).
                "metadata": Returns only email message ID, labels, and email headers.
                "minimal": Returns only email message ID and labels; does not return
                           the email headers, body, or payload.
                "raw": Returns the full email message data with body content in the raw
                       field as a base64url encoded string; the payload field is not used.

        Returns:
            Dict -- as is response from gmail api

        Reference:
            https://developers.google.com/gmail/api/v1/reference/users/messages/get
        """
        message = (
            self.service.users().messages().get(userId=self.user_id, id=message_id, format=return_format).execute()
        )
        return message

    def _raw_gmail_attachment(self, message_id: str, attachment_id: str) -> Dict:  # pragma: no cover
        """
        Get raw GMail ` Users.messages.attachments` resource for the given message id
        and attachment id. Example response format:
        https://developers.google.com/gmail/api/v1/reference/users/messages/attachments#resource

        Arguments:
            message_id {str} -- ID of message that the attachment belongs to.
            attachment_id {str} -- ID of attachment to get.

        Returns:
            Dict -- as is response from gmail api

        Reference:
            https://developers.google.com/gmail/api/v1/reference/users/messages/attachments/get
        """
        attachment = (
            self.service.users()
            .messages()
            .attachments()
            .get(userId=self.user_id, messageId=message_id, id=attachment_id)
            .execute()
        )
        return attachment

    # this is a bit useless at the moment and will need to be zapped at some point
    # def get_attachments_from_message(self, message: Dict, filetype_filter: str = None) -> Generator:
    #     """For a given gmail message response (non-raw), get all attachments

    #     Arguments:
    #         message {Dict} -- gmail api response for a message

    #     Keyword Arguments:
    #         filetype_filter {str} -- filtering filename (default: {None})

    #     Yields:
    #         Generator -- [description]
    #     """
    #     for part in message["payload"]["parts"]:
    #         if part["filename"] and (
    #             filetype_filter is None or filetype_filter.lower().endswith(filetype_filter.lower())
    #         ):
    #             attachment = self._raw_gmail_attachment(message["id"], part["body"]["attachmentId"])
    #             file_data = base64.urlsafe_b64decode(attachment["data"].encode("UTF-8"))
    #             # TODO: preserve file content as bytes. E.g. when a binary file format is attached [zip, xls, xlsx]
    #             yield File(part["filename"], BytesIO(file_data))

    @staticmethod
    def extract_message_content_from_body(message: dict) -> List[email.message.Message]:
        """
        Extract body content of raw email response.
        It attempts to strip out non useful stuff and bring back the html and txt of the email body

        Returns:
            List[email.message.Message] -- sub-parts of the message body.
                NOTE(review): in the "text" branch below get_payload() returns a
                str, not a list of Message objects — callers should verify which
                shape they actually receive.
        """
        msg_str = base64.urlsafe_b64decode(message["raw"].encode("UTF-8"))
        mime_msg = email.message_from_bytes(msg_str)
        messageMainType = mime_msg.get_content_maintype()
        message_content = []
        if messageMainType == "multipart":
            for part in mime_msg.get_payload():
                # Dig one level into nested multipart containers.
                if part.get_content_maintype() == "multipart":
                    message_content = part.get_payload()
        elif messageMainType == "text":
            message_content = mime_msg.get_payload()
        return message_content
| 39.849462 | 120 | 0.598057 |
91c3490ba9646f104c9401b39337fcaefa2d487d | 3,337 | py | Python | bcs-ui/backend/uniapps/application/serializers.py | laodiu/bk-bcs | 2a956a42101ff6487ff521fb3ef429805bfa7e26 | [
"Apache-2.0"
] | 599 | 2019-06-25T03:20:46.000Z | 2022-03-31T12:14:33.000Z | bcs-ui/backend/uniapps/application/serializers.py | laodiu/bk-bcs | 2a956a42101ff6487ff521fb3ef429805bfa7e26 | [
"Apache-2.0"
] | 537 | 2019-06-27T06:03:44.000Z | 2022-03-31T12:10:01.000Z | bcs-ui/backend/uniapps/application/serializers.py | laodiu/bk-bcs | 2a956a42101ff6487ff521fb3ef429805bfa7e26 | [
"Apache-2.0"
] | 214 | 2019-06-25T03:26:05.000Z | 2022-03-31T07:52:03.000Z | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import re
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
from rest_framework.exceptions import ValidationError
class StringListField(serializers.ListField):
    """ListField that also accepts a single delimited string.

    A plain string is split on any run of commas, semicolons or spaces
    before validation. Duplicates are removed in every case (note:
    deduplication via set() does not preserve the original order).
    """

    def to_internal_value(self, data):
        if isinstance(data, str):
            # Split "a,b; c" style input into ["a", "b", "c"].
            data = re.findall(r'[^,; ]+', data)
        # The original code had a no-op `elif isinstance(data, list): data = data`
        # branch here; lists (and any other iterable) pass through unchanged.
        data = list(set(data))
        return super(StringListField, self).to_internal_value(data)
class BaseParams(serializers.Serializer):
    """Common base for operation-parameter serializers; concrete params extend this."""

    pass
class InstanceParams(BaseParams):
    """Parameters required to instantiate a template version into namespaces."""

    # Template version identifier (label is user-facing, kept in Chinese).
    version_id = serializers.CharField(label=u"模板版本", required=True)
    # One or more namespace names; accepts a list or a ",;/space"-delimited string.
    namespace = StringListField(child=serializers.CharField(min_length=1), min_length=1, required=True)
class UpdateInstanceParams(BaseParams):
    """Parameters required for a rolling update (滚动升级)."""

    # Recreate everything at once, or roll instances gradually.
    oper_type = serializers.ChoiceField(label=u"操作类型", choices=[("Recreate", u"重新创建"), ("RollingUpdate", u"滚动升级")])
    # How many instances to delete / add per rolling cycle.
    delete_num = serializers.IntegerField(label=u"周期删除数", required=True)
    add_num = serializers.IntegerField(label=u"周期新增数", required=True)
    # Seconds between rolling cycles.
    interval_time = serializers.IntegerField(label=u"更新间隔", required=True)
    # Whether each cycle creates before deleting or vice versa.
    oper_order = serializers.ChoiceField(label=u"滚动顺序", choices=[("CreateFirst", u"先创建"), ("DeleteFirst", u"先删除")])
    version_id = serializers.IntegerField(label=u"版本ID", required=True)
    version = serializers.CharField(label=u"版本号", required=False)
    show_version_id = serializers.IntegerField(required=True)
class ResourceInfoSLZ(serializers.Serializer):
    """Identifies one resource; all fields optional unless `name` is given."""

    resource_kind = serializers.CharField(required=False)
    name = serializers.CharField(required=False)
    namespace = serializers.CharField(required=False)
    cluster_id = serializers.CharField(required=False)

    def validate(self, data):
        # Without a name there is nothing further to cross-check.
        if not data.get("name"):
            return data
        # A named resource must also carry kind, namespace and cluster.
        required_with_name = ("resource_kind", "namespace", "cluster_id")
        if all(data.get(field) for field in required_with_name):
            return data
        raise ValidationError(_("参数【name】的值不为空时,参数【resource_kind】【namespace】【cluster_id】的值不能为空"))
class BatchDeleteResourceSLZ(serializers.Serializer):
    """Batch-delete payload: at least one of the two lists must be non-empty."""

    resource_list = serializers.ListField(child=ResourceInfoSLZ(), required=False)
    inst_id_list = serializers.ListField(child=serializers.IntegerField(required=False), required=False)

    def validate(self, data):
        has_resources = bool(data.get("resource_list"))
        has_inst_ids = bool(data.get("inst_id_list"))
        if not (has_resources or has_inst_ids):
            raise ValidationError(_("参数【resource_list】和【inst_id_list】不能同时为空"))
        return data
class ReschedulePodsSLZ(serializers.Serializer):
    """Payload for rescheduling pods: the resources whose pods should move."""

    resource_list = serializers.ListField(child=ResourceInfoSLZ())
| 41.7125 | 115 | 0.740485 |
092dd9e752235361ab50063012a505da00c496fe | 16,536 | py | Python | reconcile/test/test_openshift_base.py | fishi0x01/qontract-reconcile | 3d9d27812feabb2ad1a94fa3afdbfd7482cbb8a8 | [
"Apache-2.0"
] | null | null | null | reconcile/test/test_openshift_base.py | fishi0x01/qontract-reconcile | 3d9d27812feabb2ad1a94fa3afdbfd7482cbb8a8 | [
"Apache-2.0"
] | null | null | null | reconcile/test/test_openshift_base.py | fishi0x01/qontract-reconcile | 3d9d27812feabb2ad1a94fa3afdbfd7482cbb8a8 | [
"Apache-2.0"
] | null | null | null | import logging
from typing import Any, cast
import pytest
import yaml
import reconcile.openshift_base as sut
import reconcile.utils.openshift_resource as resource
from reconcile.test.fixtures import Fixtures
from reconcile.utils import oc
from reconcile.utils.semver_helper import make_semver
# Fixture loader rooted at the "namespaces" fixture directory.
fxt = Fixtures("namespaces")

# Integration name/version stamped onto OpenshiftResources built in these tests.
TEST_INT = "test_openshift_resources"
TEST_INT_VER = make_semver(1, 9, 2)
def build_resource(kind: str, api_version: str, name: str) -> dict[str, Any]:
    """Return a minimal Kubernetes-style resource manifest dict."""
    metadata = {"name": name}
    return {"kind": kind, "apiVersion": api_version, "metadata": metadata}
@pytest.fixture
def resource_inventory() -> resource.ResourceInventory:
    """Fresh, empty ResourceInventory per test."""
    return resource.ResourceInventory()
@pytest.fixture
def namespaces() -> list[dict[str, Any]]:
    """Single valid namespace definition loaded from the fixture file."""
    return [fxt.get_anymarkup("valid-ns.yml")]
@pytest.fixture
def oc_cs1() -> oc.OCNative:
    """Local (no-server) OC client for the test cluster "cs1"."""
    return cast(oc.OCNative, oc.OC(cluster_name="cs1", server="", token="", local=True))
@pytest.fixture
def oc_map(mocker, oc_cs1: oc.OCNative) -> oc.OC_Map:
    """Mocked OC_Map whose get() returns the cs1 client and skips any other cluster."""

    def get_cluster(cluster: str, privileged: bool = False):
        if cluster == "cs1":
            return oc_cs1
        # Mirror OC_Map.get(): a skipped cluster yields an OCLogMsg directly.
        # (The original returned a 1-tuple here because of a stray trailing
        # comma after the OCLogMsg expression.)
        return oc.OCLogMsg(
            log_level=logging.DEBUG, message=f"[{cluster}] cluster skipped"
        )

    oc_map = mocker.patch("reconcile.utils.oc.OC_Map", autospec=True).return_value
    # Constrain the mock's get() signature to the real method, then route
    # calls through our stub above.
    oc_map.get.mock_add_spec(oc.OC_Map.get)
    oc_map.get.side_effect = get_cluster
    return oc_map
#
# init_specs_to_fetch tests
#
def test_only_cluster_or_namespace(
    resource_inventory: resource.ResourceInventory, oc_map: oc.OC_Map
) -> None:
    """Supplying both namespaces and clusters must be rejected."""
    kwargs = {
        "ri": resource_inventory,
        "oc_map": oc_map,
        "namespaces": [{"foo": "bar"}],
        "clusters": [{"name": "cs1"}],
    }
    with pytest.raises(KeyError):
        sut.init_specs_to_fetch(**kwargs)
def test_no_cluster_or_namespace(
    resource_inventory: resource.ResourceInventory, oc_map: oc.OC_Map
) -> None:
    """Supplying neither namespaces nor clusters must be rejected."""
    kwargs = {
        "ri": resource_inventory,
        "oc_map": oc_map,
        "namespaces": None,
        "clusters": None,
    }
    with pytest.raises(KeyError):
        sut.init_specs_to_fetch(**kwargs)
def test_namespaces_managed_types(
    resource_inventory: resource.ResourceInventory,
    oc_map: oc.OC_Map,
    oc_cs1: oc.OCNative,
) -> None:
    """A managed type with explicit resource names yields a filtered current-state
    spec plus one desired-state spec per openshiftResource."""
    namespace = yaml.safe_load(
        """
        name: ns1
        cluster:
          name: cs1
        managedResourceTypes:
        - Template
        managedResourceNames:
        - resource: Template
          resourceNames:
          - tp1
          - tp2
        openshiftResources:
        - provider: resource
          path: /some/path.yml
        """
    )
    expected: list[sut.StateSpec] = [
        sut.CurrentStateSpec(
            oc=oc_cs1,
            cluster="cs1",
            namespace="ns1",
            kind="Template",
            resource_names=["tp1", "tp2"],
        ),
        sut.DesiredStateSpec(
            oc=oc_cs1,
            cluster="cs1",
            namespace="ns1",
            resource={"provider": "resource", "path": "/some/path.yml"},
            parent=namespace,
        ),
    ]

    rs = sut.init_specs_to_fetch(
        resource_inventory,
        oc_map,
        namespaces=[namespace],
    )
    assert rs == expected
# NOTE(review): "resoruce" in the test name is a typo for "resource"; the name
# is kept as-is since pytest discovers it by prefix.
def test_namespaces_managed_types_with_resoruce_type_overrides(
    resource_inventory: resource.ResourceInventory,
    oc_map: oc.OC_Map,
    oc_cs1: oc.OCNative,
) -> None:
    """managedResourceTypeOverrides replaces the kind used for current-state fetching."""
    namespace = yaml.safe_load(
        """
        name: ns1
        cluster:
          name: cs1
        managedResourceTypes:
        - Template
        managedResourceNames:
        - resource: Template
          resourceNames:
          - tp1
          - tp2
        managedResourceTypeOverrides:
        - resource: Template
          "override": "Template.something.something"
        openshiftResources:
        - provider: resource
          path: /some/path.yml
        """
    )
    expected: list[sut.StateSpec] = [
        sut.CurrentStateSpec(
            oc=oc_cs1,
            cluster="cs1",
            namespace="ns1",
            kind="Template.something.something",
            resource_names=["tp1", "tp2"],
        ),
        sut.DesiredStateSpec(
            oc=oc_cs1,
            cluster="cs1",
            namespace="ns1",
            resource={"provider": "resource", "path": "/some/path.yml"},
            parent=namespace,
        ),
    ]

    rs = sut.init_specs_to_fetch(
        resource_inventory,
        oc_map,
        namespaces=[namespace],
    )
    assert rs == expected
def test_namespaces_managed_types_no_managed_resource_names(
    resource_inventory: resource.ResourceInventory,
    oc_map: oc.OC_Map,
    oc_cs1: oc.OCNative,
) -> None:
    """Without managedResourceNames the current-state spec fetches all names (None filter)."""
    namespace = yaml.safe_load(
        """
        name: ns1
        cluster:
          name: cs1
        managedResourceTypes:
        - Template
        openshiftResources:
        - provider: resource
          path: /some/path.yml
        """
    )
    expected: list[sut.StateSpec] = [
        sut.CurrentStateSpec(
            oc=oc_cs1,
            cluster="cs1",
            namespace="ns1",
            kind="Template",
            resource_names=None,
        ),
        sut.DesiredStateSpec(
            oc=oc_cs1,
            cluster="cs1",
            namespace="ns1",
            resource={"provider": "resource", "path": "/some/path.yml"},
            parent=namespace,
        ),
    ]

    rs = sut.init_specs_to_fetch(
        resource_inventory,
        oc_map,
        namespaces=[namespace],
    )
    assert rs == expected
def test_namespaces_no_managed_resource_types(
    resource_inventory: resource.ResourceInventory,
    oc_map: oc.OC_Map,
) -> None:
    """A namespace with no managedResourceTypes produces no specs at all."""
    namespace = yaml.safe_load(
        """
        name: ns1
        cluster:
          name: cs1
        openshiftResources:
        - provider: resource
          path: /some/path.yml
        """
    )

    rs = sut.init_specs_to_fetch(
        resource_inventory,
        oc_map,
        namespaces=[namespace],
    )

    assert not rs
def test_namespaces_resources_names_for_unmanaged_type(
    resource_inventory: resource.ResourceInventory,
    oc_map: oc.OC_Map,
) -> None:
    """managedResourceNames for a type absent from managedResourceTypes (Secret) is rejected."""
    namespace = yaml.safe_load(
        """
        name: ns1
        cluster:
          name: cs1
        managedResourceTypes:
        - Template
        managedResourceNames:
        - resource: Template
          resourceNames:
          - tp1
          - tp2
        - resource: Secret
          resourceNames:
          - s1
          - s2
        openshiftResources:
        - provider: resource
          path: /some/path.yml
        """
    )
    with pytest.raises(KeyError):
        sut.init_specs_to_fetch(
            resource_inventory,
            oc_map,
            namespaces=[namespace],
        )
def test_namespaces_type_override_for_unmanaged_type(
    resource_inventory: resource.ResourceInventory,
    oc_map: oc.OC_Map,
) -> None:
    """A managedResourceTypeOverrides entry for an unmanaged type is rejected."""
    namespace = yaml.safe_load(
        """
        name: ns1
        cluster:
          name: cs1
        managedResourceTypes:
        - Template
        managedResourceTypeOverrides:
        - resource: UnmanagedType
          override: UnmanagedType.unmanagedapi
        openshiftResources:
        - provider: resource
          path: /some/path.yml
        """
    )
    with pytest.raises(KeyError):
        sut.init_specs_to_fetch(resource_inventory, oc_map, namespaces=[namespace])
def test_namespaces_override_managed_type(
    resource_inventory: resource.ResourceInventory,
    oc_map: oc.OC_Map,
    oc_cs1: oc.OCNative,
) -> None:
    """
    test that the override_managed_types parameter for init_specs_to_fetch takes
    precedence over what might be defined on the namespace. this is relevant for
    integrations that specifically handle only a subset of types e.g. terraform-resources
    only managing Secrets
    """
    namespace = yaml.safe_load(
        """
        name: ns1
        cluster:
          name: cs1
        managedResourceTypes:
        - Template
        managedResourceNames:
        - resource: Template
          resourceNames:
          - tp1
          - tp2
        openshiftResources:
        - provider: resource
          path: /some/path.yml
        """
    )
    expected: list[sut.StateSpec] = [
        sut.CurrentStateSpec(
            oc=oc_cs1,
            cluster="cs1",
            namespace="ns1",
            kind="LimitRanges",
            resource_names=None,
        ),
        sut.DesiredStateSpec(
            oc=oc_cs1,
            cluster="cs1",
            namespace="ns1",
            resource={"provider": "resource", "path": "/some/path.yml"},
            parent=namespace,
        ),
    ]

    rs = sut.init_specs_to_fetch(
        resource_inventory,
        oc_map=oc_map,
        namespaces=[namespace],
        override_managed_types=["LimitRanges"],
    )
    assert rs == expected

    registrations = list(resource_inventory)
    # make sure only the override_managed_type LimitRange is present
    # and not the Template from the namespace
    assert len(registrations) == 1
    cluster, ns, kind, _ = registrations[0]
    assert (cluster, ns, kind) == ("cs1", "ns1", "LimitRanges")
def test_namespaces_managed_fully_qualified_types(
    resource_inventory: resource.ResourceInventory,
    oc_map: oc.OC_Map,
    oc_cs1: oc.OCNative,
) -> None:
    """A fully-qualified kind (Kind.group) is passed through unchanged."""
    namespace = yaml.safe_load(
        """
        name: ns1
        cluster:
          name: cs1
        managedResourceTypes:
        - Kind.fully.qualified
        openshiftResources:
        - provider: resource
          path: /some/path.yml
        """
    )
    expected: list[sut.StateSpec] = [
        sut.CurrentStateSpec(
            oc=oc_cs1,
            cluster="cs1",
            namespace="ns1",
            kind="Kind.fully.qualified",
            resource_names=None,
        ),
        sut.DesiredStateSpec(
            oc=oc_cs1,
            cluster="cs1",
            namespace="ns1",
            resource={"provider": "resource", "path": "/some/path.yml"},
            parent=namespace,
        ),
    ]

    rs = sut.init_specs_to_fetch(
        resource_inventory,
        oc_map,
        namespaces=[namespace],
    )
    assert rs == expected
def test_namespaces_managed_fully_qualified_types_with_resource_names(
    resource_inventory: resource.ResourceInventory,
    oc_map: oc.OC_Map,
    oc_cs1: oc.OCNative,
) -> None:
    """managedResourceNames may be keyed by a fully-qualified kind."""
    namespace = yaml.safe_load(
        """
        name: ns1
        cluster:
          name: cs1
        managedResourceTypes:
        - Kind.fully.qualified
        managedResourceNames:
        - resource: Kind.fully.qualified
          resourceNames:
          - n1
          - n2
        openshiftResources:
        - provider: resource
          path: /some/path.yml
        """
    )
    expected: list[sut.StateSpec] = [
        sut.CurrentStateSpec(
            oc=oc_cs1,
            cluster="cs1",
            namespace="ns1",
            kind="Kind.fully.qualified",
            resource_names=["n1", "n2"],
        ),
        sut.DesiredStateSpec(
            oc=oc_cs1,
            cluster="cs1",
            namespace="ns1",
            resource={"provider": "resource", "path": "/some/path.yml"},
            parent=namespace,
        ),
    ]

    rs = sut.init_specs_to_fetch(
        resource_inventory,
        oc_map,
        namespaces=[namespace],
    )
    assert rs == expected
def test_namespaces_managed_mixed_qualified_types_with_resource_names(
    resource_inventory: resource.ResourceInventory,
    oc_map: oc.OC_Map,
    oc_cs1: oc.OCNative,
) -> None:
    """Short and fully-qualified kinds can be mixed, each keeping its own name filter."""
    namespace = yaml.safe_load(
        """
        name: ns1
        cluster:
          name: cs1
        managedResourceTypes:
        - Kind.fully.qualified
        - Kind
        managedResourceNames:
        - resource: Kind.fully.qualified
          resourceNames:
          - fname
        - resource: Kind
          resourceNames:
          - name
        openshiftResources:
        - provider: resource
          path: /some/path.yml
        """
    )
    expected: list[sut.StateSpec] = [
        sut.CurrentStateSpec(
            oc=oc_cs1,
            cluster="cs1",
            namespace="ns1",
            kind="Kind.fully.qualified",
            resource_names=["fname"],
        ),
        sut.CurrentStateSpec(
            oc=oc_cs1,
            cluster="cs1",
            namespace="ns1",
            kind="Kind",
            resource_names=["name"],
        ),
        sut.DesiredStateSpec(
            oc=oc_cs1,
            cluster="cs1",
            namespace="ns1",
            resource={"provider": "resource", "path": "/some/path.yml"},
            parent=namespace,
        ),
    ]

    rs = sut.init_specs_to_fetch(
        resource_inventory,
        oc_map,
        namespaces=[namespace],
    )
    # Spec ordering is not asserted — only set-equality of the specs.
    assert len(expected) == len(rs)
    for e in expected:
        assert e in rs
#
# populate state tests
#
def test_populate_current_state(
    resource_inventory: resource.ResourceInventory, oc_cs1: oc.OCNative
):
    """
    test that populate_current_state properly populates the resource inventory
    """
    # prepare client and resource inventory: pretend the cluster serves the
    # kind under two API groups and returns a single matching item.
    oc_cs1.init_api_resources = True
    oc_cs1.api_kind_version = {"Kind": ["fully.qualified/v1", "another.group/v1"]}
    oc_cs1.get_items = lambda kind, **kwargs: [
        build_resource("Kind", "fully.qualified/v1", "name")
    ]
    resource_inventory.initialize_resource_type("cs1", "ns1", "Kind.fully.qualified")

    # process
    spec = sut.CurrentStateSpec(
        oc=oc_cs1,
        cluster="cs1",
        namespace="ns1",
        kind="Kind.fully.qualified",
        resource_names=["name"],
    )
    sut.populate_current_state(spec, resource_inventory, TEST_INT, TEST_INT_VER)

    # verify: the fetched item lands under data["current"] keyed by name.
    cluster, namespace, kind, data = next(iter(resource_inventory))
    assert (cluster, namespace, kind) == ("cs1", "ns1", "Kind.fully.qualified")
    assert data["current"]["name"] == resource.OpenshiftResource(
        build_resource("Kind", "fully.qualified/v1", "name"), TEST_INT, TEST_INT_VER
    )
def test_populate_current_state_unknown_kind(
    resource_inventory: resource.ResourceInventory, oc_cs1: oc.OCNative, mocker
):
    """
    test that a missing kind in the cluster is catched early on
    """
    # Cluster serves "Kind" only under a *different* API group than requested.
    oc_cs1.init_api_resources = True
    oc_cs1.api_kind_version = {"Kind": ["some.other.group/v1"]}
    # NOTE(review): "auto_spec" is not a valid patch.object kwarg (should be
    # "autospec"); as written it merely sets an attribute on the mock — confirm
    # intent before changing, since real autospec alters call signatures.
    get_item_mock = mocker.patch.object(oc.OCNative, "get_items", auto_spec=True)

    spec = sut.CurrentStateSpec(
        oc=oc_cs1,
        cluster="cs1",
        namespace="ns1",
        kind="Kind.fully.qualified",
        resource_names=["name"],
    )
    sut.populate_current_state(spec, resource_inventory, TEST_INT, TEST_INT_VER)
    # Nothing registered and no fetch attempted for an unknown kind.
    assert len(list(iter(resource_inventory))) == 0
    get_item_mock.assert_not_called()
def test_populate_current_state_resource_name_filtering(
    resource_inventory: resource.ResourceInventory, oc_cs1: oc.OCNative, mocker
):
    """
    test if the resource names are passed properly to the oc client when fetching items
    """
    # NOTE(review): "auto_spec" is likely a misspelling of "autospec" — see
    # test_populate_current_state_unknown_kind.
    get_item_mock = mocker.patch.object(oc.OCNative, "get_items", auto_spec=True)
    spec = sut.CurrentStateSpec(
        oc=oc_cs1,
        cluster="cs1",
        namespace="ns1",
        kind="Kind.fully.qualified",
        resource_names=["name1", "name2"],
    )
    sut.populate_current_state(spec, resource_inventory, TEST_INT, TEST_INT_VER)
    get_item_mock.assert_called_with(
        "Kind.fully.qualified", namespace="ns1", resource_names=["name1", "name2"]
    )
#
# determine_user_key_for_access tests
#
def test_determine_user_key_for_access_github_org():
    """A github-org auth service resolves to the GitHub username field."""
    info = {"auth": {"service": "github-org"}}
    assert sut.determine_user_key_for_access(info) == "github_username"
def test_determine_user_key_for_access_github_org_team():
    """A github-org-team auth service also resolves to the GitHub username."""
    info = {"auth": {"service": "github-org-team"}}
    assert sut.determine_user_key_for_access(info) == "github_username"
def test_determine_user_key_for_access_oidc():
    """An oidc auth service resolves to the org username field."""
    info = {"auth": {"service": "oidc"}}
    assert sut.determine_user_key_for_access(info) == "org_username"
def test_determine_user_key_for_access_not_implemented():
    """An unsupported auth service raises NotImplementedError."""
    info = {"auth": {"service": "not-implemented"}, "name": "c"}
    with pytest.raises(NotImplementedError):
        sut.determine_user_key_for_access(info)
| 26.4576 | 89 | 0.597303 |
78cc4c67fd2b307a3cbdc91e66abb3a9ab2e9957 | 21,269 | py | Python | client/apply_annotations.py | HybridDeveloper/pyre-check | 48d1f8bbf19003417e64950561c786e28af1251a | [
"MIT"
] | null | null | null | client/apply_annotations.py | HybridDeveloper/pyre-check | 48d1f8bbf19003417e64950561c786e28af1251a | [
"MIT"
] | null | null | null | client/apply_annotations.py | HybridDeveloper/pyre-check | 48d1f8bbf19003417e64950561c786e28af1251a | [
"MIT"
] | null | null | null | # Copyright (c) 2016-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree
from typing import (
IO,
Any,
Dict,
List,
NamedTuple,
Optional,
Sequence,
Set,
Tuple,
Union,
overload,
)
import libcst as cst
def _get_attribute_as_string(attribute: cst.BaseExpression) -> str:
    """Flatten a libcst attribute chain (e.g. ``a.b.c``) into a dotted string.

    Walks nested ``cst.Attribute`` nodes, collecting each segment's name, and
    joins the collected segments with '.'.
    NOTE(review): relies on ``attribute.value.value`` navigation of libcst
    nodes (see the pyre-fixme below) -- confirm against the libcst node
    layout before refactoring.
    """
    names = []
    while isinstance(attribute, cst.Attribute):
        # pyre-fixme[16]: `BaseExpression` has no attribute `value`.
        if isinstance(attribute.value.value, cst.Attribute):
            # The base is itself a dotted chain: recurse to stringify it.
            value = _get_attribute_as_string(
                cst.ensure_type(attribute.value, cst.Attribute).value
            )
        else:
            value = _get_name_as_string(attribute.value.value)
        names.append(value)
        attribute = attribute.attr
    # After the loop `attribute` is the final (non-Attribute) segment.
    if attribute is not None:
        names.append(_get_name_as_string(attribute.value))
    return ".".join(names)
def _get_name_as_string(node: Union[cst.CSTNode, str]) -> str:
    """Return the string value of a cst.Name; pass anything else through."""
    if not isinstance(node, cst.Name):
        # pyre-fixme[7]: Expected `str` but got `Union[CSTNode, str]`.
        return node
    return node.value
class FunctionAnnotation(NamedTuple):
    """Annotations captured from a stub for one function."""
    # Parameter list carrying the stub's parameter annotations.
    parameters: cst.Parameters
    # Return annotation, or None when the stub declares none.
    returns: Optional[cst.Annotation]
class ImportStatement(NamedTuple):
    """A pending ``from <module> import <names>`` to add to the source."""
    # Module the names are imported from.
    module: Union[cst.Name, cst.Attribute]
    # Set of names to import from that module.
    names: Set[str]
class ImportCollector(cst.CSTVisitor):
    """Records module names already imported by the source file so the
    transformer does not add duplicate imports later."""

    def __init__(self) -> None:
        self.existing_imports: Set[str] = set()

    def visit_Import(self, node: cst.Import) -> None:
        """Track each ``import x`` / ``import x as y`` by its effective name."""
        for alias in node.names:
            target = alias.asname.name if alias.asname else alias.name
            self.existing_imports.add(_get_attribute_as_string(target))
class TypeCollector(cst.CSTVisitor):
    """Visits a stub (.pyi) tree and collects everything needed to annotate
    the matching source file: function/attribute annotations, the imports
    those annotations require, and class definitions present in the stub."""
    def __init__(self, existing_imports: Set[str]) -> None:
        """Args:
            existing_imports: module names already imported by the source
                file; annotations from these modules will not trigger new
                import statements.
        """
        # Qualifier for storing the canonical name of the current function.
        self.qualifier: List[str] = []
        # Store the annotations.
        self.function_annotations: Dict[str, FunctionAnnotation] = {}
        self.attribute_annotations: Dict[str, cst.Annotation] = {}
        self.imports: Dict[str, ImportStatement] = {}
        self.existing_imports: Set[str] = existing_imports
        self.class_definitions: Dict[str, cst.ClassDef] = {}
    def visit_ClassDef(self, node: cst.ClassDef) -> None:
        """Push the class name onto the qualifier and remember the class."""
        self.qualifier.append(node.name.value)
        self.class_definitions[node.name.value] = node
    def leave_ClassDef(self, original_node: cst.ClassDef) -> None:
        self.qualifier.pop()
    def visit_FunctionDef(self, node: cst.FunctionDef) -> bool:
        """Record the function's parameter/return annotations (when a return
        annotation exists) under its fully qualified name."""
        self.qualifier.append(node.name.value)
        returns = node.returns
        if returns is not None:
            return_annotation = self._create_import_from_annotation(returns)
            parameter_annotations = self._import_parameter_annotations(node.params)
            self.function_annotations[".".join(self.qualifier)] = FunctionAnnotation(
                parameters=parameter_annotations, returns=return_annotation
            )
        # pyi files don't support inner functions, return False to stop the traversal.
        return False
    def leave_FunctionDef(self, original_node: cst.FunctionDef) -> None:
        self.qualifier.pop()
    def visit_AnnAssign(self, node: cst.AnnAssign) -> bool:
        """Record an annotated attribute/variable under its qualified name."""
        # pyre-fixme[16]: `BaseExpression` has no attribute `value`.
        self.qualifier.append(node.target.value)
        annotation_value = self._create_import_from_annotation(node.annotation)
        self.attribute_annotations[".".join(self.qualifier)] = annotation_value
        return True
    def leave_AnnAssign(self, original_node: cst.AnnAssign) -> None:
        self.qualifier.pop()
    def visit_ImportFrom(self, node: cst.ImportFrom) -> None:
        """Track the stub's own `from X import ...` statements (star imports
        and relative imports without a module are ignored)."""
        module = node.module
        if module is None or isinstance(node, cst.ImportStar):
            return
        # pyre-fixme[6]: Expected `List[CSTNode]` for 1st param but got
        # `Union[Sequence[ImportAlias], ImportStar]`.
        # pyre-fixme[6]: Expected `str` for 1st param but got `Union[BaseExpression,
        # str]`.
        # pyre-fixme[16]: `None` has no attribute `value`.
        self._add_to_imports(node.names, cst.Name(module.value), module.value)
    def _add_annotation_to_imports(
        self, annotation: cst.Attribute
    ) -> Union[cst.Name, cst.Attribute]:
        """Register the import needed for a dotted annotation and return the
        bare attribute name to use in its place (e.g. `a.b.C` -> `C` plus a
        pending `from a.b import C`)."""
        key = _get_attribute_as_string(annotation.value)
        # Don't attempt to re-import existing imports.
        if key in self.existing_imports:
            return annotation
        self._add_to_imports(
            [cst.ImportAlias(name=annotation.attr)], annotation.value, key
        )
        return annotation.attr
    @overload
    def _handle_Index(self, slice: cst.Index, node: cst.Subscript) -> cst.Subscript:
        pass
    def _handle_Index(  # noqa
        self, slice: cst.Index, node: cst.BaseExpression
    ) -> cst.BaseExpression:
        """Rewrite the value inside an Index slice, recursing into nested
        subscripts and converting dotted attributes to imported names."""
        value = slice.value
        if isinstance(value, cst.Subscript):
            new_slice = slice.with_changes(value=self._handle_Subscript(value))
            return node.with_changes(slice=new_slice)
        elif isinstance(value, cst.Attribute):
            new_slice = slice.with_changes(value=self._add_annotation_to_imports(value))
            return node.with_changes(slice=new_slice)
        else:
            return node
    def _handle_Subscript(self, node: cst.Subscript) -> cst.Subscript:
        """Rewrite a subscripted annotation (e.g. Dict[a.b.C, int]) so every
        dotted element is replaced by an imported short name.
        NOTE(review): handles both the list-of-SubscriptElement and the
        single-Index shapes of `node.slice` -- libcst version dependent."""
        slice = node.slice
        if isinstance(slice, list):
            new_slice = []
            for item in slice:
                value = item.slice.value
                if isinstance(value, cst.Attribute):
                    name = self._add_annotation_to_imports(item.slice.value)
                    new_index = item.slice.with_changes(value=name)
                    new_slice.append(item.with_changes(slice=new_index))
                else:
                    if isinstance(item.slice, cst.Index) and not isinstance(
                        item.slice.value, cst.Name
                    ):
                        new_index = item.slice.with_changes(
                            value=self._handle_Index(item.slice, item)
                        )
                        item = item.with_changes(slice=new_index, comma=None)
                    new_slice.append(item)
            return node.with_changes(slice=new_slice)
        elif isinstance(slice, cst.Index):
            return self._handle_Index(slice, node)
        else:
            return node
    def _create_import_from_annotation(self, returns: cst.Annotation) -> cst.Annotation:
        """Produce an annotation usable in the source file, registering any
        imports it needs. `Type[...]` subscripts are left untouched."""
        annotation = returns.annotation
        if isinstance(annotation, cst.Attribute):
            attr = self._add_annotation_to_imports(annotation)
            return cst.Annotation(annotation=attr)
        if isinstance(annotation, cst.Subscript):
            value = annotation.value
            if isinstance(value, cst.Name) and value.value == "Type":
                return returns
            return cst.Annotation(annotation=self._handle_Subscript(annotation))
        else:
            return returns
    def _add_to_imports(
        self, names: List[cst.ImportAlias], module: cst.BaseExpression, key: str
    ) -> None:
        """Merge `names` into the pending import statement for `module`
        (keyed by the module's dotted string)."""
        names_as_string = [_get_name_as_string(name.name) for name in names]
        set_names = set(names_as_string)
        if key not in self.imports:
            # pyre-fixme[6]: Expected `Union[Attribute, Name]` for 2nd param but got
            # `BaseExpression`.
            self.imports[key] = ImportStatement(names=set_names, module=module)
        else:
            import_statement = self.imports[key]
            for name in set_names:
                if name not in import_statement.names:
                    import_statement.names.add(name)
    def _import_parameter_annotations(
        self, parameters: cst.Parameters
    ) -> cst.Parameters:
        """Return a Parameters node whose annotations have been rewritten to
        use imported short names (imports registered as a side effect)."""
        def update_annotations(parameters: Sequence[cst.Param]) -> List[cst.Param]:
            updated_parameters = []
            for parameter in list(parameters):
                annotation = parameter.annotation
                if annotation is not None:
                    parameter = parameter.with_changes(
                        annotation=self._create_import_from_annotation(annotation)
                    )
                updated_parameters.append(parameter)
            return updated_parameters
        return parameters.with_changes(params=update_annotations(parameters.params))
class TypeTransformer(cst.CSTTransformer):
    """Rewrites a source module, applying the annotations, imports and class
    definitions previously collected from a stub by TypeCollector."""
    def __init__(
        self,
        function_annotations: Dict[str, FunctionAnnotation],
        attribute_annotations: Dict[str, cst.Annotation],
        imports: Dict[str, ImportStatement],
        class_definitions: Dict[str, cst.ClassDef],
    ) -> None:
        # Qualifier for storing the canonical name of the current function.
        self.qualifier: List[str] = []
        # Store the annotations.
        self.function_annotations = function_annotations
        self.attribute_annotations = attribute_annotations
        self.toplevel_annotations: Dict[str, cst.CSTNode] = {}
        self.class_definitions = class_definitions
        self.visited_classes: Set[str] = set()
        self.imports = imports
        self.import_statements: List[cst.ImportFrom] = []
        # Set when a "@generated" marker is seen; the module is then left as-is.
        self.is_generated: bool = False
    def _qualifier_name(self) -> str:
        """Dotted name of the construct currently being visited."""
        return ".".join(self.qualifier)
    def _annotate_single_target(
        self, node: cst.Assign, updated_node: cst.Assign
    ) -> Union[cst.Assign, cst.AnnAssign]:
        """Turn `x = value` into `x: T = value` when the stub annotates `x`.
        Tuple targets are queued for a separate top-level annotation instead."""
        if isinstance(node.targets[0].target, cst.Tuple):
            target = node.targets[0].target
            # pyre-fixme[16]: `BaseAssignTargetExpression` has no attribute `elements`.
            for element in target.elements:
                if not isinstance(element.value, cst.Subscript):
                    name = _get_name_as_string(element.value.value)
                    self._add_to_toplevel_annotations(name)
            return updated_node
        else:
            target = node.targets[0].target
            # pyre-fixme[16]: `BaseAssignTargetExpression` has no attribute `value`.
            name = _get_name_as_string(target.value)
            self.qualifier.append(name)
            if self._qualifier_name() in self.attribute_annotations and not isinstance(
                target, cst.Subscript
            ):
                annotation = self.attribute_annotations[self._qualifier_name()]
                self.qualifier.pop()
                return cst.AnnAssign(cst.Name(name), annotation, node.value)
            else:
                self.qualifier.pop()
                return updated_node
    def _split_module(
        self, module: cst.Module, updated_module: cst.Module
    ) -> Tuple[
        List[Union[cst.SimpleStatementLine, cst.BaseCompoundStatement]],
        List[Union[cst.SimpleStatementLine, cst.BaseCompoundStatement]],
    ]:
        """Split the updated module's statements at the point just after the
        last import seen during traversal, so new imports can be inserted."""
        import_add_location = 0
        # This works under the principle that while we might modify node contents,
        # we have yet to modify the number of statements. So we can match on the
        # original tree but break up the statements of the modified tree. If we
        # change this assumption in this visitor, we will have to change this code.
        for i, statement in enumerate(module.body):
            if isinstance(statement, cst.SimpleStatementLine):
                for possible_import in statement.body:
                    for last_import in self.import_statements:
                        if possible_import is last_import:
                            import_add_location = i + 1
                            break
        return (
            list(updated_module.body[:import_add_location]),
            list(updated_module.body[import_add_location:]),
        )
    def _add_to_toplevel_annotations(self, name: str) -> None:
        """Queue `name` for a standalone `name: T` line if the stub has an
        annotation for it."""
        self.qualifier.append(name)
        if self._qualifier_name() in self.attribute_annotations:
            annotation = self.attribute_annotations[self._qualifier_name()]
            self.toplevel_annotations[name] = annotation
        self.qualifier.pop()
    def _update_parameters(
        self, annotations: FunctionAnnotation, updated_node: cst.FunctionDef
    ) -> cst.Parameters:
        """Copy stub parameter annotations onto the source's parameters,
        keeping the source's defaults and any annotations already present."""
        # Update params and default params with annotations
        # don't override existing annotations or default values
        def update_annotation(
            parameters: Sequence[cst.Param], annotations: Sequence[cst.Param]
        ) -> List[cst.Param]:
            parameter_annotations = {}
            annotated_parameters = []
            for parameter in list(annotations):
                if parameter.annotation:
                    parameter_annotations[parameter.name.value] = parameter.annotation
            for parameter in list(parameters):
                key = parameter.name.value
                if key in parameter_annotations and not parameter.annotation:
                    parameter = parameter.with_changes(
                        annotation=parameter_annotations[key]
                    )
                annotated_parameters.append(parameter)
            return annotated_parameters
        return annotations.parameters.with_changes(
            params=update_annotation(
                updated_node.params.params, annotations.parameters.params
            )
        )
    def _insert_empty_line(
        self,
        statements: List[Union[cst.SimpleStatementLine, cst.BaseCompoundStatement]],
    ) -> List[Union[cst.SimpleStatementLine, cst.BaseCompoundStatement]]:
        """Ensure the first statement is preceded by a blank line (for
        readability after inserted imports)."""
        if len(statements) < 1:
            # No statements, nothing to add to
            return statements
        if len(statements[0].leading_lines) == 0:
            # Statement has no leading lines, add one!
            return [
                statements[0].with_changes(leading_lines=(cst.EmptyLine(),)),
                *statements[1:],
            ]
        if statements[0].leading_lines[0].comment is None:
            # First line is empty, so its safe to leave as-is
            return statements
        # Statement has a comment first line, so lets add one more empty line
        return [
            statements[0].with_changes(
                leading_lines=(cst.EmptyLine(), *statements[0].leading_lines)
            ),
            *statements[1:],
        ]
    def visit_ClassDef(self, node: cst.ClassDef) -> None:
        """Track visited classes so stub-only classes can be appended later."""
        self.qualifier.append(node.name.value)
        self.visited_classes.add(node.name.value)
    def leave_ClassDef(
        self, original_node: cst.ClassDef, updated_node: cst.ClassDef
    ) -> cst.ClassDef:
        self.qualifier.pop()
        return updated_node
    def visit_FunctionDef(self, node: cst.FunctionDef) -> bool:
        self.qualifier.append(node.name.value)
        # pyi files don't support inner functions, return False to stop the traversal.
        return False
    def leave_FunctionDef(
        self, original_node: cst.FunctionDef, updated_node: cst.FunctionDef
    ) -> cst.FunctionDef:
        """Apply the stub's return/parameter annotations to this function,
        without clobbering annotations the source already has."""
        key = self._qualifier_name()
        self.qualifier.pop()
        if key in self.function_annotations:
            annotations = self.function_annotations[key]
            # Only add new annotation if one doesn't already exist
            if not updated_node.returns:
                updated_node = updated_node.with_changes(returns=annotations.returns)
            # Don't override default values when annotating functions
            new_parameters = self._update_parameters(annotations, updated_node)
            return updated_node.with_changes(params=new_parameters)
        return updated_node
    def leave_Assign(
        self, original_node: cst.Assign, updated_node: cst.Assign
    ) -> Union[cst.Assign, cst.AnnAssign]:
        """Annotate single-target assignments in place; queue multi-target
        ones for standalone top-level annotations."""
        if len(original_node.targets) > 1:
            for assign in original_node.targets:
                if not isinstance(assign.target, cst.Subscript):
                    self._add_to_toplevel_annotations(
                        # pyre-fixme[16]: `BaseAssignTargetExpression` has no
                        # attribute `value`.
                        _get_name_as_string(assign.target.value)
                    )
            return updated_node
        else:
            return self._annotate_single_target(original_node, updated_node)
    def leave_ImportFrom(
        self, original_node: cst.ImportFrom, updated_node: cst.ImportFrom
    ) -> cst.ImportFrom:
        """Merge pending imported names into an existing `from X import ...`
        statement for the same module, removing it from the pending set."""
        self.import_statements.append(original_node)
        # pyre-fixme[6]: Expected `Union[Attribute, Name]` for 1st param but got
        # `Optional[Union[Attribute, Name]]`.
        key = _get_attribute_as_string(original_node.module)
        import_names = updated_node.names
        module = original_node.module
        if (
            module is not None
            # pyre-fixme[16]: `None` has no attribute `value`.
            and module.value in self.imports
            and not isinstance(import_names, cst.ImportStar)
        ):
            names_as_string = [_get_name_as_string(name.name) for name in import_names]
            updated_names = self.imports[key].names.union(set(names_as_string))
            names = [cst.ImportAlias(cst.Name(name)) for name in sorted(updated_names)]
            updated_node = updated_node.with_changes(names=tuple(names))
            del self.imports[key]
        return updated_node
    def visit_Comment(self, node: cst.Comment) -> None:
        # The marker is split ("@" "generated") so this file does not itself
        # contain the literal generated-file marker.
        if "@" "generated" in node.value:
            self.is_generated = True
    def leave_Module(
        self, original_node: cst.Module, updated_node: cst.Module
    ) -> cst.Module:
        """Finalize the module: add remaining imports after the last existing
        import, emit queued top-level annotations, and append classes that
        exist only in the stub. Generated files are returned unchanged."""
        fresh_class_definitions = [
            definition
            for name, definition in self.class_definitions.items()
            if name not in self.visited_classes
        ]
        if self.is_generated:
            return original_node
        if (
            not self.toplevel_annotations
            and not self.imports
            and not fresh_class_definitions
        ):
            return updated_node
        toplevel_statements = []
        # First, find the insertion point for imports
        statements_before_imports, statements_after_imports = self._split_module(
            original_node, updated_node
        )
        # Make sure there's at least one empty line before the first non-import
        statements_after_imports = self._insert_empty_line(statements_after_imports)
        imported = set()
        for statement in self.import_statements:
            names = statement.names
            if isinstance(names, cst.ImportStar):
                continue
            for name in names:
                if name.asname:
                    name = name.asname
                if name:
                    imported.add(_get_name_as_string(name.name))
        for _, import_statement in self.imports.items():
            # Filter out anything that has already been imported.
            names = import_statement.names.difference(imported)
            names = [cst.ImportAlias(cst.Name(name)) for name in sorted(names)]
            if not names:
                continue
            import_statement = cst.ImportFrom(
                module=import_statement.module, names=names
            )
            # Add import statements to module body.
            # Need to assign an Iterable, and the argument to SimpleStatementLine
            # must be subscriptable.
            toplevel_statements.append(cst.SimpleStatementLine([import_statement]))
        for name, annotation in self.toplevel_annotations.items():
            annotated_assign = cst.AnnAssign(
                cst.Name(name),
                # pyre-fixme[16]: `CSTNode` has no attribute `annotation`.
                cst.Annotation(annotation.annotation),
                None,
            )
            toplevel_statements.append(cst.SimpleStatementLine([annotated_assign]))
        toplevel_statements.extend(fresh_class_definitions)
        return updated_node.with_changes(
            body=[
                *statements_before_imports,
                *toplevel_statements,
                *statements_after_imports,
            ]
        )
def _parse(file: IO[Any]) -> cst.Module:  # pyre-fixme[2]
    """Parse an open file object into a libcst Module."""
    return cst.parse_module(file.read())
def _annotate_source(stubs: cst.Module, source: cst.Module) -> cst.Module:
    """Copy annotations collected from `stubs` onto `source` and return the
    rewritten source module."""
    existing = ImportCollector()
    source.visit(existing)
    collector = TypeCollector(existing.existing_imports)
    stubs.visit(collector)
    return source.visit(
        TypeTransformer(
            collector.function_annotations,
            collector.attribute_annotations,
            collector.imports,
            collector.class_definitions,
        )
    )
def apply_stub_annotations(stub_path: str, file_path: str) -> str:
    """Merge the annotations from the stub at `stub_path` into the source at
    `file_path` and return the annotated source code as a string."""
    with open(stub_path) as stub_file:
        stubs = _parse(stub_file)
    with open(file_path) as source_file:
        source = _parse(source_file)
    return _annotate_source(stubs, source).code
| 40.589695 | 88 | 0.628332 |
7ea29e303b3adee10026aa5dc908c49273ba5fd1 | 244 | py | Python | user_management/users/urls.py | abdulwahed-dev/django | f104c90fc3dd94e868a841234d5585b9ce5a61d7 | [
"MIT"
] | 1 | 2022-03-23T15:15:33.000Z | 2022-03-23T15:15:33.000Z | user_management/users/urls.py | abdulwahed-dev/django | f104c90fc3dd94e868a841234d5585b9ce5a61d7 | [
"MIT"
] | null | null | null | user_management/users/urls.py | abdulwahed-dev/django | f104c90fc3dd94e868a841234d5585b9ce5a61d7 | [
"MIT"
] | null | null | null |
from django.urls import path
from .views import home, RegisterView # Import the view here
# URL routes for the users app: the index page plus the registration view.
urlpatterns = [
    path('', home, name='users-home'),
    path('register/', RegisterView.as_view(), name='users-register'),  # class-based registration view
]
85214c9242f451caeb6c20e658d6a5212184255a | 3,150 | py | Python | filter_plugins/listoflists.py | chepazzo/ansible-filters-extras | 2373d66089cb6497a29577c9de2a705ec9a67ea2 | [
"BSD-2-Clause"
] | 1 | 2016-06-22T06:23:58.000Z | 2016-06-22T06:23:58.000Z | filter_plugins/listoflists.py | chepazzo/ansible-filters-extras | 2373d66089cb6497a29577c9de2a705ec9a67ea2 | [
"BSD-2-Clause"
] | null | null | null | filter_plugins/listoflists.py | chepazzo/ansible-filters-extras | 2373d66089cb6497a29577c9de2a705ec9a67ea2 | [
"BSD-2-Clause"
] | null | null | null | '''
This is a collection of filters that deals with lists of lists
'''
from __future__ import absolute_import
from ansible import errors
import itertools
class FilterModule(object):
    '''Exposes the list-of-lists filters to Ansible.'''
    def filters(self):
        '''Return the mapping of filter names to their implementations.'''
        available = {
            'collapse': collapse,
            'collapse_dict': collapse_dict,
            'expand_ranges': expand_ranges,
        }
        return available
def collapse(stuff):
    '''
    Flatten a list of lists into a single list.

    This is similar to using with_flattened, except that this filter can be
    used inside of a jinja2 template.

    Args:
        stuff (list): List of lists to collapse. Usually passed via pipe.

    Returns:
        list: A combined flattened list.
    '''
    return [element for sublist in stuff for element in sublist]
def collapse_dict(stuff):
    '''
    Flatten a dict of lists into a single list of the dict's values.

    This is similar to using with_flattened, except that this filter can be
    used inside of a jinja2 template.

    Args:
        stuff (dict): Dict of lists to collapse. Usually passed via pipe.

    Returns:
        list: A combined flattened list.

    Example:
        vars:
          users:
            apps:
              - name: www-data
                id: 33
            people:
              - name: mike
                id: 1001
              - name: bob
                id: 1002
        tasks:
          - name: show me stuff
            debug: {{users|collapse_dict}}
        returns: [{'name': 'www-data', 'id': 33}, {'name': 'mike', 'id': 1001}, {'name': 'bob', 'id': 1002}]
    '''
    return [element for values in stuff.values() for element in values]
def expand_ranges(stuff,field='name'):
    '''
    Expand list items containing embedded ranges into individual items.

    Args:
        stuff (list): List of dicts, some of which may describe ranges.
            Usually passed via pipe.
        field (Optional[str]): Name of the field whose value 'range' marks
            items that need to be expanded. The same field receives the
            expanded value on each generated item.

    Returns:
        list: Unified list with ranges expanded to multiple items.

    Example:
        Playbook Example::

            ---
            vars:
              ints:
                - name: range
                  prefix: "ge-0/1/"
                  range: [0,4]
                - name: ge-1/0/0
                - name: ge-2/0/0
            tasks:
              - name: expand_ranges
                debug: var={{ item.name }}
                with_items:
                  ints|expand_ranges('name')
    '''
    expanded = []
    for entry in stuff:
        # Items without the field, or whose field is not 'range', pass through.
        if entry.get(field) != 'range':
            expanded.append(entry)
            continue
        prefix = entry.get('prefix','')
        suffix = entry.get('suffix','')
        for num in range(*entry['range']):
            item = dict(entry)
            item[field] = "%s%s%s"%(prefix,num,suffix)
            expanded.append(item)
    return expanded
| 27.155172 | 104 | 0.552698 |
6e646c02b344a3c96d38b25862a43d8250b4520f | 33,856 | py | Python | flow/util/genMetrics.py | mathursanjiv/OpenROAD-flow-scripts | 99889aac8cfbe6c96da2001e43f71d4522f144e2 | [
"BSD-3-Clause"
] | null | null | null | flow/util/genMetrics.py | mathursanjiv/OpenROAD-flow-scripts | 99889aac8cfbe6c96da2001e43f71d4522f144e2 | [
"BSD-3-Clause"
] | null | null | null | flow/util/genMetrics.py | mathursanjiv/OpenROAD-flow-scripts | 99889aac8cfbe6c96da2001e43f71d4522f144e2 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# This scripts attempts to extract relevant data from a completed flow design
# and save it into a 'metadata.json'. It achieves this by looking for specific
# information in specific files using regular expressions
# -----------------------------------------------------------------------------
import os
from sys import exit
from datetime import datetime, timedelta
from collections import defaultdict
from uuid import uuid4 as uuid
from subprocess import check_output, call, STDOUT
import argparse
import json
import pandas as pd
import re
# make sure the working dir is flow/ so relative paths (logs/, reports/,
# results/) resolve correctly no matter where this script was invoked from
os.chdir(os.path.join(os.path.dirname(os.path.abspath(__file__)) , '..'))
# Parse and validate arguments
# =============================================================================
def parse_args():
    """Parse and validate the command-line options for metrics generation.

    Exits with status 1 when the given flow path is not a directory.
    """
    arg_parser = argparse.ArgumentParser(
        description='Generates metadata from OpenROAD flow')
    arg_parser.add_argument('--flowPath', '-f',
                            required=False,
                            default='./',
                            help='Path to the flow directory')
    arg_parser.add_argument('--design', '-d',
                            required=False,
                            default='all_designs',
                            help='Design Name for metrics')
    arg_parser.add_argument('--flowVariant', '-v',
                            required=False,
                            default='base',
                            help='FLOW_VARIANT for the design')
    arg_parser.add_argument('--platform', '-p',
                            required=False,
                            default='nangate45',
                            help='Design Platform')
    arg_parser.add_argument('--comment', '-c',
                            required=False,
                            default='',
                            help='Additional comments to embed')
    arg_parser.add_argument('--output', '-o',
                            required=False,
                            default='metadata.json',
                            help='Output file')
    arg_parser.add_argument('--hier', '-x',
                            action='store_true',
                            help='Hierarchical JSON')
    parsed = arg_parser.parse_args()

    # Bail out early when the flow directory cannot possibly be read.
    if not os.path.isdir(parsed.flowPath):
        print('[ERROR] flowPath does not exist')
        print('Path: ' + parsed.flowPath)
        exit(1)
    return parsed
# Functions
# =============================================================================
# Main function to do specific extraction of patterns from a file
# This function will look for a regular expression 'pattern' in a 'file', and
# set the key, 'jsonTag', to the value found. The specific 'occurrence' selects
# which occurrence it uses (default -1, i.e., last). If pattern not found, it
# will print an error and set the value to N/A. If a 'defaultNotFound' is set,
# it will use that instead. If count is set to True, it will return the count
# of the pattern.
def extractTagFromFile(jsonTag, jsonFile, pattern, file, count=False,
                       occurrence=-1, defaultNotFound='N/A', t=str,
                       required=True, arrayPos=0):
    """Search a report/log file for a regex and store the result in jsonFile.

    Searches `file` (relative to the global `args.flowPath`) for `pattern`
    (evaluated with re.M) and stores the selected match in
    `jsonFile[jsonTag]`, converting to float when the text parses as one.

    Args:
        jsonTag: key to set in `jsonFile`; a warning is printed if it exists.
        jsonFile: dict of collected metrics, mutated in place.
        pattern: regular expression to search for.
        file: path of the file to search, relative to args.flowPath.
        count: if True, store the number of matches instead of a match value.
        occurrence: which match to use (default -1, i.e. the last one).
        defaultNotFound: value stored when the pattern is not found.
        t: unused; kept only for backward compatibility with callers.
        required: if False, a missing pattern stores `defaultNotFound`
            without printing a warning.
        arrayPos: capture-group index to select when the pattern has several
            groups (re.findall then yields tuples). This was previously an
            undefined name, raising NameError for any multi-group pattern;
            it is now a parameter defaulting to the first group.
    """
    if jsonTag in jsonFile:
        print('[WARN] Overwriting Tag', jsonTag)

    # Open file
    try:
        searchFilePath = os.path.join(args.flowPath, file)
        with open(searchFilePath) as f:
            content = f.read()

        parsedMetrics = re.findall(pattern, content, re.M)
        patternNotFound = (len(parsedMetrics) < abs(occurrence))
        if patternNotFound and not required:
            jsonFile[jsonTag] = defaultNotFound
            return
        if parsedMetrics:
            if count:
                # Return the count
                jsonFile[jsonTag] = len(parsedMetrics)
            else:
                # Note: This gets the specified occurrence
                value = parsedMetrics[occurrence]
                if isinstance(value, tuple):
                    # Multi-group pattern: pick the requested capture group.
                    value = value[arrayPos]
                value = value.strip()
                try:
                    # Prefer a numeric value when the text parses as one.
                    jsonFile[jsonTag] = float(value)
                except ValueError:
                    jsonFile[jsonTag] = str(value)
        else:
            # Only print a warning if the defaultNotFound is not set
            print('[WARN] Tag {} not found in {}.'.format(jsonTag,
                                                          searchFilePath),
                  'Will use {}.'.format(defaultNotFound))
            jsonFile[jsonTag] = defaultNotFound
    except IOError:
        print('[ERROR] Failed to open file:', searchFilePath)
        jsonFile[jsonTag] = 'ERR'
def extractGnuTime(prefix, jsonFile, file):
    """Extract elapsed time, average CPU and peak memory from a GNU-time
    style summary line in `file`, storing them in `jsonFile` under
    `<prefix>__runtime__total`, `<prefix>__cpu__total` and
    `<prefix>__mem__peak`."""
    template = '^Elapsed time: %s\[h:\]min:sec. Average CPU: %s. Peak memory: %sKB.'
    # Each entry captures a different field of the same summary line.
    specs = [
        ('__runtime__total', ('(\S+)', '\S+', '\S+')),
        ('__cpu__total', ('\S+', '(\S+)', '\S+')),
        ('__mem__peak', ('\S+', '\S+', '(\S+)')),
    ]
    for suffix, groups in specs:
        extractTagFromFile(prefix + suffix, jsonFile, template % groups, file)
#
# Extract Clock Latency, Skew numbers
# Need to extract these from native json
#
def get_skew_latency(file_name):
    """Parse a clock report and return (max latency, min latency, skew) of
    the clock with the worst skew.

    The report's 'Latency' section is expected to contain a one-token line
    (max latency) followed by a line whose first and third tokens are the
    min latency and the skew. Returns ('ERR', 'ERR', 'ERR') when the file
    cannot be opened.
    """
    try:
        with open(file_name, 'r') as report:
            lines = report.readlines()
    except IOError:
        print('[WARN] Failed to open file:', file_name)
        return ('ERR', 'ERR', 'ERR')

    in_latency = False
    cur_max = cur_min = cur_skew = 0.0
    best_max = best_min = best_skew = 0.0
    for line in lines:
        fields = line.split()
        if not fields:
            continue
        if line.startswith('Latency'):
            in_latency = True
            continue
        if in_latency and len(fields) == 1:
            cur_max = float(fields[0])
            continue
        if in_latency and len(fields) > 2:
            cur_min = float(fields[0])
            cur_skew = float(fields[2])
            # Keep the latencies belonging to the worst skew seen so far.
            if cur_skew > best_skew:
                best_skew = cur_skew
                best_max = cur_max
                best_min = cur_min
            in_latency = False
    return (best_max, best_min, best_skew)
#
# Extract clock info from sdc file
#
def read_sdc(file_name):
    """Extract clock definitions from an SDC file.

    Returns a sorted list of 'name: period' strings, one per create_clock
    command. Returns an empty list when the file cannot be opened.
    """
    try:
        with open(file_name, 'r') as sdc:
            lines = sdc.readlines()
    except IOError:
        print('[WARN] Failed to open file:', file_name)
        return []

    clocks = []
    for line in lines:
        tokens = line.split()
        if len(tokens) < 2 or tokens[0] != 'create_clock':
            continue
        # The value always follows its flag on create_clock lines.
        clk_name = tokens[tokens.index('-name') + 1]
        period = tokens[tokens.index('-period') + 1]
        clocks.append('%s: %s' % (clk_name, period))
    clocks.sort()
    return clocks
# Main
# =============================================================================
def is_git_repo(folder=None):
    """Return True when `folder` (or the current directory if None) is
    inside a git repository.

    Runs `git branch`, discarding its output, and treats a zero exit status
    as success. Passing `cwd=None` makes subprocess inherit the current
    working directory, which matches the original two-branch behavior.
    The devnull handle is now closed via a context manager -- the original
    leaked one open file descriptor per call.
    """
    cmd = ['git', 'branch']
    with open(os.devnull, 'w') as devnull:
        return call(cmd, stderr=STDOUT, stdout=devnull, cwd=folder) == 0
def extract_metrics(cwd, platform, design, flow_variant, output, hier_json):
    """Scrape the per-stage logs/reports of one flow run and build metrics.

    :param cwd: flow root directory containing logs/, reports/, results/
    :param platform: platform (PDK) name
    :param design: design name
    :param flow_variant: flow variant sub-directory name
    :param output: path the JSON metrics file is written to
    :param hier_json: if truthy, emit a two-level {stage: {metric: value}}
        dict instead of the flat 'stage__metric' keys
    :return: (metrics_dict, metrics_df) -- raw metrics and a two-column
        pandas DataFrame ('Metrics', '<col>')

    Relies on module-level helpers/state defined elsewhere in this file:
    extractTagFromFile, extractGnuTime, is_git_repo, `now`, pd, uuid, etc.
    """
    # Regex template for OpenSTA-style report blocks:
    #   <header line>
    #   ----------
    #   <value line>
    baseRegEx = '^{}\n^-*\n^{}'
    logPath = os.path.join(cwd, 'logs', platform, design, flow_variant)
    rptPath = os.path.join(cwd, 'reports', platform, design, flow_variant)
    resultPath = os.path.join(cwd, 'results', platform, design, flow_variant)
    metrics_dict = defaultdict(dict)
    metrics_dict['run__flow__generate__date'] = now.strftime('%Y-%m-%d %H:%M')
    metrics_dict['run__flow__metrics__version'] = 'Metrics_2.1'
    cmdOutput = check_output(['openroad', '-version'])
    cmdFields = [x.decode('utf-8') for x in cmdOutput.split()]
    metrics_dict['run__flow__openroad__version'] = str(cmdFields[0])
    if len(cmdFields) > 1:
        metrics_dict['run__flow__openroad__commit'] = str(cmdFields[1])
    else:
        metrics_dict['run__flow__openroad__commit'] = 'N/A'
    if is_git_repo():
        cmdOutput = check_output(['git', 'rev-parse', 'HEAD'])
        cmdOutput = cmdOutput.decode('utf-8').strip()
    else:
        cmdOutput = 'not a git repo'
        print('[WARN]', cmdOutput)
    metrics_dict['run__flow__scripts__commit'] = cmdOutput
    metrics_dict['run__flow__uuid'] = str(uuid())
    metrics_dict['run__flow__design'] = design
    metrics_dict['run__flow__platform'] = platform
    platformDir = os.environ.get('PLATFORM_DIR')
    if platformDir is None:
        print('[INFO]', 'PLATFORM_DIR env variable not set')
        cmdOutput = 'N/A'
    elif is_git_repo(folder=platformDir):
        cmdOutput = check_output(['git', 'rev-parse', 'HEAD'], cwd=platformDir)
        cmdOutput = cmdOutput.decode('utf-8').strip()
    else:
        print('[WARN]', 'not a git repo')
        cmdOutput = 'N/A'
    metrics_dict['run__flow__platform__commit'] = cmdOutput
    metrics_dict['run__flow__variant'] = flow_variant
    # NOTE: pattern strings are raw literals to avoid invalid-escape
    # warnings (\S, \d, \[ ...) on modern Python; matching is unchanged.
    extractTagFromFile('run__flow__platform__timing_units',
                       metrics_dict,
                       r'^ time (\S+)',
                       logPath + '/2_1_floorplan.log')
    extractTagFromFile('run__flow__platform__power_units',
                       metrics_dict,
                       r'^ power (\S+)',
                       logPath + '/2_1_floorplan.log')
    extractTagFromFile('run__flow__platform__distance_units',
                       metrics_dict,
                       r'^ distance (\S+)',
                       logPath + '/2_1_floorplan.log')
    # Synthesis
    # =========================================================================
    extractTagFromFile('synth__design__instance__stdcell__count',
                       metrics_dict,
                       r'Number of cells: +(\S+)',
                       rptPath + '/synth_stat.txt')
    extractTagFromFile('synth__design__instance__stdcell__area',
                       metrics_dict,
                       r'Chip area for module.*: +(\S+)',
                       rptPath + '/synth_stat.txt')
    extractGnuTime('synth', metrics_dict, logPath + '/1_1_yosys.log')
    # Clocks
    # =========================================================================
    clk_list = read_sdc(resultPath + '/2_floorplan.sdc')
    metrics_dict['constraints__clocks__count'] = len(clk_list)
    metrics_dict['constraints__clocks__details'] = clk_list
    # Floorplan
    # =========================================================================
    extractTagFromFile('floorplan__design__restruct__stdcell__count__pre',
                       metrics_dict,
                       r'number instances before restructure is (\d+)',
                       logPath + '/2_1_floorplan.log',
                       defaultNotFound=0,
                       required=False)
    extractTagFromFile('floorplan__design__restruct__stdcell__count__post',
                       metrics_dict,
                       r'number instances after restructure is (\d+)',
                       logPath + '/2_1_floorplan.log',
                       defaultNotFound=0,
                       required=False)
    extractTagFromFile('floorplan__design__restruct__stdcell__area__pre',
                       metrics_dict,
                       r'^Design area (\S+) u\^2',
                       logPath + '/2_1_floorplan.log',
                       occurrence=-2,
                       defaultNotFound=0,
                       required=False)
    extractTagFromFile('floorplan__design__restruct__stdcell__area__post',
                       metrics_dict,
                       r'^Design area (\S+) u\^2',
                       logPath + '/2_1_floorplan.log',
                       defaultNotFound=0,
                       required=False)
    extractTagFromFile('floorplan__timing__setup__tns',
                       metrics_dict,
                       baseRegEx.format('floorplan final report_tns',
                                        r'tns (\S+)'),
                       logPath + '/2_1_floorplan.log')
    extractTagFromFile('floorplan__timing__setup__ws',
                       metrics_dict,
                       baseRegEx.format('floorplan final report_worst_slack',
                                        r'worst slack (\S+)'),
                       logPath + '/2_1_floorplan.log',
                       occurrence=0)
    extractTagFromFile('floorplan__design__instance__stdcell__area',
                       metrics_dict,
                       baseRegEx.format('floorplan final report_design_area',
                                        r'^Design area (\S+) u\^2'),
                       logPath + '/2_1_floorplan.log')
    extractTagFromFile('floorplan__design__instance__design__util',
                       metrics_dict,
                       baseRegEx.format('floorplan final report_design_area',
                                        r'^Design area .* (\S+)% utilization'),
                       logPath + '/2_1_floorplan.log')
    extractTagFromFile('floorplan__design__io__count',
                       metrics_dict,
                       r'Number of I/O +(\d+)',
                       logPath + '/3_2_place_iop.log')
    extractTagFromFile('floorplan__design__instance__macros__count',
                       metrics_dict,
                       r'Found (\S+) macros.',
                       logPath + '/2_4_mplace.log',
                       defaultNotFound=0)
    extractGnuTime('floorplan', metrics_dict, logPath + '/2_4_mplace.log')
    # Place
    # =========================================================================
    extractTagFromFile('globalplace__route__wirelength__estimated',
                       metrics_dict,
                       r'Total wirelength: (\S+)',
                       logPath + '/3_1_place_gp.log')
    extractTagFromFile('globalplace__timing__setup__tns',
                       metrics_dict,
                       baseRegEx.format('global place report_tns',
                                        r'tns (\S+)'),
                       logPath + '/3_1_place_gp.log')
    extractTagFromFile('globalplace__timing__setup__ws',
                       metrics_dict,
                       baseRegEx.format('global place report_worst_slack',
                                        r'worst slack (\S+)'),
                       logPath + '/3_1_place_gp.log')
    extractGnuTime('globalplace', metrics_dict, logPath + '/3_1_place_gp.log')
    extractTagFromFile('placeopt__timing__setup__tns',
                       metrics_dict,
                       baseRegEx.format('resizer report_tns', r'tns (\S+)'),
                       logPath + '/3_3_resizer.log')
    extractTagFromFile('placeopt__timing__setup__ws',
                       metrics_dict,
                       baseRegEx.format('resizer report_worst_slack',
                                        r'worst slack (\S+)'),
                       logPath + '/3_3_resizer.log')
    extractTagFromFile('placeopt__design__instance__design__area',
                       metrics_dict,
                       baseRegEx.format('resizer report_design_area',
                                        r'^Design area (\S+) u\^2'),
                       logPath + '/3_3_resizer.log')
    extractTagFromFile('placeopt__design__instance__design__util',
                       metrics_dict,
                       baseRegEx.format('resizer report_design_area',
                                        r'^Design area .* (\S+)% utilization'),
                       logPath + '/3_3_resizer.log')
    extractTagFromFile('placeopt__timing__slew__violation__count',
                       metrics_dict,
                       baseRegEx.format('resizer max_slew_violation_count',
                                        r'max slew violation count (\S+)'),
                       logPath + '/3_3_resizer.log')
    extractTagFromFile('placeopt__timing__fanout__violation__count',
                       metrics_dict,
                       baseRegEx.format('resizer max_fanout_violation_count',
                                        r'max fanout violation count (\S+)'),
                       logPath + '/3_3_resizer.log')
    extractTagFromFile('placeopt__timing__max_cap__violation__count',
                       metrics_dict,
                       baseRegEx.format('resizer max_cap_violation_count',
                                        r'max cap violation count (\S+)'),
                       logPath + '/3_3_resizer.log')
    extractTagFromFile('placeopt__design__instance__stdcell__count',
                       metrics_dict,
                       r'^instance_count\n-*\n^(\S+)',
                       logPath + '/3_3_resizer.log')
    extractGnuTime('placeopt', metrics_dict, logPath + '/3_3_resizer.log')
    extractTagFromFile('detailedplace__design__violations__count',
                       metrics_dict,
                       r'^\[INFO FLW-0012\] Placement violations (\S+).',
                       logPath + '/3_4_opendp.log')
    extractTagFromFile('detailedplace__timing__setup__tns',
                       metrics_dict,
                       baseRegEx.format('detailed place report_tns',
                                        r'tns (\S+)'),
                       logPath + '/3_4_opendp.log')
    extractTagFromFile('detailedplace__timing__setup__ws',
                       metrics_dict,
                       baseRegEx.format('detailed place report_worst_slack',
                                        r'worst slack (\S+)'),
                       logPath + '/3_4_opendp.log')
    extractTagFromFile('detailedplace__design__instance__displacement__total',
                       metrics_dict,
                       r'total displacement +(\d*\.?\d*)',
                       logPath + '/3_4_opendp.log')
    extractTagFromFile('detailedplace__design__instance__displacement__mean',
                       metrics_dict,
                       r'average displacement +(\d*\.?\d*)',
                       logPath + '/3_4_opendp.log')
    # NOTE: fixed metric-key typo 'desgin' -> 'design' so it matches the
    # sibling displacement metrics; consumers keyed on the old misspelled
    # name must update.
    extractTagFromFile('detailedplace__design__instance__displacement__max',
                       metrics_dict,
                       r'max displacement +(\d*\.?\d*)',
                       logPath + '/3_4_opendp.log')
    extractTagFromFile('detailedplace__route__wirelength__estimated',
                       metrics_dict,
                       r'legalized HPWL +(\d*\.?\d*)',
                       logPath + '/3_4_opendp.log')
    extractGnuTime('detailedplace', metrics_dict, logPath + '/3_4_opendp.log')
    # CTS
    # =========================================================================
    latency_max, latency_min, skew = get_skew_latency(logPath + '/4_1_cts.log')
    metrics_dict['cts__clock__latency__min'] = latency_min
    metrics_dict['cts__clock__latency__max'] = latency_max
    metrics_dict['cts__clock__skew__worst'] = skew
    extractTagFromFile('cts__timing__setup__tns__prerepair',
                       metrics_dict,
                       baseRegEx.format('cts pre-repair report_tns',
                                        r'tns (\S+)'),
                       logPath + '/4_1_cts.log')
    extractTagFromFile('cts__timing__slew__violation__count__prerepair',
                       metrics_dict,
                       baseRegEx.format(
                           'cts pre-repair max_slew_violation_count',
                           r'max slew violation count (\S+)'),
                       logPath + '/4_1_cts.log')
    extractTagFromFile('cts__timing__fanout__violation__count__prerepair',
                       metrics_dict,
                       baseRegEx.format(
                           'cts pre-repair max_fanout_violation_count',
                           r'max fanout violation count (\S+)'),
                       logPath + '/4_1_cts.log')
    extractTagFromFile('cts__timing__max_cap__violation__count__prerepair',
                       metrics_dict,
                       baseRegEx.format(
                           'cts pre-repair max_cap_violation_count',
                           r'max cap violation count (\S+)'),
                       logPath + '/4_1_cts.log')
    extractTagFromFile('cts__timing__setup__ws__prerepair',
                       metrics_dict,
                       baseRegEx.format('cts pre-repair report_worst_slack',
                                        r'worst slack (\S+)'),
                       logPath + '/4_1_cts.log')
    extractTagFromFile('cts__timing__setup__tns__postrepair',
                       metrics_dict,
                       baseRegEx.format('cts post-repair report_tns',
                                        r'tns (\S+)'),
                       logPath + '/4_1_cts.log')
    extractTagFromFile('cts__timing__setup__ws__postrepair',
                       metrics_dict,
                       baseRegEx.format('cts post-repair report_worst_slack',
                                        r'worst slack (\S+)'),
                       logPath + '/4_1_cts.log')
    extractTagFromFile('cts__timing__slew__violation__count__postrepair',
                       metrics_dict,
                       baseRegEx.format(
                           'cts post-repair max_slew_violation_count',
                           r'max slew violation count (\S+)'),
                       logPath + '/4_1_cts.log')
    extractTagFromFile('cts__timing__fanout__violation__count__postrepair',
                       metrics_dict,
                       baseRegEx.format(
                           'cts post-repair max_fanout_violation_count',
                           r'max fanout violation count (\S+)'),
                       logPath + '/4_1_cts.log')
    extractTagFromFile('cts__timing__max_cap__violation__count__postrepair',
                       metrics_dict,
                       baseRegEx.format(
                           'cts post-repair max_cap_violation_count',
                           r'max cap violation count (\S+)'),
                       logPath + '/4_1_cts.log')
    # (A second, byte-identical extraction of cts__timing__setup__tns that
    # followed here was removed -- it overwrote the same key with the same
    # value.)
    extractTagFromFile('cts__timing__setup__tns', metrics_dict,
                       baseRegEx.format('cts final report_tns', r'tns (\S+)'),
                       logPath + '/4_1_cts.log')
    extractTagFromFile('cts__timing__setup__ws',
                       metrics_dict,
                       baseRegEx.format('cts final report_worst_slack',
                                        r'worst slack (\S+)'),
                       logPath + '/4_1_cts.log')
    extractTagFromFile('cts__timing__slew__violation__count',
                       metrics_dict,
                       baseRegEx.format('cts final max_slew_violation_count',
                                        r'max slew violation count (\S+)'),
                       logPath + '/4_1_cts.log')
    extractTagFromFile('cts__timing__fanout__violation__count',
                       metrics_dict,
                       baseRegEx.format('cts final max_fanout_violation_count',
                                        r'max fanout violation count (\S+)'),
                       logPath + '/4_1_cts.log')
    extractTagFromFile('cts__design__instance__hold_buffer__count',
                       metrics_dict,
                       r'Inserted (\d+) hold buffers',
                       logPath + '/4_1_cts.log',
                       defaultNotFound=0)
    # Route
    # =========================================================================
    logFile = logPath + '/5_1_fastroute.log'
    latency_max, latency_min, skew = get_skew_latency(logFile)
    metrics_dict['globalroute__clock__latency__min'] = latency_min
    metrics_dict['globalroute__clock__latency__max'] = latency_max
    metrics_dict['globalroute__clock__skew__worst'] = skew
    extractTagFromFile('globalroute__timing__setup__tns',
                       metrics_dict,
                       baseRegEx.format('global route report_tns',
                                        r'tns (\S+)'),
                       logPath + '/5_1_fastroute.log')
    extractTagFromFile('globalroute__timing__setup__ws',
                       metrics_dict,
                       baseRegEx.format('global route report_worst_slack',
                                        r'worst slack (\S+)'),
                       logPath + '/5_1_fastroute.log')
    extractTagFromFile('globalroute__timing__slew__violation__count',
                       metrics_dict,
                       baseRegEx.format(
                           'global route max_slew_violation_count',
                           r'max slew violation count (\S+)'),
                       logPath + '/5_1_fastroute.log')
    extractTagFromFile('globalroute__timing__fanout__violation__count',
                       metrics_dict,
                       baseRegEx.format(
                           'global route max_fanout_violation_count',
                           r'max fanout violation count (\S+)'),
                       logPath + '/5_1_fastroute.log')
    extractTagFromFile('globalroute__timing__max_cap__violation__count',
                       metrics_dict,
                       baseRegEx.format('global route max_cap_violation_count',
                                        r'max cap violation count (\S+)'),
                       logPath + '/5_1_fastroute.log')
    extractTagFromFile('globalroute__timing__clock__slack',
                       metrics_dict,
                       r'^\[INFO FLW-....\] Clock .* slack (\S+)',
                       logPath + '/5_1_fastroute.log')
    extractTagFromFile('globalroute__timing__clock__period',
                       metrics_dict,
                       r'^\[INFO FLW-....\] Clock .* period (\S+)',
                       logPath + '/5_1_fastroute.log')
    extractGnuTime('globalroute', metrics_dict, logPath + '/5_1_fastroute.log')
    extractTagFromFile('detailedroute__route__wirelength',
                       metrics_dict,
                       r'Total wire length = +(\S+) um.',
                       logPath + '/5_2_TritonRoute.log')
    extractTagFromFile('detailedroute__route__via__count',
                       metrics_dict,
                       r'Total number of vias = +(\S+).',
                       logPath + '/5_2_TritonRoute.log')
    extractTagFromFile('detailedroute__route__drc_errors__count',
                       metrics_dict,
                       '(?i)violation',
                       rptPath + '/5_route_drc.rpt',
                       count=True, defaultNotFound=0)
    extractGnuTime('detailedroute',
                   metrics_dict,
                   logPath + '/5_2_TritonRoute.log')
    # Finish
    # =========================================================================
    logFile = logPath + '/6_report.log'
    latency_max, latency_min, skew = get_skew_latency(logFile)
    metrics_dict['finish__clock__latency__min'] = latency_min
    metrics_dict['finish__clock__latency__max'] = latency_max
    metrics_dict['finish__clock__skew__worst'] = skew
    extractTagFromFile('finish__timing__setup__tns',
                       metrics_dict,
                       baseRegEx.format('finish report_tns', r'tns (\S+)'),
                       logPath + '/6_report.log')
    extractTagFromFile('finish__timing__setup__ws',
                       metrics_dict,
                       baseRegEx.format('finish report_worst_slack',
                                        r'worst slack (\S+)'),
                       logPath + '/6_report.log')
    extractTagFromFile('finish__timing__slew__violation__count',
                       metrics_dict,
                       baseRegEx.format('finish max_slew_violation_count',
                                        r'max slew violation count (\S+)'),
                       logPath + '/6_report.log')
    extractTagFromFile('finish__timing__fanout__violation__count',
                       metrics_dict,
                       baseRegEx.format('finish max_fanout_violation_count',
                                        r'max fanout violation count (\S+)'),
                       logPath + '/6_report.log')
    extractTagFromFile('finish__timing__max_cap__violation__count',
                       metrics_dict,
                       baseRegEx.format('finish max_cap_violation_count',
                                        r'max cap violation count (\S+)'),
                       logPath + '/6_report.log')
    # Power table: columns are internal / switching / leakage / total / %.
    extractTagFromFile('finish__power__internal__total',
                       metrics_dict,
                       r'Total +(\S+) +\S+ +\S+ +\S+ +\S+',
                       logPath + '/6_report.log')
    extractTagFromFile('finish__power__switch__total',
                       metrics_dict,
                       r'Total +\S+ +(\S+) +\S+ +\S+ +\S+',
                       logPath + '/6_report.log')
    extractTagFromFile('finish__power__leakage__total',
                       metrics_dict,
                       r'Total +\S+ +\S+ +(\S+) +\S+ +\S+',
                       logPath + '/6_report.log')
    extractTagFromFile('finish__power__total',
                       metrics_dict,
                       r'Total +\S+ +\S+ +\S+ +(\S+) +\S+',
                       logPath + '/6_report.log')
    extractTagFromFile('finish__design__instance__area',
                       metrics_dict,
                       baseRegEx.format('finish report_design_area',
                                        r'^Design area (\S+) u\^2'),
                       logPath + '/6_report.log')
    extractTagFromFile('finish__design__instance__utilization',
                       metrics_dict,
                       baseRegEx.format('finish report_design_area',
                                        r'^Design area .* (\S+)% utilization'),
                       logPath + '/6_report.log')
    extractGnuTime('finish', metrics_dict, logPath + '/6_report.log')
    # Accumulate time
    # =========================================================================
    failed = False
    total = timedelta()
    for key in metrics_dict:
        if key.endswith('__runtime__total'):
            # Big try block because Hour and microsecond is optional
            try:
                t = datetime.strptime(metrics_dict[key], '%H:%M:%S.%f')
            except ValueError:
                try:
                    t = datetime.strptime(metrics_dict[key], '%M:%S.%f')
                except ValueError:
                    try:
                        t = datetime.strptime(metrics_dict[key], '%H:%M:%S')
                    except ValueError:
                        try:
                            t = datetime.strptime(metrics_dict[key], '%M:%S')
                        except ValueError:
                            failed = True
                            break
            delta = timedelta(hours=t.hour, minutes=t.minute,
                              seconds=t.second, microseconds=t.microsecond)
            total += delta
    if failed:
        metrics_dict['total_time'] = 'ERR'
    else:
        metrics_dict['total_time'] = str(total)
    metrics_df = pd.DataFrame(list(metrics_dict.items()))
    # Column header combines the first two metric values (design/platform).
    col_index = metrics_df.iloc[0][1] + '__' + metrics_df.iloc[1][1]
    metrics_df.columns = ['Metrics', col_index]
    if hier_json:
        # Convert the Metrics dictionary to hierarchical format by stripping
        # the stage as a 'key'
        hier_dict = defaultdict(dict)
        for metric in metrics_dict:
            key_list = metric.split('__', 1)
            if len(key_list) == 2:
                hier_dict[key_list[0]][key_list[1]] = metrics_dict[metric]
        metrics_dict = hier_dict
    with open(output, 'w') as resultSpecfile:
        json.dump(metrics_dict, resultSpecfile, indent=2)
    return metrics_dict, metrics_df
# ---------------------------------------------------------------------------
# Script entry point: collect metrics either for one design or for every
# design found under ./logs ('all_designs').  parse_args / pd / os / json
# are imported at the top of the file, outside this excerpt.
# ---------------------------------------------------------------------------
args = parse_args()
now = datetime.now()  # timestamp baked into every generated metrics dict
if args.design == 'all_designs':
    print('List of designs')
    rootdir = './logs'
    all_df = pd.DataFrame()  # wide table: one column per platform/design/variant
    all_d = []  # list of per-run metrics dicts, dumped to metrics.json
    flow_variants = args.flowVariant.split()  # space-separated variant names
    cwd = os.getcwd()
    # logs/<platform>/<design>/<variant> directory layout drives the scan.
    for platform_it in os.scandir(rootdir):
        if not platform_it.is_dir():
            continue
        plt = platform_it.name
        for design_it in os.scandir(platform_it.path):
            if not design_it.is_dir():
                continue
            for variant in flow_variants:
                des = design_it.name
                print(plt, des, variant)
                file = '/'.join(['reports', plt, des, variant, 'metrics.json'])
                metrics, df = extract_metrics(cwd, plt, des, variant,
                                              file, args.hier)
                all_d.append(metrics)
                # First result seeds the table; later results are joined on
                # the shared 'Metrics' key column.
                if all_df.shape[0] == 0:
                    all_df = df
                else:
                    all_df = all_df.merge(df, on='Metrics', how='inner')
    # Aggregate outputs: one JSON list and one HTML table for all runs.
    with open('metrics.json', 'w') as outFile:
        json.dump(all_d, outFile, indent=2)
    with open('metrics.html', 'w') as f:
        f.write(all_df.to_html())
else:
    # Single-design mode: paths and output location come from the CLI args.
    metrics_dict, metrics_df = extract_metrics(args.flowPath, args.platform,
                                               args.design, args.flowVariant,
                                               args.output, args.hier)
| 40.692308 | 83 | 0.514207 |
e8bbb6bbed50992bcaa9c9c026723edaf24acdaa | 1,479 | py | Python | src/fleetctrl/charging/ChargingBase.py | TUM-VT/FleetPy | 596bcec9fbd2fe52206079641d549bf028d2879d | [
"MIT"
] | 19 | 2021-12-11T17:17:00.000Z | 2022-03-24T07:27:06.000Z | src/fleetctrl/charging/ChargingBase.py | TUM-VT/FleetPy | 596bcec9fbd2fe52206079641d549bf028d2879d | [
"MIT"
] | null | null | null | src/fleetctrl/charging/ChargingBase.py | TUM-VT/FleetPy | 596bcec9fbd2fe52206079641d549bf028d2879d | [
"MIT"
] | 1 | 2021-12-21T11:20:39.000Z | 2021-12-21T11:20:39.000Z | from abc import abstractmethod, ABC
import logging
LOG = logging.getLogger(__name__)
class ChargingBase(ABC):
    """Abstract base class for operator charging strategies.

    Concrete subclasses are expected to validate any additionally required
    entries of ``operator_attributes`` in their own ``__init__`` and to
    implement :meth:`_call_specific_charging_strategy`.
    """

    def __init__(self, fleetctrl, operator_attributes, solver="Gurobi"):
        """Initialization of charging class.

        :param fleetctrl: FleetControl class
        :param operator_attributes: operator dictionary that can contain additionally required parameters
        :param solver: solver for optimization problems
        """
        self.fleetctrl = fleetctrl
        self.solver_key = solver
        self.cm = fleetctrl.charging_management
        self.routing_engine = fleetctrl.routing_engine

    def time_triggered_charging_processes(self, sim_time):
        """Apply the charging strategy, then top up free depot charging slots.

        :param sim_time: current simulation time
        :return: None
        """
        # 1a) strategy-specific behaviour supplied by the subclass
        self._call_specific_charging_strategy(sim_time)
        # 1b) fill empty charging units at depots
        # NOTE(review): `self` is passed explicitly on top of the bound call,
        # i.e. the charging management receives this strategy object as its
        # first argument -- presumably intentional; confirm against
        # fill_charging_units_at_depot's signature.
        self.cm.fill_charging_units_at_depot(self, sim_time)

    @abstractmethod
    def _call_specific_charging_strategy(self, sim_time):
        """Subclass hook implementing the concrete charging strategy.

        :param sim_time: current simulation time
        :return: None
        """
        pass
| 34.395349 | 120 | 0.686951 |
fc582b66ce12409fde254083aba2fb73a39b3c0c | 303 | py | Python | src/endpoints/sample/args.py | C-I-D-A-S/python-project-template | 99946bb36a4aa25dcac90a1ff9662ca1a8d45536 | [
"Apache-2.0"
] | null | null | null | src/endpoints/sample/args.py | C-I-D-A-S/python-project-template | 99946bb36a4aa25dcac90a1ff9662ca1a8d45536 | [
"Apache-2.0"
] | null | null | null | src/endpoints/sample/args.py | C-I-D-A-S/python-project-template | 99946bb36a4aa25dcac90a1ff9662ca1a8d45536 | [
"Apache-2.0"
] | null | null | null | """
Module for API arguments config
Author: Po-Chun, Lu
"""
def add_post_args(post_parser):
    """Register the POST body arguments on *post_parser*.

    :param post_parser: a reqparse-style parser exposing ``add_argument``
    """
    post_parser.add_argument(
        "job_id",
        type=str,
        required=True,
        location="json",
        # Fixed help text: the argument is a string (type=str); the original
        # message wrongly said "should be list".
        help="job_id parameter is required and must be a string",
    )
| 16.833333 | 47 | 0.594059 |
7cd73d653bcaab2dbd6c30c08ee291e261d0abfc | 4,252 | py | Python | S4/S4 Library/simulation/objects/components/autonomy.py | NeonOcean/Environment | ca658cf66e8fd6866c22a4a0136d415705b36d26 | [
"CC-BY-4.0"
] | 1 | 2021-05-20T19:33:37.000Z | 2021-05-20T19:33:37.000Z | S4/S4 Library/simulation/objects/components/autonomy.py | NeonOcean/Environment | ca658cf66e8fd6866c22a4a0136d415705b36d26 | [
"CC-BY-4.0"
] | null | null | null | S4/S4 Library/simulation/objects/components/autonomy.py | NeonOcean/Environment | ca658cf66e8fd6866c22a4a0136d415705b36d26 | [
"CC-BY-4.0"
] | null | null | null | from autonomy.settings import AutonomyRandomization
from sims4.tuning.tunable import TunableTuple, TunableSet, TunableReference, Tunable, OptionalTunable, TunableEnumEntry
import services
class TunableParameterizedAutonomy(TunableTuple, is_fragment=True):
    """Tuning fragment describing a follow-up parameterized autonomy request.

    Bundles the commodity/static-commodity search sets with behavioral
    switches (priority retention, carry-target retention, randomization
    override, search radius, etc.) consumed when an interaction triggers a
    parameterized autonomy request after it runs.
    """
    def __init__(self):
        # Single TunableTuple schema definition; each keyword below becomes a
        # named, designer-tunable field of the fragment.
        super().__init__(commodities=TunableSet(TunableReference(services.statistic_manager(), description='The type of commodity to search for.'), description='List of commodities to run parameterized autonomy against after running this interaction.'), static_commodities=TunableSet(TunableReference(services.static_commodity_manager(), description='The type of static commodity to search for.'), description='List of static commodities to run parameterized autonomy against after running this interaction.'), same_target_only=Tunable(bool, False, description='If checked, only interactions on the same target as this interaction will be considered.'), retain_priority=Tunable(bool, True, description='If checked, this autonomy request is run at the same priority level as the interaction creating it. If unchecked, the interaction chosen will run at low priority.'), consider_same_target=Tunable(bool, True, description='If checked, parameterized autonomy will consider interactions on the current Target.'), retain_carry_target=Tunable(bool, True, description="If checked, the interactions considered for autonomy will retain this interaction's carry target. It is useful to uncheck this if the desired autonomous interactions need not to consider carry, e.g. the Grim Reaper finding arbitrary interactions while in an interaction holding his scythe as a carry target."), randomization_override=OptionalTunable(description='\n                If enabled then the parameterized autonomy will run with\n                an overwritten autonomy randomization settings.\n                ', tunable=TunableEnumEntry(description='\n                    The autonomy randomization setting that will be used.\n                    ', tunable_type=AutonomyRandomization, default=AutonomyRandomization.UNDEFINED)), radius_to_consider=Tunable(description='\n                The radius around the sim that targets must be in to be valid for Parameterized \n                Autonomy. Anything outside this radius will be ignored. 
A radius of 0 is considered\n                infinite.\n                ', tunable_type=float, default=0), consider_scores_of_zero=Tunable(description='\n                The autonomy request will consider scores of zero. This allows sims to to choose things they \n                might not desire.\n                ', tunable_type=bool, default=False), test_connectivity_to_target=Tunable(description='\n                If checked, this test will ensure the Sim can pass a pt to\n                pt connectivity check to the advertising object.\n                ', tunable_type=bool, default=True), retain_context_source=Tunable(description='\n                If True, any interactions that run as a result of\n                this request will run with the same context source as the creating\n                interaction. If False, it will default to InteractionContext.SOURCE_AUTONOMY.\n                ', tunable_type=bool, default=False), ignore_user_directed_and_autonomous=Tunable(description='\n                If True, parametrized request will ignore autonomous and\n                user directed checks. This means, that the request may\n                push a user directed or autonomous interaction without\n                restriction.\n                A use case for this is when a vampire runs pre run autonomy\n                to enable its dark form, we want to keep the context as \n                user directed (to keep the high priority of the\n                interaction), but the interaction being run can normally\n                not be user directed (since we dont want it on the pie\n                menu). \n                ', tunable_type=bool, default=False), description='Commodities and StaticCommodities will be combined, so interactions must support at least one commodity from both lists.')
| 472.444444 | 3,969 | 0.703904 |
f8a8cced9c54b3e1935d9b4451847d493adbf6b0 | 8,006 | py | Python | tfx/dependencies.py | v1incent/tfx | ce6d6f64d326be3d57761a31b60f951fa95a39e5 | [
"Apache-2.0"
] | null | null | null | tfx/dependencies.py | v1incent/tfx | ce6d6f64d326be3d57761a31b60f951fa95a39e5 | [
"Apache-2.0"
] | null | null | null | tfx/dependencies.py | v1incent/tfx | ce6d6f64d326be3d57761a31b60f951fa95a39e5 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Package dependencies for TFX.
tfx and family libraries (such as tensorflow-model-analysis) adopts environment
variable (TFX_DEPENDENCY_SELECTOR) based dependency version selection. This
dependency will be baked in to the wheel, in other words you cannot change
dependency string once wheel is built.
- UNCONSTRAINED uses dependency without any version constraint string, which is
useful when you manually build wheels of parent library (e.g. tfx-bsl) of
arbitrary version, and install it without dependency constraints conflict.
- NIGHTLY uses x.(y+1).0.dev version as a lower version constraint. tfx nightly
will transitively depend on nightly versions of other TFX family libraries,
and this version constraint is required.
- GIT_MASTER uses github master branch URL of the dependency, which is useful
during development, or when depending on the github master HEAD version of
tfx. This is because tfx github master HEAD version is actually using github
master HEAD version of parent libraries.
Caveat: URL dependency is not upgraded with --upgrade flag, and you have to
specify --force-reinstall flag to fetch the latest change from each master
branch HEAD.
- For the release, we use a range of version, which is also used as a default.
"""
import os
def select_constraint(default, nightly=None, git_master=None):
  """Select dependency constraint based on TFX_DEPENDENCY_SELECTOR env var.

  Returns '' for 'UNCONSTRAINED', *nightly* for 'NIGHTLY' and *git_master*
  for 'GIT_MASTER' (each only when supplied); every other case falls back
  to *default*.
  """
  candidates = {
      'UNCONSTRAINED': '',
      'NIGHTLY': nightly,
      'GIT_MASTER': git_master,
  }
  chosen = candidates.get(os.environ.get('TFX_DEPENDENCY_SELECTOR'))
  return default if chosen is None else chosen
def make_pipeline_sdk_required_install_packages():
  """Returns the dependency pins for the minimal pipeline-authoring SDK.

  These packages are needed just to author/serialize pipelines (no
  Beam/TensorFlow runtime); `select_constraint` chooses version bounds
  based on the TFX_DEPENDENCY_SELECTOR environment variable.
  """
  return [
      'absl-py>=0.9,<2.0.0',
      # ml-metadata must track the TFX release; the LINT markers keep this
      # pin in sync with the Bazel workspace.
      'ml-metadata' + select_constraint(
          # LINT.IfChange
          default='>=1.6.0,<1.7.0',
          # LINT.ThenChange(tfx/workspace.bzl)
          nightly='>=1.7.0.dev',
          git_master='@git+https://github.com/google/ml-metadata@master'),
      'packaging>=20,<21',
      'portpicker>=1.3.1,<2',
      'protobuf>=3.13,<4',
      'docker>=4.1,<5',
      'google-apitools>=0.5,<1',
      'google-api-python-client>=1.8,<2',
      # TODO(b/176812386): Deprecate usage of jinja2 for placeholders.
      'jinja2>=2.7.3,<4',
  ]
def make_required_install_packages():
  """Returns the full runtime dependency pins (SDK + Beam/TF stack)."""
  # Make sure to sync the versions of common dependencies (absl-py, numpy,
  # and protobuf) with TF.
  return make_pipeline_sdk_required_install_packages() + [
      'apache-beam[gcp]>=2.35,<3',
      'attrs>=19.3.0,<21',
      'click>=7,<8',
      'google-cloud-aiplatform>=1.6.2,<2',
      'google-cloud-bigquery>=2.26.0,<3',
      'grpcio>=1.28.1,<2',
      'keras-tuner>=1.0.4,<2',
      'kubernetes>=10.0.1,<13',
      'numpy>=1.16,<2',
      'pyarrow>=1,<6',
      'pyyaml>=3.12,<6',
      # Keep the TF version same as TFT to help Pip version resolution.
      # Pip might stuck in a TF 1.15 dependency although there is a working
      # dependency set with TF 2.x without the sync.
      # pylint: disable=line-too-long
      'tensorflow' + select_constraint(
          '>=1.15.5,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*,<2.9'),
      # pylint: enable=line-too-long
      'tensorflow-hub>=0.9.0,<0.13',
      # The TFX-family libraries below share the same release cadence, so
      # they carry matching default/nightly/git-master constraints.
      'tensorflow-data-validation' + select_constraint(
          default='>=1.6.0,<1.7.0',
          nightly='>=1.7.0.dev',
          git_master='@git+https://github.com/tensorflow/data-validation@master'
      ),
      'tensorflow-model-analysis' + select_constraint(
          default='>=0.37.0,<0.38',
          nightly='>=0.38.0.dev',
          git_master='@git+https://github.com/tensorflow/model-analysis@master'),
      'tensorflow-serving-api>=1.15,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,<3',
      'tensorflow-transform' + select_constraint(
          default='>=1.6.0,<1.7.0',
          nightly='>=1.7.0.dev',
          git_master='@git+https://github.com/tensorflow/transform@master'),
      'tfx-bsl' + select_constraint(
          default='>=1.6.0,<1.7.0',
          nightly='>=1.7.0.dev',
          git_master='@git+https://github.com/tensorflow/tfx-bsl@master'),
  ]
def make_extra_packages_airflow():
  """Extra dependency pins for running on the Apache Airflow orchestrator."""
  extras = ['apache-airflow[mysql]>=1.10.14,<3']
  return extras
def make_extra_packages_kfp():
  """Extra dependency pins for the Kubeflow Pipelines orchestrator."""
  extras = [
      'kfp>=1.8.5,<2',
      'kfp-pipeline-spec>=0.1.10,<0.2',
  ]
  return extras
def make_extra_packages_test():
  """Extra packages needed to run the unit-test suite."""
  # Exact version pins are acceptable here to minimize resolver conflicts.
  extras = list(make_extra_packages_airflow())
  extras.extend(make_extra_packages_kfp())
  extras.append('pytest>=5,<7')
  return extras
def make_extra_packages_docker_image():
  """Extra dependencies baked into the TFX docker image."""
  extras = [
      'kfp-pipeline-spec>=0.1.10,<0.2',
      'mmh>=2.2,<3',
      'python-snappy>=0.5,<0.6',
  ]
  # tensorflow-cloud is required for
  # tfx/examples/penguin/penguin_utils_cloud_tuner.py.
  extras.append('tensorflow-cloud>=0.1,<0.2')
  extras.append('tensorflow-io>=0.9.0, <=0.24.0')
  return extras
def make_extra_packages_tfjs():
  """Extra dependency pins for TensorFlow.js model export support."""
  extras = ['tensorflowjs>=3.6.0,<4']
  return extras
def make_extra_packages_tf_ranking():
  """Extra dependency pins for tf-ranking (used in tfx/examples/ranking)."""
  return [
      'tensorflow-ranking>=0.3.3,<0.4',
      # struct2tensor follows the TFX-family release cadence, hence the
      # default/nightly/git-master constraint selection.
      'struct2tensor' + select_constraint(
          default='>=0.37,<0.38',
          nightly='>=0.38.0.dev',
          git_master='@git+https://github.com/google/struct2tensor@master'),
  ]
def make_extra_packages_tfdf():
  """Extra dependency pin for tensorflow-decision-forests.

  Required for tfx/examples/penguin/penguin_utils_tfdf_experimental.py.
  """
  # NOTE: TFDF 0.2.1 is only compatible with TF 2.7.x.
  extras = ['tensorflow-decision-forests==0.2.1']
  return extras
def make_extra_packages_examples():
  """Extra dependencies required to run the bundled tfx/examples."""
  return [
      # Required for presto ExampleGen custom component in
      # tfx/examples/custom_components/presto_example_gen
      'presto-python-client>=0.7,<0.8',
      # Required for slack custom component in
      # tfx/examples/custom_components/slack
      'slackclient>=2.8.2,<3',
      'websocket-client>=0.57,<1',
      # Required for bert examples in tfx/examples/bert
      'tensorflow-text>=1.15.1,<3',
      # Required for tfx/examples/cifar10
      'flatbuffers>=1.12,<3',
      'tflite-support>=0.1.0a1,<0.2.1',
      # Required for tfx/examples/penguin/experimental
      # LINT.IfChange
      'scikit-learn>=0.23,<0.24',
      # LINT.ThenChange(
      # examples/penguin/experimental/penguin_pipeline_sklearn_gcp.py)
      # Required for the experimental tfx/examples using Flax, e.g.,
      # tfx/examples/penguin.
      # TODO(b/193362300): Unblock the version cap after TF 2.7 becomes minimum.
      'jax>=0.2.13,<0.2.17',
      'jaxlib>=0.1.64,<0.2',
      'flax>=0.3.3,<0.4',
      # Required for tfx/examples/penguin/penguin_utils_cloud_tuner.py
      'tensorflow-cloud>=0.1,<0.2',
  ]
def make_extra_packages_all():
    """Aggregates every optional dependency group into one flat list."""
    combined = []
    for group in (make_extra_packages_test(),
                  make_extra_packages_tfjs(),
                  make_extra_packages_tf_ranking(),
                  make_extra_packages_tfdf(),
                  make_extra_packages_examples()):
        combined.extend(group)
    return combined
| 36.557078 | 96 | 0.663128 |
3588b8e3e89088f442925be6d65bf8e40587267b | 1,340 | py | Python | app/core/tests/test_admin.py | Kane-Ryans/recipe-app-api | 23f43bdf7cae98209edfc43d81c735fa83994c8e | [
"MIT"
] | null | null | null | app/core/tests/test_admin.py | Kane-Ryans/recipe-app-api | 23f43bdf7cae98209edfc43d81c735fa83994c8e | [
"MIT"
] | null | null | null | app/core/tests/test_admin.py | Kane-Ryans/recipe-app-api | 23f43bdf7cae98209edfc43d81c735fa83994c8e | [
"MIT"
] | null | null | null | from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import Client
class AdminSiteTests(TestCase):
    """Tests for the Django admin customisations of the custom user model."""

    def setUp(self):
        # Log an admin user in, and create a plain user to look at.
        self.client = Client()
        self.admin_user = get_user_model().objects.create_superuser(
            email='admin@app.com',
            password='password123',
        )
        self.client.force_login(self.admin_user)
        self.user = get_user_model().objects.create_user(
            email='test@app.com',
            password='password123',
            name='test user full name',
        )

    def test_users_listed(self):
        """Test that users are listed on the user page"""
        page_url = reverse('admin:core_user_changelist')
        response = self.client.get(page_url)
        self.assertContains(response, self.user.name)
        self.assertContains(response, self.user.email)

    def test_user_change_page(self):
        """Test that the user edit page works"""
        page_url = reverse('admin:core_user_change', args=[self.user.id])
        response = self.client.get(page_url)
        self.assertEqual(response.status_code, 200)

    def test_create_user_page(self):
        """Test that the create user page works"""
        page_url = reverse('admin:core_user_add')
        response = self.client.get(page_url)
        self.assertEqual(response.status_code, 200)
| 31.162791 | 68 | 0.642537 |
ae6c9bea7d918b85f70a41bbf97843d7245ac74b | 1,812 | py | Python | blog-server/blog/initialization/graphql.py | rob-blackbourn/blog-engine | 9b0239996732f377b70c82eb908bae826f35e67c | [
"Apache-2.0"
] | 1 | 2019-05-13T19:40:32.000Z | 2019-05-13T19:40:32.000Z | blog-server/blog/initialization/graphql.py | rob-blackbourn/blog-engine | 9b0239996732f377b70c82eb908bae826f35e67c | [
"Apache-2.0"
] | null | null | null | blog-server/blog/initialization/graphql.py | rob-blackbourn/blog-engine | 9b0239996732f377b70c82eb908bae826f35e67c | [
"Apache-2.0"
] | null | null | null | import aiohttp_cors
from easydict import EasyDict as edict
from graphql.pyutils import EventEmitter
from ..schema import schema
from ..middlewares import AuthenticationMiddleware
from ..repositories import (
AdminRepository,
UserRepository,
BlogRepository,
PostRepository,
CommentRepository
)
from ..resolvers.dataloader import DbDataLoaderRegistry
from ..controllers import GraphQLController
async def startup(app):
    """Wires up the GraphQL controller when the aiohttp app starts.

    Builds the repositories, data-loader registry, authentication middleware
    and CORS rules, registers the GraphQL routes, and stores the controller
    under ``app['graphql']`` so ``shutdown`` can dispose of it later.
    """
    # Operations reachable without an authentication token.
    authentication = AuthenticationMiddleware(whitelist=[
        '__schema',
        'registerUser',
        'authenticate'
    ])

    db = app['mongo']
    config = app['config']
    event_emitter = EventEmitter()

    repositories = edict(
        admin=AdminRepository(),
        user=UserRepository(),
        blog=BlogRepository(),
        post=PostRepository(),
        comment=CommentRepository()
    )

    data_loader_registry = DbDataLoaderRegistry()
    for repository in repositories.values():
        repository.register_data_loaders(data_loader_registry)

    # PEP 8 (E731): a named def instead of an assigned lambda — same
    # behaviour, but the function has a useful name in tracebacks.
    def context_builder(request):
        """Builds the per-request GraphQL execution context."""
        return edict(
            config=config,
            db=db,
            event_emitter=event_emitter,
            repositories=repositories,
            # (sic: "create_loders" is the registry's actual method name.)
            data_loaders=data_loader_registry.create_loders(db),
            request=request
        )

    middleware = [
        authentication
    ]

    controller = GraphQLController(schema, context_builder, middleware)
    routes = controller.add_routes(app)
    app['graphql'] = controller

    # Configure default CORS settings.
    cors = aiohttp_cors.setup(app, defaults={
        "*": aiohttp_cors.ResourceOptions(
            allow_credentials=True,
            expose_headers="*",
            allow_headers="*",
        )
    })
    for route in routes:
        cors.add(route)
async def shutdown(app):
    """Disposes of the GraphQL controller stored by ``startup``."""
    await app['graphql'].shutdown()
| 24.16 | 71 | 0.672737 |
ba7e361b5a0327ec85e4f96e9c04a0e4f2e6d78a | 2,355 | py | Python | raidquaza/database/dbhandler.py | Breee/pokemon-discord-report-bot | 48321195e976a34ad52827e16e9d20aed89d39a6 | [
"MIT"
] | 2 | 2019-03-12T16:44:24.000Z | 2020-04-13T21:06:20.000Z | authicuno/database/dbhandler.py | Breee/Authicuno | 1d9fc54be3adb842695a0e6c20f4d3db03f43fd5 | [
"MIT"
] | 5 | 2019-07-13T00:11:42.000Z | 2021-07-29T11:55:39.000Z | raidquaza/database/dbhandler.py | Breee/raidquaza | 308d643e71eddf6f6dc432c01322a02d604ac70e | [
"MIT"
] | null | null | null | import sqlalchemy
from utility.globals import LOGGER
from sqlalchemy.orm import sessionmaker
def transaction_wrapper(func):
    """Decorator for DbHandler methods that runs *func* inside a session.

    Opens a fresh SQLAlchemy session on ``self.session``, commits on
    success, rolls back and re-raises on failure, and always closes the
    session afterwards. The wrapped method's return value is passed through.
    """
    from functools import wraps

    # BUG FIX: without functools.wraps the decorated method lost its
    # __name__/__doc__, which breaks introspection and logging.
    @wraps(func)
    def _wrap_func(*args, **kwargs):
        self = args[0]
        session = sessionmaker(bind=self.engine, expire_on_commit=False)
        # New session; no connections are in use yet.
        self.session = session()
        try:
            # Execute the transaction statements.
            res = func(*args, **kwargs)
            # Commit: pending changes are flushed, the transaction is
            # committed, and the underlying DBAPI connection is returned
            # to the connection pool.
            self.session.commit()
        except Exception as err:
            LOGGER.critical(err)
            # On rollback, the same closure of state as for commit proceeds.
            self.session.rollback()
            raise
        finally:
            # Close the session: expunges remaining objects and resets
            # transaction state even if commit()/rollback() itself failed.
            self.session.close()
        return res
    return _wrap_func
class DbHandler(object):
    """Holds a SQLAlchemy engine plus connection settings for one database."""

    def __init__(self, host, database, port, user, password, dialect, driver):
        self.host = host
        self.database = database
        self.port = port
        self.user = user
        self.password = password
        self.dialect = dialect
        self.driver = driver
        self.conn = None
        self.cursor = None
        # pool_pre_ping revalidates pooled connections before each checkout.
        connection_url = (
            f'{dialect}+{driver}://{user}:{password}@{host}:{port}/{database}')
        self.engine = sqlalchemy.create_engine(connection_url,
                                               pool_pre_ping=True)
        self.session = None
        self.metadata = sqlalchemy.MetaData()
        self.base = None
# Manual smoke test; only runs when this module is executed directly.
if __name__ == '__main__':
    # NOTE(review): credentials are hard-coded here — acceptable only for a
    # local throwaway database; confirm they do not point at a real one.
    db = DbHandler(host="localhost", user="pollman", password="bestpw", port="3307", database="polldb",
                   dialect="mysql", driver="mysqlconnector")
| 36.230769 | 103 | 0.600425 |
de16c45bc946f6c963a17667e14864b49a76bc58 | 3,654 | py | Python | gcloud/tests/apigw/views/test_get_user_project_detail.py | springborland/bk-sops | a9057672c10efb5f2414a805a30ead4092429c76 | [
"Apache-2.0"
] | 1 | 2020-08-16T09:21:58.000Z | 2020-08-16T09:21:58.000Z | gcloud/tests/apigw/views/test_get_user_project_detail.py | ZhuoZhuoCrayon/bk-sops | d1475d53c19729915727ce7adc24e3226f15e332 | [
"Apache-2.0"
] | null | null | null | gcloud/tests/apigw/views/test_get_user_project_detail.py | ZhuoZhuoCrayon/bk-sops | d1475d53c19729915727ce7adc24e3226f15e332 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2020 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import ujson as json
from pipeline.utils.collections import FancyDict
from gcloud.tests.mock import * # noqa
from gcloud.tests.mock_settings import * # noqa
from gcloud import err_code
from .utils import APITest
# Fixture values shared by the mock patches and assertions below.
TEST_PROJECT_ID = "1"
TEST_PROJECT_NAME = "name"
TEST_BIZ_CC_ID = "2"
TEST_BIZ_NAME = "biz_name"
TEST_BIZ_DEVELOPERS = "TEST_BIZ_DEVELOPERS"
TEST_BIZ_MAINTAINER = "TEST_BIZ_MAINTAINER"
TEST_BIZ_TESTER = "TEST_BIZ_TESTER"
TEST_BIZ_PRODUCTOR = "TEST_BIZ_PRODUCTOR"
class GetUserProjectDetailAPITest(APITest):
    """API-gateway tests for the get_user_project_detail endpoint."""

    def url(self):
        # URL template; tests fill in {project_id}.
        return "/apigw/get_user_project_detail/{project_id}/"

    @patch(
        PROJECT_GET,
        MagicMock(
            return_value=MockProject(
                project_id=TEST_PROJECT_ID,
                name=TEST_PROJECT_NAME,
                bk_biz_id=TEST_BIZ_CC_ID,
                from_cmdb=True,
            )
        ),
    )
    @patch(
        APIGW_GET_USER_PROJECT_DETAIL_GET_BUSINESS_DETAIL,
        MagicMock(side_effect=Exception()),
    )
    def test_get_user_project_detail__get_business_detail_raise(self):
        # When fetching the CMDB business detail raises, the endpoint must
        # answer result=False with the generic UNKNOWN_ERROR code.
        response = self.client.get(path=self.url().format(project_id=TEST_PROJECT_ID))
        data = json.loads(response.content)
        self.assertFalse(data["result"])
        self.assertTrue("message" in data)
        self.assertEqual(data["code"], err_code.UNKNOWN_ERROR.code)

    @patch(
        PROJECT_GET,
        MagicMock(
            return_value=MockProject(
                project_id=TEST_PROJECT_ID,
                name=TEST_PROJECT_NAME,
                bk_biz_id=TEST_BIZ_CC_ID,
                from_cmdb=True,
            )
        ),
    )
    @patch(
        APIGW_GET_USER_PROJECT_DETAIL_GET_BUSINESS_DETAIL,
        MagicMock(
            return_value=FancyDict(
                bk_biz_id=TEST_BIZ_CC_ID,
                bk_biz_name=TEST_BIZ_NAME,
                bk_biz_developer=TEST_BIZ_DEVELOPERS,
                bk_biz_maintainer=TEST_BIZ_MAINTAINER,
                bk_biz_tester=TEST_BIZ_TESTER,
                bk_biz_productor=TEST_BIZ_PRODUCTOR,
            )
        ),
    )
    def test_get_user_project_detail__success(self):
        # Happy path: the payload mirrors the mocked project and the mocked
        # CMDB business attributes.
        response = self.client.get(path=self.url().format(project_id=TEST_PROJECT_ID))
        data = json.loads(response.content)
        self.assertTrue(data["result"])
        self.assertEqual(data["code"], err_code.SUCCESS.code)
        self.assertEqual(
            data["data"],
            {
                "project_id": TEST_PROJECT_ID,
                "project_name": TEST_PROJECT_NAME,
                "from_cmdb": True,
                "bk_biz_id": TEST_BIZ_CC_ID,
                "bk_biz_name": TEST_BIZ_NAME,
                "bk_biz_developer": TEST_BIZ_DEVELOPERS,
                "bk_biz_maintainer": TEST_BIZ_MAINTAINER,
                "bk_biz_tester": TEST_BIZ_TESTER,
                "bk_biz_productor": TEST_BIZ_PRODUCTOR,
            },
        )
| 33.833333 | 115 | 0.653804 |
627733149441de9d253e4da4111d26f09eb68c8c | 3,024 | py | Python | playground/infrastructure/test_grpc_client.py | ex00/beam | c4d23298a75038899cfc6d05fcea35089f1745f0 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | playground/infrastructure/test_grpc_client.py | ex00/beam | c4d23298a75038899cfc6d05fcea35089f1745f0 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | playground/infrastructure/test_grpc_client.py | ex00/beam | c4d23298a75038899cfc6d05fcea35089f1745f0 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
from unittest.mock import AsyncMock
import pytest
from infrastructure.api import api_pb2
from infrastructure.grpc_client import GRPCClient
@pytest.fixture()
def mock_run_code(mocker):
    """Patches GRPCClient.run_code to return a fresh pipeline uuid string."""
    stub = AsyncMock(return_value=str(uuid.uuid4()))
    mocker.patch('infrastructure.grpc_client.GRPCClient.run_code', side_effect=stub)
    return stub
@pytest.fixture()
def mock_check_status(mocker):
    """Patches GRPCClient.check_status to report STATUS_FINISHED."""
    stub = AsyncMock(return_value=api_pb2.STATUS_FINISHED)
    mocker.patch('infrastructure.grpc_client.GRPCClient.check_status', side_effect=stub)
    return stub
@pytest.fixture()
def mock_get_run_error(mocker):
    """Patches GRPCClient.get_run_error to return a canned error string."""
    stub = AsyncMock(return_value="MOCK_ERROR")
    mocker.patch('infrastructure.grpc_client.GRPCClient.get_run_error', side_effect=stub)
    return stub
@pytest.fixture()
def mock_get_run_output(mocker):
    """Patches GRPCClient.get_run_output to return canned run output."""
    stub = AsyncMock(return_value="MOCK_RUN_OUTPUT")
    mocker.patch('infrastructure.grpc_client.GRPCClient.get_run_output', side_effect=stub)
    return stub
@pytest.fixture()
def mock_get_compile_output(mocker):
    """Patches GRPCClient.get_compile_output to return canned compiler output."""
    stub = AsyncMock(return_value="MOCK_COMPILE_OUTPUT")
    mocker.patch('infrastructure.grpc_client.GRPCClient.get_compile_output', side_effect=stub)
    return stub
class TestGRPCClient:
    """Unit tests for GRPCClient with each RPC stubbed out by a fixture."""

    @pytest.mark.asyncio
    async def test_run_code(self, mock_run_code):
        result = await GRPCClient().run_code("", api_pb2.SDK_GO)
        assert isinstance(result, str)

    @pytest.mark.asyncio
    async def test_check_status(self, mock_check_status):
        result = await GRPCClient().check_status(str(uuid.uuid4()))
        assert result == api_pb2.STATUS_FINISHED

    # BUG FIX: the three tests below are async coroutines but were missing
    # the @pytest.mark.asyncio marker, so pytest-asyncio never awaited them —
    # they were effectively skipped (pytest only emits an "un-awaited
    # coroutine" warning). Adding the marker makes them actually execute.
    @pytest.mark.asyncio
    async def test_get_run_error(self, mock_get_run_error):
        result = await GRPCClient().get_run_error(str(uuid.uuid4()))
        assert result == "MOCK_ERROR"

    @pytest.mark.asyncio
    async def test_get_run_output(self, mock_get_run_output):
        result = await GRPCClient().get_run_output(str(uuid.uuid4()))
        assert result == "MOCK_RUN_OUTPUT"

    @pytest.mark.asyncio
    async def test_get_compile_output(self, mock_get_compile_output):
        result = await GRPCClient().get_compile_output(str(uuid.uuid4()))
        assert result == "MOCK_COMPILE_OUTPUT"
| 36.878049 | 100 | 0.765542 |
fdbda0dbe260fdd4fc43cc1618d8692074ea1079 | 272 | py | Python | config.py | gonzageraci/dydx-bot | d2cd3526c1eb7fbe5d07610510c74a2d428d3c4d | [
"Apache-2.0"
] | 1 | 2021-11-13T20:38:29.000Z | 2021-11-13T20:38:29.000Z | config.py | gonzageraci/dydx-bot | d2cd3526c1eb7fbe5d07610510c74a2d428d3c4d | [
"Apache-2.0"
] | null | null | null | config.py | gonzageraci/dydx-bot | d2cd3526c1eb7fbe5d07610510c74a2d428d3c4d | [
"Apache-2.0"
] | 1 | 2021-12-13T09:22:15.000Z | 2021-12-13T09:22:15.000Z | API_ENDPOINT = "https://api.dydx.exchange"
# SECURITY(review): these look like real private keys committed to source
# control. They should be rotated immediately and loaded from environment
# variables or a secrets manager instead of being hard-coded here.
StarkPrivateKey = "0x6d6300ffa9a563f84fc628251a3499459903354e33c965d83a613d56f1bdd8d"
eth_private_key = "48d5c67094243f36207f0dc21967e005dccca0756d63e77d4e680adf7e4b66fe"
eth_address = "0xb6eDcE2198cC2063906169fD4339B0F15EC558ea" | 68 | 85 | 0.897059 |
19e50da2c50074e2800fd6d970993a294d188c64 | 6,720 | py | Python | ci/util.py | VonRosenchild/plaidml | 762d5fff6467b43a15623f927502892ce8df91a4 | [
"Apache-2.0"
] | 1 | 2019-09-11T11:18:50.000Z | 2019-09-11T11:18:50.000Z | ci/util.py | HubBucket-Team/plaidml | 762d5fff6467b43a15623f927502892ce8df91a4 | [
"Apache-2.0"
] | null | null | null | ci/util.py | HubBucket-Team/plaidml | 762d5fff6467b43a15623f927502892ce8df91a4 | [
"Apache-2.0"
] | 1 | 2019-09-11T11:18:52.000Z | 2019-09-11T11:18:52.000Z | import argparse
import os
import platform
import shutil
import subprocess
import sys
verbose = False  # pylint: disable=invalid-name


def printf(*args, **kwargs):
    """Prints *args* (plus kwargs minus 'env', if any) and flushes stdout."""
    shown_kwargs = {key: value for key, value in kwargs.items() if key != 'env'}
    if shown_kwargs:
        print(*args, shown_kwargs)
    else:
        print(*args)
    sys.stdout.flush()
def call(cmd, **kwargs):
    """Runs *cmd* with subprocess.call and returns its exit code.

    Echoes the command first when the module-level ``verbose`` flag is set.
    """
    if verbose:
        printf(cmd, **kwargs)
    return subprocess.call(cmd, **kwargs)
def check_call(cmd, **kwargs):
    """Runs *cmd* with subprocess.check_call, echoing it first when verbose.

    Raises subprocess.CalledProcessError on a non-zero exit status.
    """
    if verbose:
        printf(cmd, **kwargs)
    subprocess.check_call(cmd, **kwargs)
def check_output(cmd, **kwargs):
    """Runs *cmd* with subprocess.check_output and returns its output.

    Echoes the command first when verbose; raises CalledProcessError on a
    non-zero exit status.
    """
    if verbose:
        printf(cmd, **kwargs)
    return subprocess.check_output(cmd, **kwargs)
class CondaEnv(object):
    """Wraps a conda environment rooted at *path*.

    Computes the platform-specific interpreter location and PATH entries,
    and provides create/update, clone and pip-install helpers.
    """

    def __init__(self, path):
        self.path = path.absolute().resolve()
        if platform.system() == 'Windows':
            self.bin = self.path / 'Scripts'
            self.python = self.path / 'python.exe'
            self.paths = [
                str(self.path),
                str(self.path / 'Library' / 'mingw-64' / 'bin'),
                str(self.path / 'Library' / 'usr' / 'bin'),
                str(self.path / 'Library' / 'bin'),
                str(self.path / 'Scripts'),
                str(self.path / 'bin'),
            ]
        else:
            self.bin = self.path / 'bin'
            self.python = self.bin / 'python'
            self.paths = [str(self.bin)]

    def env(self):
        """Returns environment-variable overrides that activate this env."""
        merged_path = self.paths + os.getenv('PATH').split(os.pathsep)
        variables = {
            'CONDA_DEFAULT_ENV': str(self.path),
            'PATH': os.pathsep.join(merged_path),
        }
        if platform.system() == 'Windows':
            variables['CONDA_EXE'] = str(self.path / 'Scripts' / 'conda.exe')
            variables['JAVA_HOME'] = str(self.path / 'Library')
        else:
            variables['JAVA_HOME'] = str(self.path)
        return variables

    def create(self, spec):
        """Creates the env from *spec*, or updates it in place if present.

        On any failure the partially-built env directory is removed before
        the exception is re-raised.
        """
        try:
            if self.path.exists():
                check_call(
                    ['conda', 'env', 'update', '--prune', '-f',
                     str(spec), '-p',
                     str(self.path)])
            else:
                check_call(['conda', 'env', 'create', '-f', str(spec), '-p', str(self.path)])
        except BaseException:
            if self.path.exists():
                shutil.rmtree(self.path)
            raise

    def clone(self, path):
        """Clones this environment into *path*, replacing any existing dir."""
        if path.exists():
            shutil.rmtree(path)
        check_call(['conda', 'create', '--clone', str(self.path), '-p', str(path)])
        return CondaEnv(path)

    def install(self, package):
        """pip-installs *package* into this environment."""
        check_call([str(self.python), '-m', 'pip', 'install', str(package)])
class DictAction(argparse.Action):
    """argparse action collecting repeated KEY=VALUE options into a dict.

    Example: ``--define a=1 --define b=2`` yields ``{'a': '1', 'b': '2'}``.
    Only the first '=' splits, so values may themselves contain '='.
    """

    def __init__(self, **kwargs):
        def key_value(string):
            parts = string.split('=', 1)
            if len(parts) != 2:
                # Report a clean parser error instead of crashing later with
                # an unpacking ValueError in __call__.
                raise argparse.ArgumentTypeError(
                    'expected KEY=VALUE, got: {!r}'.format(string))
            return parts

        super(DictAction, self).__init__(default={}, type=key_value, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        k, v = values
        # BUG FIX: copy before mutating. The old code mutated the action's
        # shared default dict in place, so key/value pairs leaked across
        # successive parse_args() calls on the same parser.
        var = dict(getattr(namespace, self.dest) or {})
        var[k] = v
        setattr(namespace, self.dest, var)
def first(choices):
    """Returns the first element of *choices* that is not None, else None."""
    return next((choice for choice in choices if choice is not None), None)
class PlanOption(object):
    """Resolves a named option for one workload on one platform.

    Precedence (highest first): the workload's platform_overrides entry for
    this platform, then the workload itself, then the suite, then *default*,
    then None.
    """

    def __init__(self, suite, workload, platform):
        self._suite = suite
        self._workload = workload
        self._platform = platform

    def get(self, name, default=None):
        """Returns option *name* following the precedence order above."""
        override = (self._workload
                    .get('platform_overrides', {})
                    .get(self._platform, {})
                    .get(name))
        for candidate in (override,
                          self._workload.get(name),
                          self._suite.get(name),
                          default):
            if candidate is not None:
                return candidate
        return None
class Platform(object):
    """Parses a platform id like 'keras-plaid-ocl-gtx780' into its parts."""

    def __init__(self, full, gpu_flops):
        pieces = full.split('-')
        self.full = full
        self.framework = pieces[0]
        self.engine = '_'.join(pieces[1:3])
        self.gpu = pieces[3]
        # Unknown GPUs simply have no flops rating.
        self.gpu_flops = gpu_flops.get(self.gpu)

    def __repr__(self):
        return '<Platform({})>'.format(self.full)
class TestInfo(object):
    """One concrete test instance: suite x workload x platform x batch size."""

    def __init__(self, suite, workload, platform, batch_size, variant, popt, shards=1, shard_id=0):
        self.suite_name, self.suite = suite
        self.workload_name, self.workload = workload
        self.platform_name, self.platform = platform
        self.batch_size = batch_size
        self.variant = variant
        # Timeout is minutes-scale with a default of 20 (per plan options).
        self.timeout = popt.get('timeout', 20)
        self.retry = popt.get('retry')
        self.soft_fail = popt.get('soft_fail')
        self.shards = shards
        self.shard_id = shard_id
        # Sharded workloads get a per-shard suffix on the instance name.
        self.instance_name = ('{}-{}'.format(self.workload_name, self.shard_id)
                              if self.shards > 1 else self.workload_name)

    def __repr__(self):
        return '{}/{}/{}/bs{}'.format(self.suite_name, self.workload_name, self.platform_name,
                                      self.batch_size)

    def label(self):
        """Builds the human-readable label: gpu-workload[-batch]-engine."""
        pieces = [self.platform.gpu, self.workload_name]
        if self.batch_size:
            pieces.append(str(self.batch_size))
        pieces.append(self.platform.engine)
        return '-'.join(pieces)

    def path(self, root):
        """Returns the result directory for this test under *root*."""
        batch_size = 'BATCH_SIZE={}'.format(self.batch_size)
        return root / self.suite_name / self.workload_name / self.platform_name / batch_size
def iterate_tests(plan, pipeline):
    """Yields a TestInfo for each suite/platform/workload/batch-size/shard
    combination in *plan* that participates in *pipeline*."""
    gpu_flops = plan['CONST']['gpu_flops']
    for suite_name, suite in plan['SUITES'].items():
        for platform_name, platform_cfg in suite['platforms'].items():
            # Variant is looked up before the pipeline filter (matching the
            # plan schema's expectation that every platform is declared).
            variant = plan['PLATFORMS'][platform_name]['variant']
            if pipeline not in platform_cfg['pipelines']:
                continue
            for workload_name, workload in suite['workloads'].items():
                popt = PlanOption(suite, workload, platform_name)
                if platform_name in workload.get('skip_platforms', []):
                    continue
                shards = popt.get('shards', 1)
                for shard_id in range(shards):
                    for batch_size in suite['params'][pipeline]['batch_sizes']:
                        yield TestInfo(
                            suite=(suite_name, suite),
                            workload=(workload_name, workload),
                            platform=(platform_name,
                                      Platform(platform_name, gpu_flops)),
                            batch_size=batch_size,
                            variant=variant,
                            popt=popt,
                            shard_id=shard_id,
                            shards=shards,
                        )
| 31.111111 | 99 | 0.537054 |
e39c010914a118b111904e86e8b07d93c2ba835e | 645 | py | Python | freebilly/domain/PendulumWorkTime.py | emileten/billy | 3fd0ab381382f7e64c67bc37b4ea2555e6b09822 | [
"MIT"
] | null | null | null | freebilly/domain/PendulumWorkTime.py | emileten/billy | 3fd0ab381382f7e64c67bc37b4ea2555e6b09822 | [
"MIT"
] | 1 | 2022-01-20T02:36:03.000Z | 2022-01-20T02:36:03.000Z | freebilly/domain/PendulumWorkTime.py | emileten/billy | 3fd0ab381382f7e64c67bc37b4ea2555e6b09822 | [
"MIT"
] | null | null | null | from freebilly.domain.AbstractWorkTime import AbstractWorkTime
import pendulum as pdl
class PendulumWorkTime(AbstractWorkTime):
    """Pendulum-backed implementation of AbstractWorkTime.

    Currently a skeleton: every method, including the constructor, is a
    stub that raises NotImplementedError.
    """

    def __init__(self, time: pdl.DateTime):
        raise NotImplementedError

    def now(self) -> AbstractWorkTime:
        raise NotImplementedError

    def diff(self, other: AbstractWorkTime) -> int:
        raise NotImplementedError

    def __lt__(self, other: AbstractWorkTime) -> bool:
        raise NotImplementedError

    def __gt__(self, other: AbstractWorkTime) -> bool:
        raise NotImplementedError

    def __eq__(self, other: AbstractWorkTime) -> bool:
        # NOTE(review): defining __eq__ leaves the class unhashable unless
        # __hash__ is also provided — confirm when implementing.
        raise NotImplementedError
| 22.241379 | 62 | 0.716279 |
5b425f42332d87991f208e18ff3686e4d21104b4 | 16,241 | py | Python | samples/openapi3/client/petstore/python/petstore_api/model/format_test.py | rotty3000/openapi-generator | 40d3331e789412d8f42df0148cc089a9d330b759 | [
"Apache-2.0"
] | 1 | 2022-01-11T15:49:34.000Z | 2022-01-11T15:49:34.000Z | samples/openapi3/client/petstore/python/petstore_api/model/format_test.py | rotty3000/openapi-generator | 40d3331e789412d8f42df0148cc089a9d330b759 | [
"Apache-2.0"
] | 9 | 2021-11-01T08:59:31.000Z | 2022-03-31T08:31:57.000Z | samples/openapi3/client/petstore/python/petstore_api/model/format_test.py | rotty3000/openapi-generator | 40d3331e789412d8f42df0148cc089a9d330b759 | [
"Apache-2.0"
] | 1 | 2022-03-20T14:46:48.000Z | 2022-03-20T14:46:48.000Z | """
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from petstore_api.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from petstore_api.exceptions import ApiAttributeError
class FormatTest(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # No enum-constrained properties on this model.
    allowed_values = {
    }

    # Per-property constraints from the OpenAPI spec; enforced by
    # model_utils when attribute values are assigned.
    validations = {
        ('number',): {
            'inclusive_maximum': 543.2,
            'inclusive_minimum': 32.1,
            'multiple_of': 32.5,
        },
        ('password',): {
            'max_length': 64,
            'min_length': 10,
        },
        ('integer',): {
            'inclusive_maximum': 100,
            'inclusive_minimum': 10,
            'multiple_of': 2,
        },
        ('int32',): {
            'inclusive_maximum': 200,
            'inclusive_minimum': 20,
        },
        ('float',): {
            'inclusive_maximum': 987.6,
            'inclusive_minimum': 54.3,
        },
        ('double',): {
            'inclusive_maximum': 123.4,
            'inclusive_minimum': 67.8,
        },
        ('string',): {
            'regex': {
                'pattern': r'[a-z]',  # noqa: E501
                'flags': (re.IGNORECASE)
            },
        },
        ('pattern_with_digits',): {
            'regex': {
                'pattern': r'^\d{10}$',  # noqa: E501
            },
        },
        ('pattern_with_digits_and_delimiter',): {
            'regex': {
                'pattern': r'^image_\d{1,3}$',  # noqa: E501
                'flags': (re.IGNORECASE)
            },
        },
    }

    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501

    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            'number': (float,),  # noqa: E501
            'byte': (str,),  # noqa: E501
            'date': (date,),  # noqa: E501
            'password': (str,),  # noqa: E501
            'integer': (int,),  # noqa: E501
            'int32': (int,),  # noqa: E501
            'int64': (int,),  # noqa: E501
            'float': (float,),  # noqa: E501
            'double': (float,),  # noqa: E501
            'string': (str,),  # noqa: E501
            'binary': (file_type,),  # noqa: E501
            'date_time': (datetime,),  # noqa: E501
            'uuid': (str,),  # noqa: E501
            'uuid_no_example': (str,),  # noqa: E501
            'pattern_with_digits': (str,),  # noqa: E501
            'pattern_with_digits_and_delimiter': (str,),  # noqa: E501
        }

    @cached_property
    def discriminator():
        return None

    # Maps python attribute names to the JSON keys used on the wire.
    attribute_map = {
        'number': 'number',  # noqa: E501
        'byte': 'byte',  # noqa: E501
        'date': 'date',  # noqa: E501
        'password': 'password',  # noqa: E501
        'integer': 'integer',  # noqa: E501
        'int32': 'int32',  # noqa: E501
        'int64': 'int64',  # noqa: E501
        'float': 'float',  # noqa: E501
        'double': 'double',  # noqa: E501
        'string': 'string',  # noqa: E501
        'binary': 'binary',  # noqa: E501
        'date_time': 'dateTime',  # noqa: E501
        'uuid': 'uuid',  # noqa: E501
        'uuid_no_example': 'uuidNoExample',  # noqa: E501
        'pattern_with_digits': 'pattern_with_digits',  # noqa: E501
        'pattern_with_digits_and_delimiter': 'pattern_with_digits_and_delimiter',  # noqa: E501
    }

    # No read-only properties on this model.
    read_only_vars = {
    }

    _composed_schemas = {}

    # Deserialization constructor: unlike __init__, this path may set
    # read-only attributes (values come from the server, not the caller).
    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, number, byte, date, password, *args, **kwargs):  # noqa: E501
        """FormatTest - a model defined in OpenAPI

        Args:
            number (float):
            byte (str):
            date (date):
            password (str):

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            integer (int): [optional]  # noqa: E501
            int32 (int): [optional]  # noqa: E501
            int64 (int): [optional]  # noqa: E501
            float (float): [optional]  # noqa: E501
            double (float): [optional]  # noqa: E501
            string (str): [optional]  # noqa: E501
            binary (file_type): [optional]  # noqa: E501
            date_time (datetime): [optional]  # noqa: E501
            uuid (str): [optional]  # noqa: E501
            uuid_no_example (str): [optional]  # noqa: E501
            pattern_with_digits (str): A string that is a 10 digit number. Can have leading zeros.. [optional]  # noqa: E501
            pattern_with_digits_and_delimiter (str): A string starting with 'image_' (case insensitive) and one to three digits following i.e. Image_01.. [optional]  # noqa: E501
        """

        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', True)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        self = super(OpenApiModel, cls).__new__(cls)

        if args:
            for arg in args:
                if isinstance(arg, dict):
                    kwargs.update(arg)
                else:
                    raise ApiTypeError(
                        "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                            args,
                            self.__class__.__name__,
                        ),
                        path_to_item=_path_to_item,
                        valid_classes=(self.__class__,),
                    )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        self.number = number
        self.byte = byte
        self.date = date
        self.password = password
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self

    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    # Caller-facing constructor: rejects attempts to set read-only
    # attributes (see the guard at the end of the body).
    @convert_js_args_to_python_args
    def __init__(self, number, byte, date, password, *args, **kwargs):  # noqa: E501
        """FormatTest - a model defined in OpenAPI

        Args:
            number (float):
            byte (str):
            date (date):
            password (str):

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            integer (int): [optional]  # noqa: E501
            int32 (int): [optional]  # noqa: E501
            int64 (int): [optional]  # noqa: E501
            float (float): [optional]  # noqa: E501
            double (float): [optional]  # noqa: E501
            string (str): [optional]  # noqa: E501
            binary (file_type): [optional]  # noqa: E501
            date_time (datetime): [optional]  # noqa: E501
            uuid (str): [optional]  # noqa: E501
            uuid_no_example (str): [optional]  # noqa: E501
            pattern_with_digits (str): A string that is a 10 digit number. Can have leading zeros.. [optional]  # noqa: E501
            pattern_with_digits_and_delimiter (str): A string starting with 'image_' (case insensitive) and one to three digits following i.e. Image_01.. [optional]  # noqa: E501
        """

        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        if args:
            for arg in args:
                if isinstance(arg, dict):
                    kwargs.update(arg)
                else:
                    raise ApiTypeError(
                        "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                            args,
                            self.__class__.__name__,
                        ),
                        path_to_item=_path_to_item,
                        valid_classes=(self.__class__,),
                    )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        self.number = number
        self.byte = byte
        self.date = date
        self.password = password
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                        f"class with read only attributes.")
| 42.852243 | 178 | 0.539006 |
013015bcf3b77597eaa1ad272ec68e054cb150ac | 515 | py | Python | env/Lib/site-packages/plotly/validators/layout/scene/xaxis/_showexponent.py | andresgreen-byte/Laboratorio-1--Inversion-de-Capital | 8a4707301d19c3826c31026c4077930bcd6a8182 | [
"MIT"
] | 11,750 | 2015-10-12T07:03:39.000Z | 2022-03-31T20:43:15.000Z | venv/Lib/site-packages/plotly/validators/layout/scene/xaxis/_showexponent.py | wakisalvador/constructed-misdirection | 74779e9ec640a11bc08d5d1967c85ac4fa44ea5e | [
"Unlicense"
] | 2,951 | 2015-10-12T00:41:25.000Z | 2022-03-31T22:19:26.000Z | venv/Lib/site-packages/plotly/validators/layout/scene/xaxis/_showexponent.py | wakisalvador/constructed-misdirection | 74779e9ec640a11bc08d5d1967c85ac4fa44ea5e | [
"Unlicense"
] | 2,623 | 2015-10-15T14:40:27.000Z | 2022-03-28T16:05:50.000Z | import _plotly_utils.basevalidators
class ShowexponentValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for the `layout.scene.xaxis.showexponent` property."""

    def __init__(
        self, plotly_name="showexponent", parent_name="layout.scene.xaxis", **kwargs
    ):
        # Callers may override the defaults through **kwargs; pop them so
        # they are not forwarded twice.
        edit_type = kwargs.pop("edit_type", "plot")
        allowed_values = kwargs.pop("values", ["all", "first", "last", "none"])
        super(ShowexponentValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            values=allowed_values,
            **kwargs
        )
| 34.333333 | 84 | 0.64466 |
436feab5ecc5284b6b7b2a58304e91c04c04fa85 | 4,742 | py | Python | usaspending_api/common/tests/test_helpers.py | animatecitizen/usaspending-api | 556511ff51fe80a8be8361daa5876fd684a04efe | [
"CC0-1.0"
] | null | null | null | usaspending_api/common/tests/test_helpers.py | animatecitizen/usaspending-api | 556511ff51fe80a8be8361daa5876fd684a04efe | [
"CC0-1.0"
] | 1 | 2021-11-15T17:54:12.000Z | 2021-11-15T17:54:12.000Z | usaspending_api/common/tests/test_helpers.py | mikepsinn/usaspending-api | ef61e13c286eb51949e16b760fa7516255b2bfd3 | [
"CC0-1.0"
] | null | null | null | import datetime as dt
import re
import time
import pytest
from usaspending_api.common.helpers.generic_helper import fy, get_pagination, timer
# Sample datetimes/dates mapped to the fiscal year fy() is expected to return.
# Note Oct 2, 2017 maps to FY 2018 (the fiscal year rolls over in October).
legal_dates = {
    dt.datetime(2017, 2, 2, 16, 43, 28, 377373): 2017,
    dt.date(2017, 2, 2): 2017,
    dt.datetime(2017, 10, 2, 16, 43, 28, 377373): 2018,
    dt.date(2017, 10, 2): 2018,
}

# Inputs that are not dates at all; fy() should raise TypeError for these.
not_dates = (0, 2017.2, 'forthwith')
def test_pagination():
    """Exercise get_pagination() over empty input, normal paging and
    degenerate page/limit combinations, checking both the returned slice
    and the page metadata dict."""
    # Testing for if anything breaks for the special case of an empty list
    results = []
    empty_page_metadata = {"next": None, "previous": None, "hasNext": False, "hasPrevious": False, "count": 0}
    assert get_pagination(results, 1, 1) == ([], {**empty_page_metadata, **{"page": 1}})
    assert get_pagination(results, 1, 4) == ([], {**empty_page_metadata, **{"page": 4}})
    assert get_pagination(results, 3, 1) == ([], {**empty_page_metadata, **{"page": 1}})
    assert get_pagination(results, 3, 2) == ([], {**empty_page_metadata, **{"page": 2}})
    assert get_pagination(results, 1, 6) == ([], {**empty_page_metadata, **{"page": 6}})
    assert get_pagination(results, 5, 2) == ([], {**empty_page_metadata, **{"page": 2}})
    assert get_pagination(results, 1000, 1) == ([], {**empty_page_metadata, **{"page": 1}})
    assert get_pagination(results, 1000, 2) == ([], {**empty_page_metadata, **{"page": 2}})
    assert get_pagination(results, 0, 1) == ([], {**empty_page_metadata, **{"page": 1}})
    assert get_pagination(results, 10, 0) == ([], {**empty_page_metadata, **{"page": 0}})
    # Normal tests
    results = ["A", "B", "C", "D", "E"]
    populated_page_metadata = {"next": 2, "hasNext": True, "count": 5, "page": 1}
    assert get_pagination(results, 1, 1) == (["A"], {**empty_page_metadata, **populated_page_metadata})
    populated_page_metadata = {"next": 5, "hasNext": True, "previous": 3, "hasPrevious": True, "count": 5, "page": 4}
    assert get_pagination(results, 1, 4) == (["D"], {**empty_page_metadata, **populated_page_metadata})
    populated_page_metadata = {"next": 2, "hasNext": True, "count": 5, "page": 1}
    assert get_pagination(results, 3, 1) == (["A", "B", "C"], {**empty_page_metadata, **populated_page_metadata})
    populated_page_metadata = {"previous": 1, "hasPrevious": True, "count": 5, "page": 2}
    assert get_pagination(results, 3, 2) == (["D", "E"], {**empty_page_metadata, **populated_page_metadata})
    # Testing special cases: pages past the end, oversized limits, zero limit/page
    populated_page_metadata = {"previous": 5, "hasPrevious": True, "count": 5, "page": 6}
    assert get_pagination(results, 1, 6) == ([], {**empty_page_metadata, **populated_page_metadata})
    populated_page_metadata = {"previous": 1, "hasPrevious": True, "count": 5, "page": 2}
    assert get_pagination(results, 5, 2) == ([], {**empty_page_metadata, **populated_page_metadata})
    populated_page_metadata = {"page": 1, "count": 5}
    assert get_pagination(results, 1000, 1) == (["A", "B", "C", "D", "E"],
                                                {**empty_page_metadata, **populated_page_metadata})
    populated_page_metadata = {"previous": 1, "hasPrevious": True, "page": 2, "count": 5}
    assert get_pagination(results, 1000, 2) == ([], {**empty_page_metadata, **populated_page_metadata})
    populated_page_metadata = {"page": 1, "count": 5}
    assert get_pagination(results, 0, 1) == ([], {**empty_page_metadata, **populated_page_metadata})
    populated_page_metadata = {"page": 0, "count": 5}
    assert get_pagination(results, 10, 0) == ([], {**empty_page_metadata, **populated_page_metadata})
@pytest.mark.parametrize("raw_date, expected_fy", legal_dates.items())
def test_fy_returns_integer(raw_date, expected_fy):
    """fy() should return an int for every supported date/datetime input."""
    assert isinstance(fy(raw_date), int)
@pytest.mark.parametrize("raw_date, expected_fy", legal_dates.items())
def test_fy_returns_correct(raw_date, expected_fy):
    """fy() should map each sample date to its known fiscal year."""
    assert fy(raw_date) == expected_fy
@pytest.mark.parametrize("not_date", not_dates)
def test_fy_type_exceptions(not_date):
    """Non-date inputs should be rejected with a TypeError."""
    with pytest.raises(TypeError):
        fy(not_date)
def test_timer(capsys):
    """Smoke-test the timer() context manager: it should execute without
    raising and print its begin/finish messages around the wrapped block."""
    with timer():
        print('Doing a thing')
    captured = capsys.readouterr()[0]
    assert 'Beginning' in captured
    assert 'finished' in captured
def test_timer_times(capsys):
    """A slower wrapped block should report a longer elapsed time."""
    seconds_pattern = re.compile(r'([\d\.e\-]+) sec')

    with timer():
        print('Doing a thing')
    fast_output = capsys.readouterr()[0]
    fast_seconds = float(seconds_pattern.search(fast_output).group(1))

    with timer():
        print('Doing a slower thing')
        time.sleep(0.1)
    slow_output = capsys.readouterr()[0]
    slow_seconds = float(seconds_pattern.search(slow_output).group(1))

    assert slow_seconds > fast_seconds
def test_fy_none():
    """fy(None) should pass None through rather than raising."""
    assert fy(None) is None
| 44.317757 | 117 | 0.648461 |
517dcdbb1bfa41caeb3d60faf4ecf2e302591403 | 2,349 | py | Python | ospweb/users/tests/test_views.py | asheeq/ospweb | 4aeb3037d8c59d5dcfca2e929902b609d5c8ef5c | [
"MIT"
] | null | null | null | ospweb/users/tests/test_views.py | asheeq/ospweb | 4aeb3037d8c59d5dcfca2e929902b609d5c8ef5c | [
"MIT"
] | null | null | null | ospweb/users/tests/test_views.py | asheeq/ospweb | 4aeb3037d8c59d5dcfca2e929902b609d5c8ef5c | [
"MIT"
] | null | null | null | import pytest
from django.contrib.auth.models import AnonymousUser
from django.http.response import Http404
from django.test import RequestFactory
from ospweb.users.models import User
from ospweb.users.tests.factories import UserFactory
from ospweb.users.views import UserRedirectView, UserUpdateView, user_detail_view
pytestmark = pytest.mark.django_db
class TestUserUpdateView:
    """
    TODO:
        extracting view initialization code as class-scoped fixture
        would be great if only pytest-django supported non-function-scoped
        fixture db access -- this is a work-in-progress for now:
        https://github.com/pytest-dev/pytest-django/pull/258
    """

    def test_get_success_url(self, user: User, rf: RequestFactory):
        # Wire the view up to a request issued by the given user.
        update_view = UserUpdateView()
        fake_request = rf.get("/fake-url/")
        fake_request.user = user
        update_view.request = fake_request

        assert update_view.get_success_url() == f"/users/{user.username}/"

    def test_get_object(self, user: User, rf: RequestFactory):
        update_view = UserUpdateView()
        fake_request = rf.get("/fake-url/")
        fake_request.user = user
        update_view.request = fake_request

        assert update_view.get_object() == user
class TestUserRedirectView:
    """Tests for the post-login redirect view."""

    def test_get_redirect_url(self, user: User, rf: RequestFactory):
        redirect_view = UserRedirectView()
        fake_request = rf.get("/fake-url")
        fake_request.user = user
        redirect_view.request = fake_request

        assert redirect_view.get_redirect_url() == f"/users/{user.username}/"
class TestUserDetailView:
    """Tests for the function-based user detail view."""

    def test_authenticated(self, user: User, rf: RequestFactory):
        fake_request = rf.get("/fake-url/")
        fake_request.user = UserFactory()

        response = user_detail_view(fake_request, username=user.username)
        assert response.status_code == 200

    def test_not_authenticated(self, user: User, rf: RequestFactory):
        fake_request = rf.get("/fake-url/")
        fake_request.user = AnonymousUser()

        response = user_detail_view(fake_request, username=user.username)
        # Anonymous visitors are redirected to the login page.
        assert response.status_code == 302
        assert response.url == "/accounts/login/?next=/fake-url/"

    def test_case_sensitivity(self, rf: RequestFactory):
        fake_request = rf.get("/fake-url/")
        fake_request.user = UserFactory(username="UserName")

        with pytest.raises(Http404):
            user_detail_view(fake_request, username="username")
| 30.907895 | 81 | 0.677735 |
3f9c1c9442a2f364a10fc1718bebff5653cf7007 | 898 | py | Python | end_to_end_tests/golden-record/my_test_api_client/models/test_inline_objects_response_200.py | JamesHinshelwood/openapi-python-client | 08bc2acf91d50d4fcdd7f0e50a8b0d3f00a187d9 | [
"MIT"
] | 172 | 2020-02-15T20:14:16.000Z | 2021-06-09T07:09:15.000Z | end_to_end_tests/golden-record/my_test_api_client/models/test_inline_objects_response_200.py | guioliveirabh/openapi-python-client | d8d9cecffe41c5dc1c43bc667598b90f0d9253ca | [
"MIT"
] | 410 | 2020-02-15T19:39:29.000Z | 2021-06-09T19:28:57.000Z | end_to_end_tests/golden-record/my_test_api_client/models/test_inline_objects_response_200.py | guioliveirabh/openapi-python-client | d8d9cecffe41c5dc1c43bc667598b90f0d9253ca | [
"MIT"
] | 38 | 2020-04-12T09:36:27.000Z | 2021-06-11T08:57:07.000Z | from typing import Any, Dict, Type, TypeVar, Union
import attr
from ..types import UNSET, Unset
T = TypeVar("T", bound="TestInlineObjectsResponse200")
@attr.s(auto_attribs=True)
class TestInlineObjectsResponse200:
    """
    Attributes:
        a_property (Union[Unset, str]):
    """

    a_property: Union[Unset, str] = UNSET

    def to_dict(self) -> Dict[str, Any]:
        # Only include fields that were actually set (UNSET means "absent").
        serialized: Dict[str, Any] = {}
        if self.a_property is not UNSET:
            serialized["a_property"] = self.a_property
        return serialized

    @classmethod
    def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
        # Copy so the caller's dict is never mutated.
        data = src_dict.copy()
        return cls(
            a_property=data.pop("a_property", UNSET),
        )
| 23.025641 | 63 | 0.632517 |
3b4af9a1941ee723cc89b4143076ab53eb2f6d3e | 83,719 | py | Python | python/ccxt/base/exchange.py | haobtc/ccxt | c4a00014a66bd015091ff5c4e94b043a9a5a799a | [
"MIT"
] | null | null | null | python/ccxt/base/exchange.py | haobtc/ccxt | c4a00014a66bd015091ff5c4e94b043a9a5a799a | [
"MIT"
] | null | null | null | python/ccxt/base/exchange.py | haobtc/ccxt | c4a00014a66bd015091ff5c4e94b043a9a5a799a | [
"MIT"
] | 1 | 2021-05-15T16:13:38.000Z | 2021-05-15T16:13:38.000Z | # -*- coding: utf-8 -*-
"""Base exchange class"""
# -----------------------------------------------------------------------------
__version__ = '1.21.20'
# -----------------------------------------------------------------------------
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import NetworkError
from ccxt.base.errors import NotSupported
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import RequestTimeout
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadSymbol
# -----------------------------------------------------------------------------
from ccxt.base.decimal_to_precision import decimal_to_precision
from ccxt.base.decimal_to_precision import DECIMAL_PLACES, TRUNCATE, ROUND, ROUND_UP, ROUND_DOWN
from ccxt.base.decimal_to_precision import number_to_string
# -----------------------------------------------------------------------------
# rsa jwt signing
from cryptography.hazmat import backends
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives.serialization import load_pem_private_key
# -----------------------------------------------------------------------------
# ecdsa signing
from ccxt.static_dependencies import ecdsa
# -----------------------------------------------------------------------------
__all__ = [
'Exchange',
]
# -----------------------------------------------------------------------------
# Python 2 & 3
import types
import logging
import base64
import calendar
import collections
import datetime
from email.utils import parsedate
import functools
import gzip
import hashlib
import hmac
import io
import json
import math
from numbers import Number
import re
from requests import Session
from requests.utils import default_user_agent
from requests.exceptions import HTTPError, Timeout, TooManyRedirects, RequestException
# import socket
from ssl import SSLError
# import sys
import time
import uuid
import zlib
from decimal import Decimal
# -----------------------------------------------------------------------------
try:
basestring # basestring was removed in Python 3
except NameError:
basestring = str
try:
long # long integer was removed in Python 3
except NameError:
long = int
# -----------------------------------------------------------------------------
try:
import urllib.parse as _urlencode # Python 3
except ImportError:
import urllib as _urlencode # Python 2
# -----------------------------------------------------------------------------
# web3/0x imports
try:
from web3 import Web3, HTTPProvider
except ImportError:
Web3 = HTTPProvider = None # web3/0x not supported in Python 2
# -----------------------------------------------------------------------------
class Exchange(object):
    """Base exchange class

    Class-level attributes below are default values; Exchange.__init__
    deep-merges the per-exchange describe() metadata and the user config
    over them, so subclasses and users can override any of these.
    """
    id = None        # unique lowercase exchange id, set by each subclass
    version = None   # exchange API version string
    certified = False

    # rate limiter settings
    enableRateLimit = False
    rateLimit = 2000  # milliseconds = seconds * 1000
    timeout = 10000   # milliseconds = seconds * 1000
    asyncio_loop = None
    aiohttp_proxy = None
    aiohttp_trust_env = False
    session = None  # Session () by default
    verify = True  # SSL verification
    logger = None  # logging.getLogger(__name__) by default
    userAgent = None
    userAgents = {
        'chrome': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36',
        'chrome39': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36',
    }
    verbose = False
    markets = None
    symbols = None
    timeframes = None
    fees = {
        'trading': {
            'percentage': True,  # subclasses should rarely have to redefine this
        },
        'funding': {
            'withdraw': {},
            'deposit': {},
        },
    }
    loaded_fees = {
        'trading': {
            'percentage': True,
        },
        'funding': {
            'withdraw': {},
            'deposit': {},
        },
    }
    ids = None
    tickers = None
    api = None  # url map used by define_rest_api to generate request methods
    parseJsonResponse = True
    proxy = ''
    origin = '*'  # CORS origin
    proxies = None
    hostname = None  # in case of inaccessibility of the "main" domain
    # credentials (filled in by the user)
    apiKey = ''
    secret = ''
    password = ''
    uid = ''
    privateKey = ''  # a "0x"-prefixed hexstring private key for a wallet
    walletAddress = ''  # the wallet address "0x"-prefixed hexstring
    token = ''  # reserved for HTTP auth in some cases
    twofa = None
    marketsById = None
    markets_by_id = None
    currencies_by_id = None
    precision = None
    exceptions = None
    limits = {
        'amount': {
            'min': None,
            'max': None,
        },
        'price': {
            'min': None,
            'max': None,
        },
        'cost': {
            'min': None,
            'max': None,
        },
    }
    # default mapping from HTTP status codes (as strings) to ccxt exceptions,
    # used by handle_rest_errors
    httpExceptions = {
        '422': ExchangeError,
        '418': DDoSProtection,
        '429': DDoSProtection,
        '404': ExchangeNotAvailable,
        '409': ExchangeNotAvailable,
        '500': ExchangeNotAvailable,
        '501': ExchangeNotAvailable,
        '502': ExchangeNotAvailable,
        '520': ExchangeNotAvailable,
        '521': ExchangeNotAvailable,
        '522': ExchangeNotAvailable,
        '525': ExchangeNotAvailable,
        '526': ExchangeNotAvailable,
        '400': ExchangeNotAvailable,
        '403': ExchangeNotAvailable,
        '405': ExchangeNotAvailable,
        '503': ExchangeNotAvailable,
        '530': ExchangeNotAvailable,
        '408': RequestTimeout,
        '504': RequestTimeout,
        '401': AuthenticationError,
        '511': AuthenticationError,
    }
    headers = None
    balance = None
    orderbooks = None
    orders = None
    trades = None
    transactions = None
    currencies = None
    options = None  # Python does not allow to define properties in run-time with setattr
    accounts = None
    status = {
        'status': 'ok',
        'updated': None,
        'eta': None,
        'url': None,
    }
    # which credentials a given exchange requires before private calls work
    requiredCredentials = {
        'apiKey': True,
        'secret': True,
        'uid': False,
        'login': False,
        'password': False,
        'twofa': False,  # 2-factor authentication (one-time password key)
        'privateKey': False,  # a "0x"-prefixed hexstring private key for a wallet
        'walletAddress': False,  # the wallet address "0x"-prefixed hexstring
        'token': False,  # reserved for HTTP auth in some cases
    }

    # API method metainfo
    has = {
        'cancelAllOrders': False,
        'cancelOrder': True,
        'cancelOrders': False,
        'CORS': False,
        'createDepositAddress': False,
        'createLimitOrder': True,
        'createMarketOrder': True,
        'createOrder': True,
        'deposit': False,
        'editOrder': 'emulated',
        'fetchBalance': True,
        'fetchClosedOrders': False,
        'fetchCurrencies': False,
        'fetchDepositAddress': False,
        'fetchDeposits': False,
        'fetchL2OrderBook': True,
        'fetchLedger': False,
        'fetchMarkets': True,
        'fetchMyTrades': False,
        'fetchOHLCV': 'emulated',
        'fetchOpenOrders': False,
        'fetchOrder': False,
        'fetchOrderBook': True,
        'fetchOrderBooks': False,
        'fetchOrders': False,
        'fetchStatus': 'emulated',
        'fetchTicker': True,
        'fetchTickers': False,
        'fetchTime': False,
        'fetchTrades': True,
        'fetchTradingFee': False,
        'fetchTradingFees': False,
        'fetchFundingFee': False,
        'fetchFundingFees': False,
        'fetchTradingLimits': False,
        'fetchTransactions': False,
        'fetchWithdrawals': False,
        'privateAPI': True,
        'publicAPI': True,
        'withdraw': False,
    }
    precisionMode = DECIMAL_PLACES
    minFundingAddressLength = 1  # used in check_address
    substituteCommonCurrencyCodes = True
    lastRestRequestTimestamp = 0
    lastRestPollTimestamp = 0
    restRequestQueue = None
    restPollerLoopIsRunning = False
    rateLimitTokens = 16
    rateLimitMaxTokens = 16
    rateLimitUpdateTime = 0
    # toggles for keeping copies of the most recent raw response data
    enableLastHttpResponse = True
    enableLastJsonResponse = True
    enableLastResponseHeaders = True
    last_http_response = None
    last_json_response = None
    last_response_headers = None
    requiresWeb3 = False
    web3 = None
    # exchange-specific ticker codes normalized to unified currency codes
    commonCurrencies = {
        'XBT': 'BTC',
        'BCC': 'BCH',
        'DRK': 'DASH',
        'BCHABC': 'BCH',
        'BCHSV': 'BSV',
    }
    def __init__(self, config={}):
        """Merge subclass metadata (describe()) and the user `config` over the
        class-level defaults, generate the REST API methods, and initialize
        per-instance mutable containers."""
        # Give every instance its own mutable containers (the class-level
        # defaults are None so instances never share state).
        self.precision = dict() if self.precision is None else self.precision
        self.limits = dict() if self.limits is None else self.limits
        self.exceptions = dict() if self.exceptions is None else self.exceptions
        self.headers = dict() if self.headers is None else self.headers
        self.balance = dict() if self.balance is None else self.balance
        self.orderbooks = dict() if self.orderbooks is None else self.orderbooks
        self.orders = dict() if self.orders is None else self.orders
        self.trades = list() if self.trades is None else self.trades
        self.transactions = dict() if self.transactions is None else self.transactions
        self.currencies = dict() if self.currencies is None else self.currencies
        self.options = dict() if self.options is None else self.options  # Python does not allow to define properties in run-time with setattr
        self.decimal_to_precision = decimal_to_precision
        self.number_to_string = number_to_string
        self.origin = self.uuid()
        self.userAgent = default_user_agent()
        # Deep-merge: class defaults <- describe() <- user config.
        settings = self.deep_extend(self.describe(), config)
        for key in settings:
            if hasattr(self, key) and isinstance(getattr(self, key), dict):
                setattr(self, key, self.deep_extend(getattr(self, key), settings[key]))
            else:
                setattr(self, key, settings[key])
        if self.api:
            self.define_rest_api(self.api, 'request')
        if self.markets:
            self.set_markets(self.markets)
        # convert all properties from underscore notation foo_bar to camelcase notation fooBar
        cls = type(self)
        for name in dir(self):
            if name[0] != '_' and name[-1] != '_' and '_' in name:
                parts = name.split('_')
                camelcase = parts[0] + ''.join(self.capitalize(i) for i in parts[1:])
                attr = getattr(self, name)
                if isinstance(attr, types.MethodType):
                    # methods get a camelCase alias on the class itself
                    setattr(cls, camelcase, getattr(cls, name))
                else:
                    setattr(self, camelcase, attr)
        # default token-bucket config for the rate limiter, overridable
        self.tokenBucket = self.extend({
            'refillRate': 1.0 / self.rateLimit,
            'delay': 0.001,
            'capacity': 1.0,
            'defaultCost': 1.0,
        }, getattr(self, 'tokenBucket') if hasattr(self, 'tokenBucket') else {})
        # reuse a user-provided session; async mode manages its own transport
        self.session = self.session if self.session or self.asyncio_loop else Session()
        self.logger = self.logger if self.logger else logging.getLogger(__name__)
        if self.requiresWeb3 and Web3 and not self.web3:
            self.web3 = Web3(HTTPProvider())
def __del__(self):
if self.session:
self.session.close()
def __repr__(self):
return 'ccxt.' + ('async_support.' if self.asyncio_loop else '') + self.id + '()'
    def __str__(self):
        # Human-readable exchange name (set by each subclass via describe()).
        return self.name
    def describe(self):
        # Overridden by every exchange subclass to return its static metadata
        # (id, name, urls, api map, fees, ...); the base implementation is empty.
        return {}
def set_sandbox_mode(self, enabled):
if enabled:
if 'test' in self.urls:
self.urls['api_backup'] = self.urls['api']
self.urls['api'] = self.urls['test']
else:
raise NotSupported(self.id + ' does not have a sandbox URL')
elif 'api_backup' in self.urls:
self.urls['api'] = self.urls['api_backup']
del self.urls['api_backup']
    @classmethod
    def define_rest_api(cls, api, method_name, options={}):
        """Generate camelCase and snake_case request methods for every endpoint
        in the `api` url map and attach them to the class.

        For api['public']['get'] = ['ticker/{pair}'] this creates both
        `publicGetTickerPair` and `public_get_ticker_pair`, each forwarding to
        `method_name` (normally 'request') with path/api/verb pre-bound.
        """
        delimiters = re.compile('[^a-zA-Z0-9]')
        entry = getattr(cls, method_name)  # returns a function (instead of a bound method)
        for api_type, methods in api.items():
            for http_method, urls in methods.items():
                for url in urls:
                    url = url.strip()
                    split_path = delimiters.split(url)

                    uppercase_method = http_method.upper()
                    lowercase_method = http_method.lower()
                    camelcase_method = lowercase_method.capitalize()
                    camelcase_suffix = ''.join([Exchange.capitalize(x) for x in split_path])
                    lowercase_path = [x.strip().lower() for x in split_path]
                    underscore_suffix = '_'.join([k for k in lowercase_path if len(k)])

                    camelcase = api_type + camelcase_method + Exchange.capitalize(camelcase_suffix)
                    underscore = api_type + '_' + lowercase_method + '_' + underscore_suffix.lower()

                    if 'suffixes' in options:
                        if 'camelcase' in options['suffixes']:
                            camelcase += options['suffixes']['camelcase']
                        if 'underscore' in options['suffixes']:
                            underscore += options['suffixes']['underscore']

                    # partialer() freezes url/api_type/method for this iteration
                    # (a plain closure would late-bind the loop variables)
                    def partialer():
                        outer_kwargs = {'path': url, 'api': api_type, 'method': uppercase_method}

                        @functools.wraps(entry)
                        def inner(_self, params=None):
                            """
                            Inner is called when a generated method (publicGetX) is called.
                            _self is a reference to self created by function.__get__(exchange, type(exchange))
                            https://en.wikipedia.org/wiki/Closure_(computer_programming) equivalent to functools.partial
                            """
                            inner_kwargs = dict(outer_kwargs)  # avoid mutation
                            if params is not None:
                                inner_kwargs['params'] = params
                            return entry(_self, **inner_kwargs)
                        return inner
                    to_bind = partialer()
                    setattr(cls, camelcase, to_bind)
                    setattr(cls, underscore, to_bind)
def throttle(self):
now = float(self.milliseconds())
elapsed = now - self.lastRestRequestTimestamp
if elapsed < self.rateLimit:
delay = self.rateLimit - elapsed
time.sleep(delay / 1000.0)
def fetch2(self, path, api='public', method='GET', params={}, headers=None, body=None):
"""A better wrapper over request for deferred signing"""
if self.enableRateLimit:
self.throttle()
self.lastRestRequestTimestamp = self.milliseconds()
request = self.sign(path, api, method, params, headers, body)
return self.fetch(request['url'], request['method'], request['headers'], request['body'])
    def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Exchange.request is the entry point for all generated methods"""
        # thin delegation; subclasses may override to customize request flow
        return self.fetch2(path, api, method, params, headers, body)
@staticmethod
def gzip_deflate(response, text):
encoding = response.info().get('Content-Encoding')
if encoding in ('gzip', 'x-gzip', 'deflate'):
if encoding == 'deflate':
return zlib.decompress(text, -zlib.MAX_WBITS)
else:
return gzip.GzipFile('', 'rb', 9, io.BytesIO(text)).read()
return text
    def throw_exactly_matched_exception(self, exact, string, message):
        # Raise the exception class mapped to an exact match of `string`
        # (typically an exchange-specific error code); no-op otherwise.
        if string in exact:
            raise exact[string](message)
def throw_broadly_matched_exception(self, broad, string, message):
broad_key = self.find_broadly_matched_key(broad, string)
if broad_key is not None:
raise broad[broad_key](message)
def find_broadly_matched_key(self, broad, string):
"""A helper method for matching error strings exactly vs broadly"""
keys = list(broad.keys())
for i in range(0, len(keys)):
key = keys[i]
if string.find(key) >= 0:
return key
return None
    def handle_errors(self, code, reason, url, method, headers, body, response, request_headers, request_body):
        # Hook for subclasses: inspect the raw HTTP response and raise the
        # appropriate ccxt exception.  The base implementation does nothing.
        pass
def prepare_request_headers(self, headers=None):
headers = headers or {}
headers.update(self.headers)
if self.userAgent:
if type(self.userAgent) is str:
headers.update({'User-Agent': self.userAgent})
elif (type(self.userAgent) is dict) and ('User-Agent' in self.userAgent):
headers.update(self.userAgent)
if self.proxy:
headers.update({'Origin': self.origin})
headers.update({'Accept-Encoding': 'gzip, deflate'})
return headers
    def fetch(self, url, method='GET', headers=None, body=None):
        """Perform a HTTP request and return decoded JSON data"""
        request_headers = self.prepare_request_headers(headers)
        url = self.proxy + url
        if self.verbose:
            print("\nRequest:", method, url, request_headers, body)
        self.logger.debug("%s %s, Request: %s %s", method, url, request_headers, body)
        request_body = body
        if body:
            body = body.encode()
        # stateless requests: exchanges sign every call, cookies only confuse them
        self.session.cookies.clear()
        http_response = None
        http_status_code = None
        http_status_text = None
        json_response = None
        try:
            response = self.session.request(
                method,
                url,
                data=body,
                headers=request_headers,
                timeout=int(self.timeout / 1000),
                proxies=self.proxies,
                verify=self.verify
            )
            http_response = response.text
            http_status_code = response.status_code
            http_status_text = response.reason
            json_response = self.parse_json(http_response)
            headers = response.headers
            # FIXME remove last_x_responses from subclasses
            if self.enableLastHttpResponse:
                self.last_http_response = http_response
            if self.enableLastJsonResponse:
                self.last_json_response = json_response
            if self.enableLastResponseHeaders:
                self.last_response_headers = headers
            if self.verbose:
                print("\nResponse:", method, url, http_status_code, headers, http_response)
            self.logger.debug("%s %s, Response: %s %s %s", method, url, http_status_code, headers, http_response)
            # raises HTTPError for 4xx/5xx, handled below
            response.raise_for_status()

        except Timeout as e:
            raise RequestTimeout(method + ' ' + url)

        except TooManyRedirects as e:
            raise ExchangeError(method + ' ' + url)

        except SSLError as e:
            raise ExchangeError(method + ' ' + url)

        except HTTPError as e:
            # give subclasses a chance to raise a more specific error first
            self.handle_errors(http_status_code, http_status_text, url, method, headers, http_response, json_response, request_headers, request_body)
            self.handle_rest_errors(http_status_code, http_status_text, http_response, url, method)
            raise ExchangeError(method + ' ' + url)

        except RequestException as e:  # base exception class
            error_string = str(e)
            if ('ECONNRESET' in error_string) or ('Connection aborted.' in error_string):
                raise NetworkError(method + ' ' + url)
            else:
                raise ExchangeError(method + ' ' + url)

        # successful transport: still let subclasses inspect the payload
        self.handle_errors(http_status_code, http_status_text, url, method, headers, http_response, json_response, request_headers, request_body)
        self.handle_rest_response(http_response, json_response, url, method)
        if json_response is not None:
            return json_response
        if self.is_text_response(headers):
            return http_response
        return response.content
def handle_rest_errors(self, http_status_code, http_status_text, body, url, method):
error = None
string_code = str(http_status_code)
if string_code in self.httpExceptions:
error = self.httpExceptions[string_code]
if error == ExchangeNotAvailable:
if re.search('(cloudflare|incapsula|overload|ddos)', body, flags=re.IGNORECASE):
error = DDoSProtection
if error:
raise error(' '.join([method, url, string_code, http_status_text, body]))
    def handle_rest_response(self, response, json_response, url, method):
        # Called after a transport-level success: if the body *looked* like
        # JSON ("{"/"[" prefixed) but failed to parse, classify the failure
        # from well-known keywords in the raw body and raise accordingly.
        if self.is_json_encoded_object(response) and json_response is None:
            ddos_protection = re.search('(cloudflare|incapsula|overload|ddos)', response, flags=re.IGNORECASE)
            exchange_not_available = re.search('(offline|busy|retry|wait|unavailable|maintain|maintenance|maintenancing)', response, flags=re.IGNORECASE)
            if ddos_protection:
                raise DDoSProtection(' '.join([method, url, response]))
            if exchange_not_available:
                message = response + ' exchange downtime, exchange closed for maintenance or offline, DDoS protection or rate-limiting in effect'
                raise ExchangeNotAvailable(' '.join([method, url, response, message]))
            raise ExchangeError(' '.join([method, url, response]))
def parse_json(self, http_response):
try:
if Exchange.is_json_encoded_object(http_response):
return json.loads(http_response)
except ValueError: # superclass of JsonDecodeError (python2)
pass
def is_text_response(self, headers):
content_type = headers.get('Content-Type', '')
return content_type.startswith('application/json') or content_type.startswith('text/')
@staticmethod
def key_exists(dictionary, key):
if dictionary is None or key is None:
return False
if isinstance(dictionary, list):
if isinstance(key, int) and 0 <= key and key < len(dictionary):
return dictionary[key] is not None
else:
return False
if key in dictionary:
return dictionary[key] is not None
return False
@staticmethod
def safe_float(dictionary, key, default_value=None):
value = default_value
try:
if Exchange.key_exists(dictionary, key):
value = float(dictionary[key])
except ValueError as e:
value = default_value
return value
@staticmethod
def safe_string(dictionary, key, default_value=None):
return str(dictionary[key]) if Exchange.key_exists(dictionary, key) else default_value
@staticmethod
def safe_string_lower(dictionary, key, default_value=None):
return str(dictionary[key]).lower() if Exchange.key_exists(dictionary, key) else default_value
@staticmethod
def safe_string_upper(dictionary, key, default_value=None):
return str(dictionary[key]).upper() if Exchange.key_exists(dictionary, key) else default_value
    @staticmethod
    def safe_integer(dictionary, key, default_value=None):
        # Return dictionary[key] as an int, or default_value when the key is
        # missing/None or the value is not an int-like number.
        if not Exchange.key_exists(dictionary, key):
            return default_value
        value = dictionary[key]
        # NOTE(review): str.isnumeric() is False for signed ('-1') or decimal
        # ('1.5') strings, so those fall through to default_value — confirm
        # that is the intended behavior for all call sites.
        if isinstance(value, Number) or (isinstance(value, basestring) and value.isnumeric()):
            return int(value)
        return default_value
    @staticmethod
    def safe_integer_product(dictionary, key, factor, default_value=None):
        """Return int(dictionary[key] * factor), or *default_value* when missing/non-numeric.
        Numeric strings are routed through float() first so the multiply works."""
        if not Exchange.key_exists(dictionary, key):
            return default_value
        value = dictionary[key]
        if isinstance(value, Number):
            return int(value * factor)
        elif isinstance(value, basestring) and value.isnumeric():
            return int(float(value) * factor)
        return default_value
    @staticmethod
    def safe_timestamp(dictionary, key, default_value=None):
        """Read a seconds-based timestamp at *key* and convert it to milliseconds."""
        return Exchange.safe_integer_product(dictionary, key, 1000, default_value)
    @staticmethod
    def safe_value(dictionary, key, default_value=None):
        """Return dictionary[key] unconverted, or *default_value* when missing or None."""
        return dictionary[key] if Exchange.key_exists(dictionary, key) else default_value
# we're not using safe_floats with a list argument as we're trying to save some cycles here
# we're not using safe_float_3 either because those cases are too rare to deserve their own optimization
    @staticmethod
    def safe_float_2(dictionary, key1, key2, default_value=None):
        """safe_float over key1, falling back to key2, then to *default_value*."""
        return Exchange.safe_either(Exchange.safe_float, dictionary, key1, key2, default_value)
    @staticmethod
    def safe_string_2(dictionary, key1, key2, default_value=None):
        """safe_string over key1, falling back to key2, then to *default_value*."""
        return Exchange.safe_either(Exchange.safe_string, dictionary, key1, key2, default_value)
    @staticmethod
    def safe_string_lower_2(dictionary, key1, key2, default_value=None):
        """safe_string_lower over key1, falling back to key2, then to *default_value*."""
        return Exchange.safe_either(Exchange.safe_string_lower, dictionary, key1, key2, default_value)
    @staticmethod
    def safe_string_upper_2(dictionary, key1, key2, default_value=None):
        """safe_string_upper over key1, falling back to key2, then to *default_value*."""
        return Exchange.safe_either(Exchange.safe_string_upper, dictionary, key1, key2, default_value)
    @staticmethod
    def safe_integer_2(dictionary, key1, key2, default_value=None):
        """safe_integer over key1, falling back to key2, then to *default_value*."""
        return Exchange.safe_either(Exchange.safe_integer, dictionary, key1, key2, default_value)
    @staticmethod
    def safe_integer_product_2(dictionary, key1, key2, factor, default_value=None):
        """safe_integer_product over key1, falling back to key2, then to *default_value*."""
        value = Exchange.safe_integer_product(dictionary, key1, factor)
        return value if value is not None else Exchange.safe_integer_product(dictionary, key2, factor, default_value)
    @staticmethod
    def safe_timestamp_2(dictionary, key1, key2, default_value=None):
        """Two-key variant of safe_timestamp (seconds at key1/key2 -> milliseconds)."""
        return Exchange.safe_integer_product_2(dictionary, key1, key2, 1000, default_value)
    @staticmethod
    def safe_value_2(dictionary, key1, key2, default_value=None):
        """safe_value over key1, falling back to key2, then to *default_value*."""
        return Exchange.safe_either(Exchange.safe_value, dictionary, key1, key2, default_value)
@staticmethod
def safe_either(method, dictionary, key1, key2, default_value=None):
"""A helper-wrapper for the safe_value_2() family."""
value = method(dictionary, key1)
return value if value is not None else method(dictionary, key2, default_value)
@staticmethod
def truncate(num, precision=0):
"""Deprecated, use decimal_to_precision instead"""
if precision > 0:
decimal_precision = math.pow(10, precision)
return math.trunc(num * decimal_precision) / decimal_precision
return int(Exchange.truncate_to_string(num, precision))
@staticmethod
def truncate_to_string(num, precision=0):
"""Deprecated, todo: remove references from subclasses"""
if precision > 0:
parts = ('{0:.%df}' % precision).format(Decimal(num)).split('.')
decimal_digits = parts[1][:precision].rstrip('0')
decimal_digits = decimal_digits if len(decimal_digits) else '0'
return parts[0] + '.' + decimal_digits
return ('%d' % num)
    @staticmethod
    def uuid():
        """Return a random RFC 4122 version-4 UUID as a string."""
        return str(uuid.uuid4())
@staticmethod
def capitalize(string): # first character only, rest characters unchanged
# the native pythonic .capitalize() method lowercases all other characters
# which is an unwanted behaviour, therefore we use this custom implementation
# check it yourself: print('foobar'.capitalize(), 'fooBar'.capitalize())
if len(string) > 1:
return "%s%s" % (string[0].upper(), string[1:])
return string.upper()
    @staticmethod
    def strip(string):
        """Trim leading and trailing whitespace (cross-language API shim for str.strip)."""
        return string.strip()
@staticmethod
def keysort(dictionary):
return collections.OrderedDict(sorted(dictionary.items(), key=lambda t: t[0]))
@staticmethod
def extend(*args):
if args is not None:
result = None
if type(args[0]) is collections.OrderedDict:
result = collections.OrderedDict()
else:
result = {}
for arg in args:
result.update(arg)
return result
return {}
    @staticmethod
    def deep_extend(*args):
        """Recursively merge the arguments left to right: dicts are merged key-by-key,
        any non-dict value simply replaces the accumulated result."""
        result = None
        for arg in args:
            if isinstance(arg, dict):
                if not isinstance(result, dict):
                    # a dict always restarts the merge over whatever came before
                    result = {}
                for key in arg:
                    result[key] = Exchange.deep_extend(result[key] if key in result else None, arg[key])
            else:
                result = arg
        return result
    @staticmethod
    def filter_by(array, key, value=None):
        """Keep only the entries whose entry[key] equals *value* (None by default)."""
        array = Exchange.to_array(array)
        return list(filter(lambda x: x[key] == value, array))
    @staticmethod
    def filterBy(array, key, value=None):
        """camelCase alias of filter_by (kept for cross-language API parity)."""
        return Exchange.filter_by(array, key, value)
@staticmethod
def group_by(array, key):
result = {}
array = Exchange.to_array(array)
array = [entry for entry in array if (key in entry) and (entry[key] is not None)]
for entry in array:
if entry[key] not in result:
result[entry[key]] = []
result[entry[key]].append(entry)
return result
    @staticmethod
    def groupBy(array, key):
        """camelCase alias of group_by (kept for cross-language API parity)."""
        return Exchange.group_by(array, key)
    @staticmethod
    def index_by(array, key):
        """Build {entry[key]: entry} from the entries that carry a non-None *key*.
        Later duplicates overwrite earlier ones; dict input is iterated in key order."""
        result = {}
        if type(array) is dict:
            array = Exchange.keysort(array).values()
        for element in array:
            if (key in element) and (element[key] is not None):
                k = element[key]
                result[k] = element
        return result
@staticmethod
def sort_by(array, key, descending=False):
return sorted(array, key=lambda k: k[key] if k[key] is not None else "", reverse=descending)
    @staticmethod
    def array_concat(a, b):
        """Concatenate two lists (cross-language API shim)."""
        return a + b
    @staticmethod
    def in_array(needle, haystack):
        """Membership test (cross-language API shim for the `in` operator)."""
        return needle in haystack
    @staticmethod
    def is_empty(object):
        """Truthiness-based emptiness test: True for None, '', 0, and empty containers."""
        return not object
@staticmethod
def extract_params(string):
return re.findall(r'{([\w-]+)}', string)
@staticmethod
def implode_params(string, params):
if isinstance(params, dict):
for key in params:
if not isinstance(params[key], list):
string = string.replace('{' + key + '}', str(params[key]))
return string
@staticmethod
def urlencode(params={}):
for key, value in params.items():
if isinstance(value, bool):
params[key] = 'true' if value else 'false'
return _urlencode.urlencode(params)
    @staticmethod
    def urlencode_with_array_repeat(params={}):
        """URL-encode, then strip '[n]' index suffixes so array params repeat the bare key."""
        return re.sub(r'%5B\d*%5D', '', Exchange.urlencode(params))
    @staticmethod
    def rawencode(params={}):
        """URL-encode, then unquote: 'k=v&...' pairs with values left percent-decoded."""
        return _urlencode.unquote(Exchange.urlencode(params))
@staticmethod
def encode_uri_component(uri):
return _urlencode.quote(uri, safe="~()*!.'")
@staticmethod
def omit(d, *args):
if isinstance(d, dict):
result = d.copy()
for arg in args:
if type(arg) is list:
for key in arg:
if key in result:
del result[key]
else:
if arg in result:
del result[arg]
return result
return d
    @staticmethod
    def unique(array):
        """Return the distinct elements of *array*; order is NOT preserved (set-based)."""
        return list(set(array))
@staticmethod
def pluck(array, key):
return [
element[key]
for element in array
if (key in element) and (element[key] is not None)
]
    @staticmethod
    def sum(*args):
        """Add the numeric (int/float) arguments, silently skipping all others (incl. None).
        The body resolves `sum` to the builtin; the method name shadows it only as a class attribute."""
        return sum([arg for arg in args if isinstance(arg, (float, int))])
    @staticmethod
    def ordered(array):
        """Wrap *array* (pairs or mapping) in an insertion-ordered OrderedDict."""
        return collections.OrderedDict(array)
@staticmethod
def aggregate(bidasks):
ordered = Exchange.ordered({})
for [price, volume] in bidasks:
if volume > 0:
ordered[price] = (ordered[price] if price in ordered else 0) + volume
result = []
items = list(ordered.items())
for price, volume in items:
result.append([price, volume])
return result
    @staticmethod
    def sec():
        """Alias of seconds()."""
        return Exchange.seconds()
    @staticmethod
    def msec():
        """Alias of milliseconds()."""
        return Exchange.milliseconds()
    @staticmethod
    def usec():
        """Alias of microseconds()."""
        return Exchange.microseconds()
    @staticmethod
    def seconds():
        """Current Unix timestamp in whole seconds."""
        return int(time.time())
    @staticmethod
    def milliseconds():
        """Current Unix timestamp in whole milliseconds."""
        return int(time.time() * 1000)
    @staticmethod
    def microseconds():
        """Current Unix timestamp in whole microseconds."""
        return int(time.time() * 1000000)
    @staticmethod
    def iso8601(timestamp=None):
        """Format a millisecond Unix timestamp as 'YYYY-MM-DDTHH:MM:SS.sssZ' (UTC).
        Returns None for negative or non-integer input; None passes through."""
        if timestamp is None:
            return timestamp
        if not isinstance(timestamp, (int, long)):  # `long` is the py2 alias defined at module level
            return None
        if int(timestamp) < 0:
            return None
        try:
            utc = datetime.datetime.utcfromtimestamp(timestamp // 1000)
            # strftime yields microseconds; drop the 6 digits and splice in integer milliseconds
            return utc.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-6] + "{:03d}".format(int(timestamp) % 1000) + 'Z'
        except (TypeError, OverflowError, OSError):
            return None
@staticmethod
def dmy(timestamp, infix='-'):
utc_datetime = datetime.datetime.utcfromtimestamp(int(round(timestamp / 1000)))
return utc_datetime.strftime('%m' + infix + '%d' + infix + '%Y')
@staticmethod
def ymd(timestamp, infix='-'):
utc_datetime = datetime.datetime.utcfromtimestamp(int(round(timestamp / 1000)))
return utc_datetime.strftime('%Y' + infix + '%m' + infix + '%d')
@staticmethod
def ymdhms(timestamp, infix=' '):
utc_datetime = datetime.datetime.utcfromtimestamp(int(round(timestamp / 1000)))
return utc_datetime.strftime('%Y-%m-%d' + infix + '%H:%M:%S')
    @staticmethod
    def parse_date(timestamp=None):
        """Parse a datetime string into a millisecond Unix timestamp.
        RFC 2822 strings (containing 'GMT') go through email.utils.parsedate;
        everything else is delegated to parse8601. Returns None on failure."""
        if timestamp is None:
            return timestamp
        if not isinstance(timestamp, str):
            return None
        if 'GMT' in timestamp:
            try:
                string = ''.join([str(value) for value in parsedate(timestamp)[:6]]) + '.000Z'
                dt = datetime.datetime.strptime(string, "%Y%m%d%H%M%S.%fZ")
                return calendar.timegm(dt.utctimetuple()) * 1000
            except (TypeError, OverflowError, OSError):
                return None
        else:
            return Exchange.parse8601(timestamp)
    @staticmethod
    def parse8601(timestamp=None):
        """Parse an ISO 8601 datetime string ('YYYY-MM-DDTHH:MM:SS[.sss][+HH:MM|Z]')
        into a millisecond Unix timestamp; returns None on any parse failure."""
        if timestamp is None:
            return timestamp
        yyyy = '([0-9]{4})-?'
        mm = '([0-9]{2})-?'
        dd = '([0-9]{2})(?:T|[\\s])?'
        h = '([0-9]{2}):?'
        m = '([0-9]{2}):?'
        s = '([0-9]{2})'
        ms = '(\\.[0-9]{1,3})?'
        tz = '(?:(\\+|\\-)([0-9]{2})\\:?([0-9]{2})|Z)?'
        regex = r'' + yyyy + mm + dd + h + m + s + ms + tz
        try:
            match = re.search(regex, timestamp, re.IGNORECASE)
            if match is None:
                return None
            yyyy, mm, dd, h, m, s, ms, sign, hours, minutes = match.groups()
            ms = ms or '.000'
            msint = int(ms[1:])
            sign = sign or ''
            # a '+HH:MM' offset must be subtracted to reach UTC, hence the * -1
            sign = int(sign + '1') * -1
            hours = int(hours or 0) * sign
            minutes = int(minutes or 0) * sign
            offset = datetime.timedelta(hours=hours, minutes=minutes)
            string = yyyy + mm + dd + h + m + s + ms + 'Z'
            dt = datetime.datetime.strptime(string, "%Y%m%d%H%M%S.%fZ")
            dt = dt + offset
            return calendar.timegm(dt.utctimetuple()) * 1000 + msint
        except (TypeError, OverflowError, OSError, ValueError):
            return None
@staticmethod
def hash(request, algorithm='md5', digest='hex'):
h = hashlib.new(algorithm, request)
if digest == 'hex':
return h.hexdigest()
elif digest == 'base64':
return base64.b64encode(h.digest())
return h.digest()
    @staticmethod
    def hmac(request, secret, algorithm=hashlib.sha256, digest='hex'):
        """HMAC *request* (bytes) with *secret* (bytes); *digest* selects 'hex',
        'base64', or raw bytes output. Note: the method name shadows the hmac
        module only as a class attribute; the body still sees the module."""
        h = hmac.new(secret, request, algorithm)
        if digest == 'hex':
            return h.hexdigest()
        elif digest == 'base64':
            return base64.b64encode(h.digest())
        return h.digest()
@staticmethod
def binary_concat(*args):
result = bytes()
for arg in args:
result = result + arg
return result
@staticmethod
def binary_concat_array(array):
result = bytes()
for element in array:
result = result + element
return result
    @staticmethod
    def base64urlencode(s):
        """URL-safe base64 of bytes *s*, as str, with '=' padding stripped (JWT style)."""
        return Exchange.decode(base64.urlsafe_b64encode(s)).replace('=', '')
    @staticmethod
    def binary_to_base64(s):
        """Standard base64 of bytes *s*, returned as a str."""
        return Exchange.decode(base64.standard_b64encode(s))
    @staticmethod
    def jwt(request, secret, alg='HS256'):
        """Build a signed JSON Web Token over the *request* payload.
        HS256/384/512 use HMAC with *secret* as the key; RS* algorithms delegate
        to Exchange.rsa with *secret* as a PEM private key."""
        algos = {
            'HS256': hashlib.sha256,
            'HS384': hashlib.sha384,
            'HS512': hashlib.sha512,
        }
        header = Exchange.encode(Exchange.json({
            'alg': alg,
            'typ': 'JWT',
        }))
        encoded_header = Exchange.base64urlencode(header)
        encoded_data = Exchange.base64urlencode(Exchange.encode(Exchange.json(request)))
        token = encoded_header + '.' + encoded_data
        if alg[:2] == 'RS':
            signature = Exchange.rsa(token, secret, alg)
        else:
            algorithm = algos[alg]
            signature = Exchange.hmac(Exchange.encode(token), secret, algorithm, 'binary')
        return token + '.' + Exchange.base64urlencode(signature)
    @staticmethod
    def rsa(request, secret, alg='RS256'):
        """Sign *request* with PKCS#1 v1.5 RSA using the PEM private key in *secret*
        (bytes) and the SHA-2 hash selected by *alg*. Returns the raw signature bytes."""
        algorithms = {
            "RS256": hashes.SHA256(),
            "RS384": hashes.SHA384(),
            "RS512": hashes.SHA512(),
        }
        algorithm = algorithms[alg]
        priv_key = load_pem_private_key(secret, None, backends.default_backend())
        return priv_key.sign(Exchange.encode(request), padding.PKCS1v15(), algorithm)
    @staticmethod
    def ecdsa(request, secret, algorithm='p256', hash=None, fixed_length=False):
        """Deterministic (RFC 6979) ECDSA signature of *request* with the hex-encoded
        *secret* key on the named curve. Returns {'r', 's', 'v'} with r/s as lowercase
        hex strings. When fixed_length is True, re-signs with extra entropy until
        r and s are full-width and s is canonical.
        """
        # you're welcome - frosty00
        algorithms = {
            'p192': [ecdsa.NIST192p, 'sha256'],
            'p224': [ecdsa.NIST224p, 'sha256'],
            'p256': [ecdsa.NIST256p, 'sha256'],
            'p384': [ecdsa.NIST384p, 'sha384'],
            'p521': [ecdsa.NIST521p, 'sha512'],
            'secp256k1': [ecdsa.SECP256k1, 'sha256'],
        }
        if algorithm not in algorithms:
            raise ArgumentsRequired(algorithm + ' is not a supported algorithm')
        curve_info = algorithms[algorithm]
        hash_function = getattr(hashlib, curve_info[1])
        encoded_request = Exchange.encode(request)
        if hash is not None:
            digest = Exchange.hash(encoded_request, hash, 'binary')
        else:
            # no hash requested: the request itself is treated as a hex digest
            digest = base64.b16decode(encoded_request, casefold=True)
        key = ecdsa.SigningKey.from_string(base64.b16decode(Exchange.encode(secret),
                                                            casefold=True), curve=curve_info[0])
        r_binary, s_binary, v = key.sign_digest_deterministic(digest, hashfunc=hash_function,
                                                              sigencode=ecdsa.util.sigencode_strings_canonize)
        r_int, s_int = ecdsa.util.sigdecode_strings((r_binary, s_binary), key.privkey.order)
        counter = 0
        minimum_size = (1 << (8 * 31)) - 1
        # NOTE(review): true division yields a float here; comparing huge ints against
        # a float may lose precision near the boundary — confirm // was not intended
        half_order = key.privkey.order / 2
        while fixed_length and (r_int > half_order or r_int <= minimum_size or s_int <= minimum_size):
            r_binary, s_binary, v = key.sign_digest_deterministic(digest, hashfunc=hash_function,
                                                                  sigencode=ecdsa.util.sigencode_strings_canonize,
                                                                  extra_entropy=Exchange.numberToLE(counter, 32))
            r_int, s_int = ecdsa.util.sigdecode_strings((r_binary, s_binary), key.privkey.order)
            counter += 1
        r, s = Exchange.decode(base64.b16encode(r_binary)).lower(), Exchange.decode(base64.b16encode(s_binary)).lower()
        return {
            'r': r,
            's': s,
            'v': v,
        }
    @staticmethod
    def unjson(input):
        """Inverse of Exchange.json: decode a JSON string into Python objects."""
        return json.loads(input)
    @staticmethod
    def json(data, params=None):
        """Serialize *data* to compact JSON (no spaces after separators).
        *params* is unused here; presumably kept for cross-language API parity — TODO confirm."""
        return json.dumps(data, separators=(',', ':'))
    @staticmethod
    def is_json_encoded_object(input):
        """Cheap heuristic: a string of length >= 2 starting with '{' or '['."""
        return (isinstance(input, basestring) and
                (len(input) >= 2) and
                ((input[0] == '{') or (input[0] == '[')))
    @staticmethod
    def encode(string):
        """str -> bytes using the default (UTF-8) encoding."""
        return string.encode()
    @staticmethod
    def decode(string):
        """bytes -> str using the default (UTF-8) encoding."""
        return string.decode()
@staticmethod
def to_array(value):
return list(value.values()) if type(value) is dict else value
    def nonce(self):
        """Default request nonce: the current Unix time in seconds (subclasses may override)."""
        return Exchange.seconds()
    def check_required_credentials(self, error=True):
        """Verify every credential flagged in self.requiredCredentials is set on self.
        Raises AuthenticationError on the first missing one when *error* is True;
        otherwise returns the *error* flag. Returns True when all are present."""
        keys = list(self.requiredCredentials.keys())
        for key in keys:
            if self.requiredCredentials[key] and not getattr(self, key):
                if error:
                    raise AuthenticationError('requires `' + key + '`')
                else:
                    return error
        return True
    def check_address(self, address):
        """Checks an address is not the same character repeated or an empty sequence.
        Also rejects addresses shorter than self.minFundingAddressLength or containing
        spaces; raises InvalidAddress on failure, returns the address otherwise."""
        if address is None:
            raise InvalidAddress('address is None')
        if all(letter == address[0] for letter in address) or len(address) < self.minFundingAddressLength or ' ' in address:
            raise InvalidAddress('address is invalid or has less than ' + str(self.minFundingAddressLength) + ' characters: "' + str(address) + '"')
        return address
def account(self):
return {
'free': None,
'used': None,
'total': None,
}
    def common_currency_code(self, currency):
        """Map an exchange-specific currency code to its unified code via
        self.commonCurrencies (no-op when substitution is disabled)."""
        if not self.substituteCommonCurrencyCodes:
            return currency
        return self.safe_string(self.commonCurrencies, currency, currency)
    def currency_id(self, commonCode):
        """Reverse of common_currency_code: unified code -> exchange-specific id.
        Prefers loaded currency metadata, then the inverted commonCurrencies map."""
        if self.currencies:
            if commonCode in self.currencies:
                return self.currencies[commonCode]['id']
        currencyIds = {v: k for k, v in self.commonCurrencies.items()}
        return self.safe_string(currencyIds, commonCode, commonCode)
def precision_from_string(self, string):
parts = re.sub(r'0+$', '', string).split('.')
return len(parts[1]) if len(parts) > 1 else 0
    def cost_to_precision(self, symbol, cost):
        """Round *cost* to the market's price precision (ROUND mode)."""
        return self.decimal_to_precision(cost, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode)
    def price_to_precision(self, symbol, price):
        """Round *price* to the market's price precision (ROUND mode)."""
        return self.decimal_to_precision(price, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode)
    def amount_to_precision(self, symbol, amount):
        """Truncate *amount* to the market's amount precision (TRUNCATE, never rounds up)."""
        return self.decimal_to_precision(amount, TRUNCATE, self.markets[symbol]['precision']['amount'], self.precisionMode)
    def fee_to_precision(self, symbol, fee):
        """Round *fee* to the market's price precision (ROUND mode)."""
        return self.decimal_to_precision(fee, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode)
    def currency_to_precision(self, currency, fee):
        """Round *fee* to the given currency's own precision (ROUND mode)."""
        return self.decimal_to_precision(fee, ROUND, self.currencies[currency]['precision'], self.precisionMode)
    def set_markets(self, markets, currencies=None):
        """Install *markets* (list or dict) on the instance: merges trading fees and
        default precision/limits into each market, builds the symbol/id indexes, and
        derives self.currencies from the markets when none are supplied explicitly."""
        values = list(markets.values()) if type(markets) is dict else markets
        for i in range(0, len(values)):
            # per-market data wins over the exchange-wide fee/precision/limit defaults
            values[i] = self.extend(
                self.fees['trading'],
                {'precision': self.precision, 'limits': self.limits},
                values[i]
            )
        self.markets = self.index_by(values, 'symbol')
        self.markets_by_id = self.index_by(values, 'id')
        self.marketsById = self.markets_by_id
        self.symbols = sorted(list(self.markets.keys()))
        self.ids = sorted(list(self.markets_by_id.keys()))
        if currencies:
            self.currencies = self.deep_extend(currencies, self.currencies)
        else:
            # no explicit currency metadata: synthesize it from each market's
            # base/quote side, defaulting precision to 8 when markets carry none
            base_currencies = [{
                'id': market['baseId'] if 'baseId' in market else market['base'],
                'numericId': market['baseNumericId'] if 'baseNumericId' in market else None,
                'code': market['base'],
                'precision': (
                    market['precision']['base'] if 'base' in market['precision'] else (
                        market['precision']['amount'] if 'amount' in market['precision'] else None
                    )
                ) if 'precision' in market else 8,
            } for market in values if 'base' in market]
            quote_currencies = [{
                'id': market['quoteId'] if 'quoteId' in market else market['quote'],
                'numericId': market['quoteNumericId'] if 'quoteNumericId' in market else None,
                'code': market['quote'],
                'precision': (
                    market['precision']['quote'] if 'quote' in market['precision'] else (
                        market['precision']['price'] if 'price' in market['precision'] else None
                    )
                ) if 'precision' in market else 8,
            } for market in values if 'quote' in market]
            currencies = self.sort_by(base_currencies + quote_currencies, 'code')
            self.currencies = self.deep_extend(self.index_by(currencies, 'code'), self.currencies)
        self.currencies_by_id = self.index_by(list(self.currencies.values()), 'id')
        return self.markets
    def load_markets(self, reload=False, params={}):
        """Return cached markets, fetching (and optionally currencies) on first use
        or when *reload* is True."""
        if not reload:
            if self.markets:
                if not self.markets_by_id:
                    # markets were assigned directly; (re)build the id index
                    return self.set_markets(self.markets)
                return self.markets
        currencies = None
        if self.has['fetchCurrencies']:
            currencies = self.fetch_currencies()
        markets = self.fetch_markets(params)
        return self.set_markets(markets, currencies)
    def load_accounts(self, reload=False, params={}):
        """Return cached accounts, fetching on first use or when *reload* is True;
        also maintains the self.accountsById index."""
        if reload:
            self.accounts = self.fetch_accounts(params)
        else:
            if self.accounts:
                return self.accounts
            else:
                self.accounts = self.fetch_accounts(params)
        self.accountsById = self.index_by(self.accounts, 'id')
        return self.accounts
    def load_fees(self, reload=False):
        """Return cached fees, fetching on first use or when *reload* is True.
        NOTE(review): the cache check compares against the class-level default
        (Exchange.loaded_fees) to detect 'never fetched' — confirm intent."""
        if not reload:
            if self.loaded_fees != Exchange.loaded_fees:
                return self.loaded_fees
        self.loaded_fees = self.deep_extend(self.loaded_fees, self.fetch_fees())
        return self.loaded_fees
    def fetch_markets(self, params={}):
        """Default implementation: return the statically-configured markets as a list."""
        # markets are returned as a list
        # currencies are returned as a dict
        # this is for historical reasons
        # and may be changed for consistency later
        return self.to_array(self.markets)
    def fetch_currencies(self, params={}):
        """Default implementation: return the statically-configured currencies dict."""
        # markets are returned as a list
        # currencies are returned as a dict
        # this is for historical reasons
        # and may be changed for consistency later
        return self.currencies
    def fetch_fees(self):
        """Fetch trading and funding fees where supported; unsupported parts stay {}."""
        trading = {}
        funding = {}
        if self.has['fetchTradingFees']:
            trading = self.fetch_trading_fees()
        if self.has['fetchFundingFees']:
            funding = self.fetch_funding_fees()
        return {
            'trading': trading,
            'funding': funding,
        }
    def create_order(self, symbol, type, side, amount, price=None, params={}):
        """Abstract; concrete exchange subclasses override this."""
        raise NotSupported('create_order() not supported yet')
    def cancel_order(self, id, symbol=None, params={}):
        """Abstract; concrete exchange subclasses override this."""
        raise NotSupported('cancel_order() not supported yet')
    def fetch_bids_asks(self, symbols=None, params={}):
        """Abstract; overridden only by exchanges that can return all bids/asks in one call."""
        raise NotSupported('API does not allow to fetch all prices at once with a single call to fetch_bids_asks() for now')
    def fetch_ticker(self, symbol, params={}):
        """Abstract; concrete exchange subclasses override this."""
        raise NotSupported('fetch_ticker() not supported yet')
    def fetch_tickers(self, symbols=None, params={}):
        """Abstract; overridden only by exchanges that can return all tickers in one call."""
        raise NotSupported('API does not allow to fetch all tickers at once with a single call to fetch_tickers() for now')
    def fetch_order_status(self, id, symbol=None, params={}):
        """Convenience wrapper: fetch the order and return only its 'status' field."""
        order = self.fetch_order(id, symbol, params)
        return order['status']
    def purge_cached_orders(self, before):
        """Drop cached orders that are closed AND older than *before* (ms); open orders are kept."""
        orders = self.to_array(self.orders)
        orders = [order for order in orders if (order['status'] == 'open') or (order['timestamp'] >= before)]
        self.orders = self.index_by(orders, 'id')
        return self.orders
    def fetch_order(self, id, symbol=None, params={}):
        """Abstract; concrete exchange subclasses override this."""
        raise NotSupported('fetch_order() is not supported yet')
    def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
        """Abstract; concrete exchange subclasses override this."""
        raise NotSupported('fetch_orders() is not supported yet')
    def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
        """Abstract; concrete exchange subclasses override this."""
        raise NotSupported('fetch_open_orders() is not supported yet')
    def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
        """Abstract; concrete exchange subclasses override this."""
        raise NotSupported('fetch_closed_orders() is not supported yet')
    def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
        """Abstract; concrete exchange subclasses override this."""
        raise NotSupported('fetch_my_trades() is not supported yet')
    def fetch_order_trades(self, id, symbol=None, params={}):
        """Abstract; concrete exchange subclasses override this."""
        raise NotSupported('fetch_order_trades() is not supported yet')
    def fetch_transactions(self, symbol=None, since=None, limit=None, params={}):
        """Abstract; concrete exchange subclasses override this."""
        raise NotSupported('fetch_transactions() is not supported yet')
    def fetch_deposits(self, symbol=None, since=None, limit=None, params={}):
        """Abstract; concrete exchange subclasses override this."""
        raise NotSupported('fetch_deposits() is not supported yet')
    def fetch_withdrawals(self, symbol=None, since=None, limit=None, params={}):
        """Abstract; concrete exchange subclasses override this."""
        raise NotSupported('fetch_withdrawals() is not supported yet')
def parse_ohlcv(self, ohlcv, market=None, timeframe='1m', since=None, limit=None):
return ohlcv[0:6] if isinstance(ohlcv, list) else ohlcv
    def parse_ohlcvs(self, ohlcvs, market=None, timeframe='1m', since=None, limit=None):
        """Parse a batch of raw OHLCV rows, dropping candles older than *since*,
        capping the result at *limit*, and sorting by timestamp."""
        ohlcvs = self.to_array(ohlcvs)
        num_ohlcvs = len(ohlcvs)
        result = []
        i = 0
        while i < num_ohlcvs:
            if limit and (len(result) >= limit):
                break
            ohlcv = self.parse_ohlcv(ohlcvs[i], market, timeframe, since, limit)
            i = i + 1
            if since and (ohlcv[0] < since):
                continue
            result.append(ohlcv)
        return self.sort_by(result, 0)
def parse_bid_ask(self, bidask, price_key=0, amount_key=0):
return [float(bidask[price_key]), float(bidask[amount_key])]
    def parse_bids_asks(self, bidasks, price_key=0, amount_key=1):
        """Parse a raw list of orderbook rows (list-shaped or dict-shaped) into
        [price, amount] float pairs; rows with falsy price or amount are skipped."""
        result = []
        if len(bidasks):
            if type(bidasks[0]) is list:
                for bidask in bidasks:
                    if bidask[price_key] and bidask[amount_key]:
                        result.append(self.parse_bid_ask(bidask, price_key, amount_key))
            elif type(bidasks[0]) is dict:
                for bidask in bidasks:
                    if (price_key in bidask) and (amount_key in bidask) and (bidask[price_key] and bidask[amount_key]):
                        result.append(self.parse_bid_ask(bidask, price_key, amount_key))
            else:
                raise ExchangeError('unrecognized bidask format: ' + str(bidasks[0]))
        return result
    def fetch_l2_order_book(self, symbol, limit=None, params={}):
        """Fetch the order book and aggregate it to level 2 (one row per price level),
        bids sorted descending and asks ascending."""
        orderbook = self.fetch_order_book(symbol, limit, params)
        return self.extend(orderbook, {
            'bids': self.sort_by(self.aggregate(orderbook['bids']), 0, True),
            'asks': self.sort_by(self.aggregate(orderbook['asks']), 0),
        })
    def parse_order_book(self, orderbook, timestamp=None, bids_key='bids', asks_key='asks', price_key=0, amount_key=1):
        """Build the unified orderbook structure: bids sorted descending, asks ascending;
        missing or non-list sides become empty lists."""
        return {
            'bids': self.sort_by(self.parse_bids_asks(orderbook[bids_key], price_key, amount_key) if (bids_key in orderbook) and isinstance(orderbook[bids_key], list) else [], 0, True),
            'asks': self.sort_by(self.parse_bids_asks(orderbook[asks_key], price_key, amount_key) if (asks_key in orderbook) and isinstance(orderbook[asks_key], list) else [], 0),
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp) if timestamp is not None else None,
            'nonce': None,
        }
    def parse_balance(self, balance):
        """Complete and index a per-currency balance structure in place.
        For each currency, derives whichever of free/used/total is missing from the
        other two, then adds the transposed 'free'/'used'/'total' per-account views.
        Every key except 'info' is treated as a currency code."""
        currencies = self.omit(balance, 'info').keys()
        balance['free'] = {}
        balance['used'] = {}
        balance['total'] = {}
        for currency in currencies:
            # total = free + used
            if balance[currency].get('total') is None:
                if balance[currency].get('free') is not None and balance[currency].get('used') is not None:
                    balance[currency]['total'] = self.sum(balance[currency].get('free'), balance[currency].get('used'))
            # free = total - used
            if balance[currency].get('free') is None:
                if balance[currency].get('total') is not None and balance[currency].get('used') is not None:
                    balance[currency]['free'] = self.sum(balance[currency]['total'], -balance[currency]['used'])
            # used = total - free
            if balance[currency].get('used') is None:
                if balance[currency].get('total') is not None and balance[currency].get('free') is not None:
                    balance[currency]['used'] = self.sum(balance[currency]['total'], -balance[currency]['free'])
        for account in ['free', 'used', 'total']:
            balance[account] = {}
            for currency in currencies:
                balance[account][currency] = balance[currency][account]
        return balance
    def fetch_partial_balance(self, part, params={}):
        """Fetch the full balance and return only the requested view ('free'/'used'/'total')."""
        balance = self.fetch_balance(params)
        return balance[part]
    def fetch_free_balance(self, params={}):
        """Fetch only the 'free' balances per currency."""
        return self.fetch_partial_balance('free', params)
    def fetch_used_balance(self, params={}):
        """Fetch only the 'used' (on-hold) balances per currency."""
        return self.fetch_partial_balance('used', params)
    def fetch_total_balance(self, params={}):
        """Fetch only the 'total' balances per currency."""
        return self.fetch_partial_balance('total', params)
    def fetch_trading_fees(self, symbol, params={}):
        """Abstract; concrete exchange subclasses override this.
        NOTE(review): the *symbol* parameter is inconsistent with the caller in
        fetch_trading_fee(), which passes only params — confirm intended signature."""
        raise NotSupported('fetch_trading_fees() not supported yet')
    def fetch_trading_fee(self, symbol, params={}):
        """Single-symbol convenience wrapper over fetch_trading_fees (where supported)."""
        if not self.has['fetchTradingFees']:
            raise NotSupported('fetch_trading_fee() not supported yet')
        return self.fetch_trading_fees(params)
    def fetch_funding_fees(self, params={}):
        """Abstract; concrete exchange subclasses override this."""
        raise NotSupported('fetch_funding_fees() not supported yet')
    def fetch_funding_fee(self, code, params={}):
        """Single-currency convenience wrapper over fetch_funding_fees (where supported)."""
        if not self.has['fetchFundingFees']:
            raise NotSupported('fetch_funding_fee() not supported yet')
        return self.fetch_funding_fees(params)
    def load_trading_limits(self, symbols=None, reload=False, params={}):
        """Fetch per-symbol trading limits (where supported) and merge them into
        self.markets; cached via options['limitsLoaded'] unless *reload* is True."""
        if self.has['fetchTradingLimits']:
            if reload or not('limitsLoaded' in list(self.options.keys())):
                response = self.fetch_trading_limits(symbols)
                for i in range(0, len(symbols)):
                    symbol = symbols[i]
                    self.markets[symbol] = self.deep_extend(self.markets[symbol], response[symbol])
                self.options['limitsLoaded'] = self.milliseconds()
        return self.markets
    def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
        """Default OHLCV implementation: fetch public trades and bucket them into candles."""
        if not self.has['fetchTrades']:
            raise NotSupported('fetch_ohlcv() not supported yet')
        self.load_markets()
        trades = self.fetch_trades(symbol, since, limit, params)
        return self.build_ohlcv(trades, timeframe, since, limit)
    def fetch_status(self, params={}):
        """Return self.status, refreshing its 'updated' stamp from fetch_time where supported."""
        if self.has['fetchTime']:
            updated = self.fetch_time(params)
            self.status['updated'] = updated
        return self.status
    def fetchOHLCV(self, symbol, timeframe='1m', since=None, limit=None, params={}):
        """camelCase alias of fetch_ohlcv (kept for cross-language API parity)."""
        return self.fetch_ohlcv(symbol, timeframe, since, limit, params)
    def parse_trading_view_ohlcv(self, ohlcvs, market=None, timeframe='1m', since=None, limit=None):
        """Convert TradingView column data to OHLCV rows, then parse them normally."""
        result = self.convert_trading_view_to_ohlcv(ohlcvs)
        return self.parse_ohlcvs(result, market, timeframe, since, limit)
def convert_trading_view_to_ohlcv(self, ohlcvs):
result = []
for i in range(0, len(ohlcvs['t'])):
result.append([
ohlcvs['t'][i] * 1000,
ohlcvs['o'][i],
ohlcvs['h'][i],
ohlcvs['l'][i],
ohlcvs['c'][i],
ohlcvs['v'][i],
])
return result
def convert_ohlcv_to_trading_view(self, ohlcvs):
result = {
't': [],
'o': [],
'h': [],
'l': [],
'c': [],
'v': [],
}
for i in range(0, len(ohlcvs)):
result['t'].append(int(ohlcvs[i][0] / 1000))
result['o'].append(ohlcvs[i][1])
result['h'].append(ohlcvs[i][2])
result['l'].append(ohlcvs[i][3])
result['c'].append(ohlcvs[i][4])
result['v'].append(ohlcvs[i][5])
return result
    def build_ohlcv(self, trades, timeframe='1m', since=None, limit=None):
        """Bucket a chronological list of trades into OHLCV candles of the given
        timeframe. Trades before *since* are skipped; *limit* caps how many trades
        are consumed.
        NOTE(review): `range(0, oldest)` with oldest = num_trades - 1 never visits
        the final trade — confirm whether this exclusion is intentional."""
        ms = self.parse_timeframe(timeframe) * 1000
        ohlcvs = []
        (high, low, close, volume) = (2, 3, 4, 5)
        num_trades = len(trades)
        oldest = (num_trades - 1) if limit is None else min(num_trades - 1, limit)
        for i in range(0, oldest):
            trade = trades[i]
            if (since is not None) and (trade['timestamp'] < since):
                continue
            opening_time = int(math.floor(trade['timestamp'] / ms) * ms)  # Shift the edge of the m/h/d (but not M)
            j = len(ohlcvs)
            if (j == 0) or opening_time >= ohlcvs[j - 1][0] + ms:
                # moved to a new timeframe -> create a new candle from opening trade
                ohlcvs.append([
                    opening_time,
                    trade['price'],
                    trade['price'],
                    trade['price'],
                    trade['price'],
                    trade['amount'],
                ])
            else:
                # still processing the same timeframe -> update opening trade
                ohlcvs[j - 1][high] = max(ohlcvs[j - 1][high], trade['price'])
                ohlcvs[j - 1][low] = min(ohlcvs[j - 1][low], trade['price'])
                ohlcvs[j - 1][close] = trade['price']
                ohlcvs[j - 1][volume] += trade['amount']
        return ohlcvs
@staticmethod
def parse_timeframe(timeframe):
amount = int(timeframe[0:-1])
unit = timeframe[-1]
if 'y' == unit:
scale = 60 * 60 * 24 * 365
elif 'M' == unit:
scale = 60 * 60 * 24 * 30
elif 'w' == unit:
scale = 60 * 60 * 24 * 7
elif 'd' == unit:
scale = 60 * 60 * 24
elif 'h' == unit:
scale = 60 * 60
elif 'm' == unit:
scale = 60
elif 's' == unit:
scale = 1
else:
raise NotSupported('timeframe unit {} is not supported'.format(unit))
return amount * scale
    @staticmethod
    def round_timeframe(timeframe, timestamp, direction=ROUND_DOWN):
        """Snap a millisecond *timestamp* to the nearest timeframe boundary,
        downwards by default or upwards with direction=ROUND_UP."""
        ms = Exchange.parse_timeframe(timeframe) * 1000
        # Get offset based on timeframe in milliseconds
        offset = timestamp % ms
        return timestamp - offset + (ms if direction == ROUND_UP else 0)
    def parse_trades(self, trades, market=None, since=None, limit=None, params={}):
        """Parse raw trades, sort by timestamp, and filter by symbol/since/limit."""
        array = self.to_array(trades)
        array = [self.extend(self.parse_trade(trade, market), params) for trade in array]
        array = self.sort_by(array, 'timestamp')
        symbol = market['symbol'] if market else None
        return self.filter_by_symbol_since_limit(array, symbol, since, limit)
    def parse_ledger(self, data, currency=None, since=None, limit=None, params={}):
        """Parse raw ledger items (an item may expand to several entries), sort by
        timestamp, and filter by currency/since/limit."""
        array = self.to_array(data)
        result = []
        for item in array:
            entry = self.parse_ledger_entry(item, currency)
            if isinstance(entry, list):
                result += [self.extend(i, params) for i in entry]
            else:
                result.append(self.extend(entry, params))
        result = self.sort_by(result, 'timestamp')
        code = currency['code'] if currency else None
        return self.filter_by_currency_since_limit(result, code, since, limit)
    def parse_transactions(self, transactions, currency=None, since=None, limit=None, params={}):
        """Parse raw transactions, sort by timestamp, and filter by currency/since/limit."""
        array = self.to_array(transactions)
        array = [self.extend(self.parse_transaction(transaction, currency), params) for transaction in array]
        array = self.sort_by(array, 'timestamp')
        code = currency['code'] if currency else None
        return self.filter_by_currency_since_limit(array, code, since, limit)
    def parse_orders(self, orders, market=None, since=None, limit=None, params={}):
        """Parse raw orders, sort by timestamp, and filter by symbol/since/limit."""
        array = self.to_array(orders)
        array = [self.extend(self.parse_order(order, market), params) for order in array]
        array = self.sort_by(array, 'timestamp')
        symbol = market['symbol'] if market else None
        return self.filter_by_symbol_since_limit(array, symbol, since, limit)
    def safe_currency_code(self, currency_id, currency=None):
        """Resolve an exchange currency id to a unified code: loaded metadata first,
        then common-currency substitution on the uppercased id, then the fallback
        *currency* dict's own code."""
        code = None
        if currency_id is not None:
            if self.currencies_by_id is not None and currency_id in self.currencies_by_id:
                code = self.currencies_by_id[currency_id]['code']
            else:
                code = self.common_currency_code(currency_id.upper())
        if code is None and currency is not None:
            code = currency['code']
        return code
    def filter_by_value_since_limit(self, array, field, value=None, since=None, limit=None):
        """Generic filter: keep entries whose *field* equals *value* (when given),
        drop those older than *since*, and cap the result at *limit* entries."""
        array = self.to_array(array)
        if value:
            array = [entry for entry in array if entry[field] == value]
        if since:
            array = [entry for entry in array if entry['timestamp'] >= since]
        if limit:
            array = array[0:limit]
        return array
    def filter_by_symbol_since_limit(self, array, symbol=None, since=None, limit=None):
        """Symbol-keyed specialization of filter_by_value_since_limit."""
        return self.filter_by_value_since_limit(array, 'symbol', symbol, since, limit)
    def filter_by_currency_since_limit(self, array, code=None, since=None, limit=None):
        """Currency-keyed specialization of filter_by_value_since_limit."""
        return self.filter_by_value_since_limit(array, 'currency', code, since, limit)
def filter_by_since_limit(self, array, since=None, limit=None):
array = self.to_array(array)
if since:
array = [entry for entry in array if entry['timestamp'] >= since]
if limit:
array = array[0:limit]
return array
def filter_by_symbol(self, array, symbol=None):
array = self.to_array(array)
if symbol:
return [entry for entry in array if entry['symbol'] == symbol]
return array
def filter_by_array(self, objects, key, values=None, indexed=True):
objects = self.to_array(objects)
# return all of them if no values were passed in
if values is None:
return self.index_by(objects, key) if indexed else objects
result = []
for i in range(0, len(objects)):
value = objects[i][key] if key in objects[i] else None
if value in values:
result.append(objects[i])
return self.index_by(result, key) if indexed else result
    def currency(self, code):
        """Return the unified currency structure for `code`; raise if unknown."""
        if not self.currencies:
            raise ExchangeError('Currencies not loaded')
        # basestring: py2/py3 compat alias — presumably provided by the module
        # preamble; verify against the file header.
        if isinstance(code, basestring) and (code in self.currencies):
            return self.currencies[code]
        raise ExchangeError('Does not have currency code ' + str(code))
    def market(self, symbol):
        """Return the unified market structure for `symbol`; raise if unknown."""
        if not self.markets:
            raise ExchangeError('Markets not loaded')
        if isinstance(symbol, basestring) and (symbol in self.markets):
            return self.markets[symbol]
        raise BadSymbol('{} does not have market symbol {}'.format(self.id, symbol))
    def market_ids(self, symbols):
        # Translate a list of unified symbols to exchange-specific market ids.
        return [self.market_id(symbol) for symbol in symbols]
    def market_id(self, symbol):
        """Translate a unified symbol to the exchange-specific market id."""
        market = self.market(symbol)
        # fall back to the symbol itself if market() returned something non-dict
        return market['id'] if type(market) is dict else symbol
    def calculate_fee(self, symbol, type, side, amount, price, takerOrMaker='taker', params={}):
        """Compute the trading fee for an order.

        The fee rate is taken from the market's 'taker'/'maker' field; the
        cost (amount * price) and the resulting fee are both rounded to the
        exchange's precision. Fee is denominated in the quote currency.
        """
        market = self.markets[symbol]
        rate = market[takerOrMaker]
        cost = float(self.cost_to_precision(symbol, amount * price))
        return {
            'rate': rate,
            'type': takerOrMaker,
            'currency': market['quote'],
            'cost': float(self.fee_to_precision(symbol, rate * cost)),
        }
    # --- thin convenience wrappers around edit_order() / create_order() ---
    def edit_limit_buy_order(self, id, symbol, *args):
        return self.edit_limit_order(id, symbol, 'buy', *args)
    def edit_limit_sell_order(self, id, symbol, *args):
        return self.edit_limit_order(id, symbol, 'sell', *args)
    def edit_limit_order(self, id, symbol, *args):
        return self.edit_order(id, symbol, 'limit', *args)
    def edit_order(self, id, symbol, *args):
        """Edit an order by cancelling it and re-creating it with new args.

        Requires rate limiting because it issues two consecutive requests.
        Note: not atomic — the cancel may succeed while the create fails.
        """
        if not self.enableRateLimit:
            raise ExchangeError('edit_order() requires enableRateLimit = true')
        self.cancel_order(id, symbol)
        return self.create_order(symbol, *args)
    def create_limit_order(self, symbol, *args):
        return self.create_order(symbol, 'limit', *args)
    def create_market_order(self, symbol, *args):
        return self.create_order(symbol, 'market', *args)
    def create_limit_buy_order(self, symbol, *args):
        return self.create_order(symbol, 'limit', 'buy', *args)
    def create_limit_sell_order(self, symbol, *args):
        return self.create_order(symbol, 'limit', 'sell', *args)
    def create_market_buy_order(self, symbol, amount, params={}):
        # market orders have no price, hence the explicit None
        return self.create_order(symbol, 'market', 'buy', amount, None, params)
    def create_market_sell_order(self, symbol, amount, params={}):
        return self.create_order(symbol, 'market', 'sell', amount, None, params)
    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        # abstract: each concrete exchange must implement request signing
        raise NotSupported(self.id + ' sign() pure method must be redefined in derived classes')
# -------------------------------------------------------------------------
# web3 / 0x methods
    @staticmethod
    def has_web3():
        # True when the optional web3 package was importable at module load.
        return Web3 is not None
    def check_required_dependencies(self):
        """Raise NotSupported unless the web3 package is available."""
        if not Exchange.has_web3():
            raise NotSupported("Web3 functionality requires Python3 and web3 package installed: https://github.com/ethereum/web3.py")
    def eth_decimals(self, unit='ether'):
        """Return the number of decimal places for a named Ethereum unit
        (None for an unknown unit name)."""
        units = {
            'wei': 0,          # 1
            'kwei': 3,         # 1000
            'babbage': 3,      # 1000
            'femtoether': 3,   # 1000
            'mwei': 6,         # 1000000
            'lovelace': 6,     # 1000000
            'picoether': 6,    # 1000000
            'gwei': 9,         # 1000000000
            'shannon': 9,      # 1000000000
            'nanoether': 9,    # 1000000000
            'nano': 9,         # 1000000000
            'szabo': 12,       # 1000000000000
            'microether': 12,  # 1000000000000
            'micro': 12,       # 1000000000000
            'finney': 15,      # 1000000000000000
            'milliether': 15,  # 1000000000000000
            'milli': 15,       # 1000000000000000
            'ether': 18,       # 1000000000000000000
            'kether': 21,      # 1000000000000000000000
            'grand': 21,       # 1000000000000000000000
            'mether': 24,      # 1000000000000000000000000
            'gether': 27,      # 1000000000000000000000000000
            'tether': 30,      # 1000000000000000000000000000000
        }
        return self.safe_value(units, unit)
    def eth_unit(self, decimals=18):
        """Inverse of eth_decimals(): unit name for a decimal count
        (None when there is no canonical unit for that count)."""
        units = {
            0: 'wei',      # 1000000000000000000
            3: 'kwei',     # 1000000000000000
            6: 'mwei',     # 1000000000000
            9: 'gwei',     # 1000000000
            12: 'szabo',   # 1000000
            15: 'finney',  # 1000
            18: 'ether',   # 1
            21: 'kether',  # 0.001
            24: 'mether',  # 0.000001
            27: 'gether',  # 0.000000001
            30: 'tether',  # 0.000000000001
        }
        return self.safe_value(units, decimals)
    def fromWei(self, amount, unit='ether', decimals=18):
        """Convert an integer wei `amount` into a float in `unit`.

        For token precisions other than 18: multiples of 3 map onto a named
        Ethereum unit; otherwise the amount is rescaled to 18 decimals first.
        """
        if Web3 is None:
            raise NotSupported("ethereum web3 methods require Python 3: https://pythonclock.org")
        if amount is None:
            return amount
        if decimals != 18:
            if decimals % 3:
                amount = int(amount) * (10 ** (18 - decimals))
            else:
                unit = self.eth_unit(decimals)
        return float(Web3.fromWei(int(amount), unit))
    def toWei(self, amount, unit='ether', decimals=18):
        """Convert a human-readable `amount` to a wei string (inverse of fromWei)."""
        if Web3 is None:
            raise NotSupported("ethereum web3 methods require Python 3: https://pythonclock.org")
        if amount is None:
            return amount
        if decimals != 18:
            if decimals % 3:
                # this case has known yet unsolved problems:
                # toWei(1.999, 'ether', 17) == '199900000000000011'
                # toWei(1.999, 'ether', 19) == '19989999999999999991'
                # the best solution should not involve additional dependencies
                amount = Decimal(amount) / Decimal(10 ** (18 - decimals))
            else:
                unit = self.eth_unit(decimals)
        return str(Web3.toWei(amount, unit))
    def privateKeyToAddress(self, privateKey):
        """Derive the 0x-prefixed Ethereum address from a hex private key
        (secp256k1 pubkey -> keccak hash -> last 20 bytes)."""
        private_key_bytes = base64.b16decode(Exchange.encode(privateKey), True)
        public_key_bytes = ecdsa.SigningKey.from_string(private_key_bytes, curve=ecdsa.SECP256k1).verifying_key.to_string()
        public_key_hash = self.web3.sha3(public_key_bytes)
        return '0x' + Exchange.decode(base64.b16encode(public_key_hash))[-40:].lower()
    def soliditySha3(self, array):
        # Solidity-style keccak over ABI-packed values; types are inferred.
        values = self.solidityValues(array)
        types = self.solidityTypes(values)
        return self.web3.soliditySha3(types, values).hex()
    def solidityTypes(self, array):
        # Addresses stay addresses; every other value is treated as uint256.
        return ['address' if self.web3.isAddress(value) else 'uint256' for value in array]
    def solidityValues(self, array):
        # Checksum addresses; parse '0x...' strings as hex ints, others as decimal.
        return [self.web3.toChecksumAddress(value) if self.web3.isAddress(value) else (int(value, 16) if str(value)[:2] == '0x' else int(value)) for value in array]
    def getZeroExOrderHash2(self, order):
        """0x v1 order hash via the generic soliditySha3 helper."""
        return self.soliditySha3([
            order['exchangeContractAddress'],      # address
            order['maker'],                        # address
            order['taker'],                        # address
            order['makerTokenAddress'],            # address
            order['takerTokenAddress'],            # address
            order['feeRecipient'],                 # address
            order['makerTokenAmount'],             # uint256
            order['takerTokenAmount'],             # uint256
            order['makerFee'],                     # uint256
            order['takerFee'],                     # uint256
            order['expirationUnixTimestampSec'],   # uint256
            order['salt'],                         # uint256
        ])
    def getZeroExOrderHash(self, order):
        """0x v1 order hash with explicit type/value lists (mirrors the JS
        implementation in the 0x monorepo)."""
        unpacked = [
            self.web3.toChecksumAddress(order['exchangeContractAddress']),  # address
            self.web3.toChecksumAddress(order['maker']),                    # address
            self.web3.toChecksumAddress(order['taker']),                    # address
            self.web3.toChecksumAddress(order['makerTokenAddress']),        # address
            self.web3.toChecksumAddress(order['takerTokenAddress']),        # address
            self.web3.toChecksumAddress(order['feeRecipient']),             # address
            int(order['makerTokenAmount']),                                 # uint256
            int(order['takerTokenAmount']),                                 # uint256
            int(order['makerFee']),                                         # uint256
            int(order['takerFee']),                                         # uint256
            int(order['expirationUnixTimestampSec']),                       # uint256
            int(order['salt']),                                             # uint256
        ]
        types = [
            'address',  # exchangeContractAddress
            'address',  # maker
            'address',  # taker
            'address',  # makerTokenAddress
            'address',  # takerTokenAddress
            'address',  # feeRecipient
            'uint256',  # makerTokenAmount
            'uint256',  # takerTokenAmount
            'uint256',  # makerFee
            'uint256',  # takerFee
            'uint256',  # expirationUnixTimestampSec
            'uint256',  # salt
        ]
        return self.web3.soliditySha3(types, unpacked).hex()
@staticmethod
def remove_0x_prefix(value):
if value[:2] == '0x':
return value[2:]
return value
    def getZeroExOrderHashV2(self, order):
        """EIP-712 hash of a 0x protocol v2 order.

        Ported from the 0x monorepo's python order_utils package.
        The two byte constants below are precomputed keccak hashes of the
        EIP-712 domain header and the v2 order schema.
        """
        # https://github.com/0xProject/0x-monorepo/blob/development/python-packages/order_utils/src/zero_ex/order_utils/__init__.py
        def pad_20_bytes_to_32(twenty_bytes):
            # left-pad an address to a 32-byte ABI word
            return bytes(12) + twenty_bytes
        def int_to_32_big_endian_bytes(i):
            return i.to_bytes(32, byteorder="big")
        def to_bytes(value):
            if not isinstance(value, str):
                raise TypeError("Value must be an instance of str")
            if len(value) % 2:
                # odd-length hex: re-prefix with a leading zero nibble
                value = "0x0" + self.remove_0x_prefix(value)
            return base64.b16decode(self.remove_0x_prefix(value), casefold=True)
        domain_struct_header = b"\x91\xab=\x17\xe3\xa5\n\x9d\x89\xe6?\xd3\x0b\x92\xbe\x7fS6\xb0;({\xb9Fxz\x83\xa9\xd6*'f\xf0\xf2F\x18\xf4\xc4\xbe\x1eb\xe0&\xfb\x03\x9a \xef\x96\xf4IR\x94\x81}\x10'\xff\xaam\x1fp\xe6\x1e\xad|[\xef\x02x\x16\xa8\x00\xda\x176DO\xb5\x8a\x80~\xf4\xc9`;xHg?~:h\xeb\x14\xa5"
        order_schema_hash = b'w\x05\x01\xf8\x8a&\xed\xe5\xc0J \xef\x87yi\xe9a\xeb\x11\xfc\x13\xb7\x8a\xafAKc=\xa0\xd4\xf8o'
        header = b"\x19\x01"
        domain_struct_hash = self.web3.sha3(
            domain_struct_header +
            pad_20_bytes_to_32(to_bytes(order["exchangeAddress"]))
        )
        order_struct_hash = self.web3.sha3(
            order_schema_hash +
            pad_20_bytes_to_32(to_bytes(order["makerAddress"])) +
            pad_20_bytes_to_32(to_bytes(order["takerAddress"])) +
            pad_20_bytes_to_32(to_bytes(order["feeRecipientAddress"])) +
            pad_20_bytes_to_32(to_bytes(order["senderAddress"])) +
            int_to_32_big_endian_bytes(int(order["makerAssetAmount"])) +
            int_to_32_big_endian_bytes(int(order["takerAssetAmount"])) +
            int_to_32_big_endian_bytes(int(order["makerFee"])) +
            int_to_32_big_endian_bytes(int(order["takerFee"])) +
            int_to_32_big_endian_bytes(int(order["expirationTimeSeconds"])) +
            int_to_32_big_endian_bytes(int(order["salt"])) +
            self.web3.sha3(to_bytes(order["makerAssetData"])) +
            self.web3.sha3(to_bytes(order["takerAssetData"]))
        )
        # final EIP-712 digest: keccak(0x1901 || domainSeparator || structHash)
        sha3 = self.web3.sha3(
            header +
            domain_struct_hash +
            order_struct_hash
        )
        return '0x' + base64.b16encode(sha3).decode('ascii').lower()
    def signZeroExOrder(self, order, privateKey):
        """Attach a 0x v1 order hash and its EC signature to the order dict."""
        orderHash = self.getZeroExOrderHash(order)
        signature = self.signMessage(orderHash[-64:], privateKey)
        return self.extend(order, {
            'orderHash': orderHash,
            'ecSignature': signature,  # todo fix v if needed
        })
    def signZeroExOrderV2(self, order, privateKey):
        """Attach a 0x v2 order hash and its hex-packed signature to the order dict."""
        orderHash = self.getZeroExOrderHashV2(order)
        signature = self.signMessage(orderHash[-64:], privateKey)
        return self.extend(order, {
            'orderHash': orderHash,
            'signature': self._convertECSignatureToSignatureHex(signature),
        })
    def _convertECSignatureToSignatureHex(self, signature):
        """Pack {v, r, s} into the 0x hex format: 0x<v><r><s>03
        (trailing 03 = SignatureType.EthSign)."""
        # https://github.com/0xProject/0x-monorepo/blob/development/packages/order-utils/src/signature_utils.ts
        v = signature["v"]
        if v != 27 and v != 28:
            v = v + 27
        return (
            hex(v) +
            signature["r"][-64:] +
            signature["s"][-64:] +
            "03"
        )
    def hashMessage(self, message):
        """Keccak hash of a hex message with the Ethereum Signed Message prefix
        (EIP-191 'personal_sign' scheme); returns a 0x-prefixed hex string."""
        message_bytes = base64.b16decode(Exchange.encode(Exchange.remove_0x_prefix(message)), True)
        hash_bytes = self.web3.sha3(b"\x19Ethereum Signed Message:\n" + Exchange.encode(str(len(message_bytes))) + message_bytes)
        return '0x' + Exchange.decode(base64.b16encode(hash_bytes)).lower()
    @staticmethod
    def signHash(hash, privateKey):
        """ECDSA-sign a 32-byte hash (hex) with secp256k1; returns {r, s, v}
        with v offset by 27 per Ethereum convention."""
        signature = Exchange.ecdsa(hash[-64:], privateKey, 'secp256k1', None)
        return {
            'r': '0x' + signature['r'],
            's': '0x' + signature['s'],
            'v': 27 + signature['v'],
        }
    def signMessage(self, message, privateKey):
        """Sign a hex message: prefix-hash it (hashMessage), then ECDSA-sign.

        Equivalent to MetaMask's prefixed 'ETH_SIGN' flow, which yields the
        prefixed-hash signature (v=28 example below) rather than the raw-hash
        'NONE' variant.
        """
        #
        # The following comment is related to MetaMask, we use the upper type of signature prefix:
        #
        # z.ecSignOrderHashAsync ('0xcfdb0a485324ff37699b4c8557f6858f25916fc6fce5993b32fe018aea510b9f',
        #                         '0x731fc101bbe102221c91c31ed0489f1ddfc439a3', {
        #                              prefixType: 'ETH_SIGN',
        #                              shouldAddPrefixBeforeCallingEthSign: true
        #                          }).then ((e, r) => console.log (e,r))
        #
        # {                            ↓
        #     v: 28,
        #     r: "0xea7a68268b47c48d5d7a4c900e6f9af0015bf70951b3db2f1d835c5d544aaec2",
        #     s: "0x5d1db2a060c955c1fde4c967237b995c2361097405407b33c6046c8aeb3ccbdf"
        # }
        #
        # --------------------------------------------------------------------
        #
        # z.ecSignOrderHashAsync ('0xcfdb0a485324ff37699b4c8557f6858f25916fc6fce5993b32fe018aea510b9f',
        #                         '0x731fc101bbe102221c91c31ed0489f1ddfc439a3', {
        #                              prefixType: 'NONE',
        #                              shouldAddPrefixBeforeCallingEthSign: true
        #                          }).then ((e, r) => console.log (e,r))
        #
        # {                            ↓
        #     v: 27,
        #     r: "0xc8c710022c57de4f529d448e9b40517dd9bfb49ff1eb245f5856664b865d14a6",
        #     s: "0x0740bb21f4f094fbbdbafa903bb8f057f82e0c6e4fe65d19a1daed4ed97cd394"
        # }
        #
        message_hash = self.hashMessage(message)
        signature = self.signHash(message_hash[-64:], privateKey[-64:])
        return signature
def oath(self):
if self.twofa is not None:
return self.totp(self.twofa)
else:
raise ExchangeError(self.id + ' set .twofa to use this feature')
@staticmethod
def decimal_to_bytes(n, endian='big'):
"""int.from_bytes and int.to_bytes don't work in python2"""
if n > 0:
next_byte = Exchange.decimal_to_bytes(n // 0x100, endian)
remainder = bytes([n % 0x100])
return next_byte + remainder if endian == 'big' else remainder + next_byte
else:
return b''
@staticmethod
def totp(key):
def hex_to_dec(n):
return int(n, base=16)
def base32_to_bytes(n):
missing_padding = len(n) % 8
padding = 8 - missing_padding if missing_padding > 0 else 0
padded = n.upper() + ('=' * padding)
return base64.b32decode(padded) # throws an error if the key is invalid
epoch = int(time.time()) // 30
hmac_res = Exchange.hmac(Exchange.decimal_to_bytes(epoch, 'big'), base32_to_bytes(key.replace(' ', '')), hashlib.sha1, 'hex')
offset = hex_to_dec(hmac_res[-1]) * 2
otp = str(hex_to_dec(hmac_res[offset: offset + 8]) & 0x7fffffff)
return otp[-6:]
    @staticmethod
    def numberToLE(n, size):
        # little-endian bytes of n, right-padded with zeros to `size` bytes
        return Exchange.decimal_to_bytes(int(n), 'little').ljust(size, b'\x00')
    @staticmethod
    def numberToBE(n, size):
        # big-endian bytes of n, left-padded with zeros to `size` bytes
        return Exchange.decimal_to_bytes(int(n), 'big').rjust(size, b'\x00')
@staticmethod
def base16_to_binary(s):
return base64.b16decode(s, True)
    # python supports arbitrarily big integers
    # (these mirror bounded-integer helpers in the other ccxt language ports)
    @staticmethod
    def integer_divide(a, b):
        # floor division after coercing both operands to int
        return int(a) // int(b)
    @staticmethod
    def integer_pow(a, b):
        return int(a) ** int(b)
    @staticmethod
    def integer_modulo(a, b):
        # follows Python semantics: result takes the sign of b
        return int(a) % int(b)
| 40.230178 | 299 | 0.593223 |
4da799463338bea99c3933b1b8084cc56ab6a076 | 7,233 | py | Python | userbot/modules/heroku.py | AnggaraArif/Telebot | 7601643d51cecbdc4d8d9ad9a72c5366804250a8 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 7 | 2020-06-27T20:15:34.000Z | 2021-01-21T12:45:48.000Z | userbot/modules/heroku.py | AnggaraArif/Telebot | 7601643d51cecbdc4d8d9ad9a72c5366804250a8 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1 | 2020-02-19T20:07:34.000Z | 2020-02-19T20:07:34.000Z | userbot/modules/heroku.py | AnggaraArif/Telebot | 7601643d51cecbdc4d8d9ad9a72c5366804250a8 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 70 | 2020-04-26T02:47:50.000Z | 2022-01-26T10:13:13.000Z | # Copyright (C) 2020 Adek Maulana.
# All rights reserved.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
"""
Heroku manager for your userbot
"""
import heroku3
import aiohttp
import math
from userbot import (
CMD_HELP,
HEROKU_APP_NAME,
HEROKU_API_KEY,
BOTLOG,
BOTLOG_CHATID
)
from userbot.events import register
# Module-level Heroku client/app handles.
# Fixed: the client was constructed twice when fully configured; build it
# once and only bind the app/config when both settings are present.
Heroku = heroku3.from_key(HEROKU_API_KEY)
heroku_api = "https://api.heroku.com"
if HEROKU_APP_NAME is not None and HEROKU_API_KEY is not None:
    app = Heroku.app(HEROKU_APP_NAME)
    heroku_var = app.config()
else:
    # NOTE(review): heroku_var stays undefined in this branch — handlers
    # that touch it without checking `app` will raise NameError.
    app = None
"""
ConfigVars setting, get current var, set var or delete var...
"""
@register(outgoing=True,
          pattern=r"^\.(get|del) var(?: |$)(\w*)")
async def variable(var):
    """Handle `.get var [NAME]` and `.del var NAME`.

    `.get` forwards one (or, with no name, all) Heroku config vars to the
    BOTLOG chat; `.del` removes a config var (Heroku restarts the dyno on
    any config change).
    """
    exe = var.pattern_match.group(1)
    if app is None:
        await var.edit("`[HEROKU]"
                       "\nPlease setup your` **HEROKU_APP_NAME**.")
        return False
    if exe == "get":
        await var.edit("`Getting information...`")
        variable = var.pattern_match.group(2)
        if variable != '':
            # single named variable requested
            if variable in heroku_var:
                if BOTLOG:
                    await var.client.send_message(
                        BOTLOG_CHATID, "#CONFIGVAR\n\n"
                        "**ConfigVar**:\n"
                        f"`{variable}` = `{heroku_var[variable]}`\n"
                    )
                    await var.edit("`Received to BOTLOG_CHATID...`")
                    return True
                else:
                    await var.edit("`Please set BOTLOG to True...`")
                    return False
            else:
                await var.edit("`Information don't exists...`")
                return True
        else:
            # no name given: dump every config var
            configvars = heroku_var.to_dict()
            msg = ''
            if BOTLOG:
                for item in configvars:
                    msg += f"`{item}` = `{configvars[item]}`\n"
                await var.client.send_message(
                    BOTLOG_CHATID, "#CONFIGVARS\n\n"
                    "**ConfigVars**:\n"
                    f"{msg}"
                )
                await var.edit("`Received to BOTLOG_CHATID...`")
                return True
            else:
                await var.edit("`Please set BOTLOG to True...`")
                return False
    elif exe == "del":
        await var.edit("`Deleting information...`")
        variable = var.pattern_match.group(2)
        if variable == '':
            await var.edit("`Specify ConfigVars you want to del...`")
            return False
        if variable in heroku_var:
            if BOTLOG:
                await var.client.send_message(
                    BOTLOG_CHATID, "#DELCONFIGVAR\n\n"
                    "**Delete ConfigVar**:\n"
                    f"`{variable}`"
                )
            await var.edit("`Information deleted...`")
            # deletion triggers a dyno restart on Heroku
            del heroku_var[variable]
        else:
            await var.edit("`Information don't exists...`")
        return True
@register(outgoing=True, pattern=r'^\.set var (\w*) ([\s\S]*)')
async def set_var(var):
    """Handle `.set var NAME VALUE`: add or update a Heroku config var.

    NOTE(review): unlike variable(), this does not guard against `app is
    None` — heroku_var is undefined when HEROKU_APP_NAME is unset, so this
    would raise NameError; confirm and add the same guard if needed.
    Setting a config var restarts the dyno.
    """
    await var.edit("`Setting information...`")
    variable = var.pattern_match.group(1)
    value = var.pattern_match.group(2)
    if variable in heroku_var:
        if BOTLOG:
            await var.client.send_message(
                BOTLOG_CHATID, "#SETCONFIGVAR\n\n"
                "**Change ConfigVar**:\n"
                f"`{variable}` = `{value}`"
            )
        await var.edit("`Information sets...`")
    else:
        if BOTLOG:
            await var.client.send_message(
                BOTLOG_CHATID, "#ADDCONFIGVAR\n\n"
                "**Add ConfigVar**:\n"
                f"`{variable}` = `{value}`"
            )
        await var.edit("`Information added...`")
    heroku_var[variable] = value
"""
Check account quota, remaining quota, used quota, used app quota
"""
@register(outgoing=True, pattern=r"^\.usage(?: |$)")
async def dyno_usage(dyno):
    """
    Get your account Dyno Usage

    Queries the Heroku account-quotas API and reports both this app's
    used dyno hours and the hours remaining on the account this month.
    """
    await dyno.edit("`Getting Information...`")
    useragent = ('Mozilla/5.0 (Linux; Android 10; SM-G975F) '
                 'AppleWebKit/537.36 (KHTML, like Gecko) '
                 'Chrome/80.0.3987.149 Mobile Safari/537.36'
                 )
    user_id = Heroku.account().id
    headers = {
        'User-Agent': useragent,
        'Authorization': f'Bearer {HEROKU_API_KEY}',
        # account-quotas is a variant of the v3 API, selected via Accept
        'Accept': 'application/vnd.heroku+json; version=3.account-quotas',
    }
    path = "/accounts/" + user_id + "/actions/get-quota"
    async with aiohttp.ClientSession() as session:
        async with session.get(heroku_api + path, headers=headers) as r:
            if r.status != 200:
                await dyno.client.send_message(
                    dyno.chat_id,
                    f"`{r.reason}`",
                    reply_to=dyno.id
                )
                await dyno.edit("`Can't get information...`")
                return False
            result = await r.json()
            quota = result['account_quota']
            quota_used = result['quota_used']
            """ - User Quota Limit and Used - """
            remaining_quota = quota - quota_used
            percentage = math.floor(remaining_quota / quota * 100)
            minutes_remaining = remaining_quota / 60
            hours = math.floor(minutes_remaining / 60)
            minutes = math.floor(minutes_remaining % 60)
            """ - User App Used Quota - """
            Apps = result['apps']
            for apps in Apps:
                if apps.get('app_uuid') == app.id:
                    AppQuotaUsed = apps.get('quota_used') / 60
                    AppPercentage = math.floor(
                        apps.get('quota_used') * 100 / quota)
                    break
            else:
                # this app had no usage entry this month
                AppQuotaUsed = 0
                AppPercentage = 0
            AppHours = math.floor(AppQuotaUsed / 60)
            AppMinutes = math.floor(AppQuotaUsed % 60)
            await dyno.edit(
                "**Dyno Usage**:\n\n"
                f"-> `Dyno usage for` **{app.name}**:\n"
                f" • **{AppHours} hour(s), "
                f"{AppMinutes} minute(s) - {AppPercentage}%**"
                "\n\n"
                "-> `Dyno hours quota remaining this month`:\n"
                f" • **{hours} hour(s), {minutes} minute(s) "
                f"- {percentage}%**"
            )
            return True
CMD_HELP.update({
"heroku":
">.`usage`"
"\nUsage: Check your heroku dyno hours remaining"
"\n\n>`.set var <NEW VAR> <VALUE>`"
"\nUsage: add new variable or update existing value variable"
"\n!!! WARNING !!!, after setting a variable the bot will restarted"
"\n\n>`.get var or .get var <VAR>`"
"\nUsage: get your existing varibles, use it only on your private group!"
"\nThis returns all of your private information, please be caution..."
"\n\n>`.del var <VAR>`"
"\nUsage: delete existing variable"
"\n!!! WARNING !!!, after deleting variable the bot will restarted"
})
| 33.957746 | 78 | 0.521499 |
1652e9ed5baedccf6a74c917351cc8a965434792 | 1,331 | py | Python | ERation/shop/urls.py | mishelshaji/ERation | 0e755ae63ae2c9cc009e18b72425ac508670a1cf | [
"MIT"
] | null | null | null | ERation/shop/urls.py | mishelshaji/ERation | 0e755ae63ae2c9cc009e18b72425ac508670a1cf | [
"MIT"
] | null | null | null | ERation/shop/urls.py | mishelshaji/ERation | 0e755ae63ae2c9cc009e18b72425ac508670a1cf | [
"MIT"
] | null | null | null | from django.urls import path
from . import views as v
urlpatterns = [
path('', v.home, name='shop_home'),
path('shopprofile/', v.shop_profile, name='shop_profile'),
path('newcustomer/', v.new_customer, name='shop_new_customer'),
path('viewcustomer/<int:id>', v.view_customer, name='shop_view_customer'),
path('deletecustomer/<int:id>', v.delete_customer, name='shop_delete_customer'),
path('viewallocations/', v.view_allocations, name='view_shop_allocations'),
path('viewcards/', v.view_cards, name='shop_view_cards'),
path('addsales/', v.add_sales, name='shop_add_sales'),
path('newsale/<str:product>/<str:card_number>', v.new_sale, name='shop_new_sale'),
path('monthlyreport/', v.monthly_report, name='shop_monthly_report'),
path('stockupdate/', v.stock_update, name='shop_update_stock'),
path('viewstockupdates/', v.view_stock_update, name='shop_view_stock_updates'),
path('newdeliverystaff/', v.new_delivery_staff, name='shop_new_delivery_staff'),
path('viewstaffs/', v.view_delivery_staffs, name='shop_view_delivery_staffs'),
path('editstaff/<str:id>', v.edit_delivery_staff, name='shop_edit_delivery_staff'),
path('vieworders/', v.view_orders, name='shop_view_orders'),
path('vieworder/<int:orderid>', v.manage_order, name='shop_manage_order'),
]
| 41.59375 | 87 | 0.722014 |
d1bf981de4deb8fb9974128afe005df061a3506d | 8,003 | py | Python | frontend/tdop.py | adisbladis/oil | 8ae78500da543dfa899404bdca830b90277d17ad | [
"Apache-2.0"
] | null | null | null | frontend/tdop.py | adisbladis/oil | 8ae78500da543dfa899404bdca830b90277d17ad | [
"Apache-2.0"
] | null | null | null | frontend/tdop.py | adisbladis/oil | 8ae78500da543dfa899404bdca830b90277d17ad | [
"Apache-2.0"
] | null | null | null | """
tdop.py - Library for expression parsing.
"""
from _devbuild.gen.id_kind_asdl import Id, Id_t
from _devbuild.gen.syntax_asdl import (
arith_expr, arith_expr_t,
arith_expr__ArithWord, arith_expr__UnaryAssign, arith_expr__VarRef,
arith_expr__Binary, arith_expr__BinaryAssign,
sh_lhs_expr, sh_lhs_expr_t, sh_lhs_expr__Name,
word_t,
)
from _devbuild.gen.types_asdl import lex_mode_e
from core import util
from osh import word_
from typing import Callable, List, Dict, NoReturn, TYPE_CHECKING
if TYPE_CHECKING: # break circular dep
from osh.word_parse import WordParser
NullFunc = Callable[[TdopParser, word_t, int], arith_expr_t]
LeftFunc = Callable[[TdopParser, word_t, arith_expr_t, int], arith_expr_t]
p_die = util.p_die
def IsIndexable(node):
  # type: (arith_expr_t) -> bool
  """Return True if the expression may be indexed, e.g. f[1].

  Only bare variable references are indexable; any other arith_expr
  variant is rejected.
  """
  if isinstance(node, arith_expr__VarRef):
    return True  # f[1] is allowed
  return False
def ToLValue(node):
  # type: (arith_expr_t) -> sh_lhs_expr_t
  """Determine if a node is a valid L-value by whitelisting tags.

  Accepts a bare variable reference (foo = bar) or a single-level index
  of one (foo[1] = bar); returns None for anything else.
  """
  # foo = bar, foo[1] = bar
  if isinstance(node, arith_expr__VarRef):
    # For consistency with osh/cmd_parse.py, append a span_id.
    # TODO: (( a[ x ] = 1 )) and a[x]=1 should use different LST nodes.
    n = sh_lhs_expr.Name(node.token.val)
    n.spids.append(node.token.span_id)
    return n
  if isinstance(node, arith_expr__Binary):
    # For example, a[0][0] = 1 is NOT valid.
    if (node.op_id == Id.Arith_LBracket and
        isinstance(node.left, arith_expr__VarRef)):
      return sh_lhs_expr.IndexedName(node.left.token.val, node.right)
  return None
#
# Null Denotation
#
def NullError(p, t, bp):
  # type: (TdopParser, word_t, int) -> NoReturn
  # nud for tokens that may never begin an expression.
  # TODO: I need position information
  p_die("Token can't be used in prefix position", word=t)
def NullConstant(p, w, bp):
  # type: (TdopParser, word_t, int) -> arith_expr_t
  # nud for leaves: a variable reference if the word looks like one,
  # otherwise a plain word constant.
  var_name_token = word_.LooksLikeArithVar(w)
  if var_name_token:
    return arith_expr.VarRef(var_name_token)
  return arith_expr.ArithWord(w)
def NullParen(p, t, bp):
  # type: (TdopParser, word_t, int) -> arith_expr_t
  """ Arithmetic grouping """
  # parse the inner expression and require the matching ')'
  r = p.ParseUntil(bp)
  p.Eat(Id.Arith_RParen)
  return r
def NullPrefixOp(p, w, bp):
  # type: (TdopParser, word_t, int) -> arith_expr_t
  """Prefix operator.
  Low precedence: return, raise, etc.
    return x+y is return (x+y), not (return x) + y
  High precedence: logical negation, bitwise complement, etc.
    !x && y is (!x) && y, not !(x && y)
  """
  right = p.ParseUntil(bp)
  return arith_expr.Unary(word_.ArithId(w), right)
#
# Left Denotation
#
def LeftError(p, t, left, rbp):
  # type: (TdopParser, word_t, arith_expr_t, int) -> NoReturn
  # led for tokens that may never appear after an expression.
  # Hm is this not called because of binding power?
  p_die("Token can't be used in infix position", word=t)
def LeftBinaryOp(p, w, left, rbp):
  # type: (TdopParser, word_t, arith_expr_t, int) -> arith_expr_t
  """ Normal binary operator like 1+2 or 2*3, etc. """
  # TODO: w shoudl be a Token, and we should extract the token from it.
  return arith_expr.Binary(word_.ArithId(w), left, p.ParseUntil(rbp))
def LeftAssign(p, w, left, rbp):
  # type: (TdopParser, word_t, arith_expr_t, int) -> arith_expr_t
  """ Assignment operator like x += 1 or a[i] += 1. """
  # x += 1, or a[i] += 1
  lhs = ToLValue(left)
  if lhs is None:
    # TODO: It would be nice to point at 'left', but osh/word.py doesn't
    # support arbitrary arith_expr_t.
    #p_die("Can't assign to this expression", word=w)
    p_die("Left-hand side of this assignment is invalid", word=w)
  return arith_expr.BinaryAssign(word_.ArithId(w), lhs, p.ParseUntil(rbp))
#
# Parser definition
#
class LeftInfo(object):
  """Row for a token in left (infix/postfix) position: led + binding powers.

  In C++ this should be a big array.
  """
  def __init__(self, led=None, lbp=0, rbp=0):
    # type: (LeftFunc, int, int) -> None
    self.led = led or LeftError  # default led rejects the token
    self.lbp = lbp  # left binding power: may this token take the left expr?
    self.rbp = rbp  # right binding power: how tightly to parse the right side
class NullInfo(object):
  """Row for a token in null (prefix) position: nud + binding power.

  In C++ this should be a big array.
  """
  def __init__(self, nud=None, bp=0):
    # type: (NullFunc, int) -> None
    # Fixed: the default must be NullError, not LeftError.  A nud is
    # invoked as nud(parser, word, bp) (3 args), while LeftError has the
    # 4-arg led signature — the old default raised TypeError instead of
    # a proper parse error.
    self.nud = nud or NullError
    self.bp = bp
class ParserSpec(object):
  """Specification for a TDOP parser: per-token nud/led tables.

  This can be compiled to a table in C++.
  """
  def __init__(self):
    # type: () -> None
    self.nud_lookup = {}  # type: Dict[Id_t, NullInfo]
    self.led_lookup = {}  # type: Dict[Id_t, LeftInfo]
  def Null(self, bp, nud, tokens):
    # type: (int, NullFunc, List[Id_t]) -> None
    """Register a token that doesn't take anything on the left.

    Examples: constant, prefix operator, error.
    """
    for token in tokens:
      self.nud_lookup[token] = NullInfo(nud=nud, bp=bp)
      # ensure a led row exists so LookupLed never KeyErrors for this token
      if token not in self.led_lookup:
        self.led_lookup[token] = LeftInfo()  # error
  def _RegisterLed(self, lbp, rbp, led, tokens):
    # type: (int, int, LeftFunc, List[Id_t]) -> None
    # shared helper for Left/LeftRightAssoc; fills in an error nud if missing
    for token in tokens:
      if token not in self.nud_lookup:
        self.nud_lookup[token] = NullInfo(NullError)
      self.led_lookup[token] = LeftInfo(lbp=lbp, rbp=rbp, led=led)
  def Left(self, bp, led, tokens):
    # type: (int, LeftFunc, List[Id_t]) -> None
    """Register a token that takes an expression on the left."""
    self._RegisterLed(bp, bp, led, tokens)
  def LeftRightAssoc(self, bp, led, tokens):
    # type: (int, LeftFunc, List[Id_t]) -> None
    """Register a right associative operator (rbp = bp - 1)."""
    self._RegisterLed(bp, bp - 1, led, tokens)
  def LookupNud(self, token):
    # type: (Id_t) -> NullInfo
    # missing entries indicate a spec bug, hence AssertionError
    try:
      nud = self.nud_lookup[token]
    except KeyError:
      raise AssertionError('No nud for token %r' % token)
    return nud
  def LookupLed(self, token):
    # type: (Id_t) -> LeftInfo
    """Get a left_info for the token.  May raise KeyError."""
    return self.led_lookup[token]
#EOF_TOKEN = Token('eof', 'eof')
class TdopParser(object):
  """
  Parser state. Current token and lookup stack.

  Drives the precedence-climbing loop over tokens produced by the word
  parser in arithmetic lexer mode.
  """
  def __init__(self, spec, w_parser):
    # type: (ParserSpec, WordParser) -> None
    self.spec = spec
    self.w_parser = w_parser
    self.cur_word = None  # type: word_t  # current token
    self.op_id = Id.Undefined_Tok
  def AtToken(self, token_type):
    # type: (Id_t) -> bool
    # is the current token of the given type?
    return self.op_id == token_type
  def Eat(self, token_type):
    # type: (Id_t) -> None
    """Assert that we're at the current token and advance."""
    if not self.AtToken(token_type):
      p_die('Parser expected %s, got %s', token_type, self.cur_word,
            word=self.cur_word)
    self.Next()
  def Next(self):
    # type: () -> bool
    # advance: read the next word in arithmetic mode and cache its op id
    self.cur_word = self.w_parser.ReadWord(lex_mode_e.Arith)
    self.op_id = word_.ArithId(self.cur_word)
    return True
  def ParseUntil(self, rbp):
    # type: (int) -> arith_expr_t
    """
    Parse to the right, eating tokens until we encounter a token with binding
    power LESS THAN OR EQUAL TO rbp.
    """
    # TODO: use Kind.Eof
    if self.op_id in (Id.Eof_Real, Id.Eof_RParen, Id.Eof_Backtick):
      p_die('Unexpected end of input', word=self.cur_word)
    t = self.cur_word
    null_info = self.spec.LookupNud(self.op_id)
    self.Next()  # skip over the token, e.g. ! ~ + -
    node = null_info.nud(self, t, null_info.bp)
    while True:
      t = self.cur_word
      try:
        left_info = self.spec.LookupLed(self.op_id)
      except KeyError:
        raise AssertionError('Invalid token %s' % t)
      # Examples:
      # If we see 1*2+  , rbp = 27 and lbp = 25, so stop.
      # If we see 1+2+  , rbp = 25 and lbp = 25, so stop.
      # If we see 1**2**, rbp = 26 and lbp = 27, so keep going.
      if rbp >= left_info.lbp:
        break
      self.Next()  # skip over the token, e.g. / *
      node = left_info.led(self, t, node, left_info.rbp)
    return node
  def Parse(self):
    # type: () -> arith_expr_t
    # entry point: prime the first token and parse a full expression
    self.Next()  # may raise ParseError
    return self.ParseUntil(0)
| 28.080702 | 77 | 0.655629 |
f13047ab2222ba207d0ee275dd3f0c9fe9d1b5c6 | 1,851 | py | Python | GPyOpt/acquisitions/LCB.py | zhenwendai/GPyOpt | fd96875e7ec0cb0f78014d96813ece400648827d | [
"BSD-3-Clause"
] | 850 | 2015-05-31T21:12:41.000Z | 2022-03-24T17:25:37.000Z | GPyOpt/acquisitions/LCB.py | lakshaykc/GPyOpt | 097ba66e81c7e22b5bf9fdbe64fd135753bc4a67 | [
"BSD-3-Clause"
] | 340 | 2015-09-10T14:08:06.000Z | 2022-03-28T20:35:26.000Z | GPyOpt/acquisitions/LCB.py | lakshaykc/GPyOpt | 097ba66e81c7e22b5bf9fdbe64fd135753bc4a67 | [
"BSD-3-Clause"
] | 299 | 2015-07-30T13:18:37.000Z | 2022-03-22T21:27:31.000Z | # Copyright (c) 2016, the GPyOpt Authors
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from .base import AcquisitionBase
from ..util.general import get_quantiles
class AcquisitionLCB(AcquisitionBase):
    """
    GP Lower Confidence Bound acquisition with a constant exploration weight.

    Reference:
        Srinivas et al., "Gaussian Process Optimization in the Bandit Setting:
        No Regret and Experimental Design", Proc. ICML 2010.

    :param model: GPyOpt model wrapper used for predictions
    :param space: GPyOpt domain description
    :param optimizer: optimizer of the acquisition (a GPyOpt optimizer)
    :param cost_withGradients: ignored — LCB does not support a cost function
    :param exploration_weight: positive constant weighting the exploration term

    .. Note:: does not allow to be used with cost
    """

    analytical_gradient_prediction = True

    def __init__(self, model, space, optimizer=None, cost_withGradients=None, exploration_weight=2):
        self.optimizer = optimizer
        super(AcquisitionLCB, self).__init__(model, space, optimizer)
        self.exploration_weight = exploration_weight
        if cost_withGradients is not None:
            # LCB has no meaningful cost formulation, so a supplied cost is dropped.
            print('The set cost function is ignored! LCB acquisition does not make sense with cost.')

    def _compute_acq(self, x):
        """Evaluate the LCB value -mean + w * std at the points ``x``."""
        mean, std = self.model.predict(x)
        return -mean + self.exploration_weight * std

    def _compute_acq_withGradients(self, x):
        """Evaluate the LCB value and its gradient at the points ``x``."""
        mean, std, dmean_dx, dstd_dx = self.model.predict_withGradients(x)
        acquisition = -mean + self.exploration_weight * std
        acquisition_grad = -dmean_dx + self.exploration_weight * dstd_dx
        return acquisition, acquisition_grad
| 35.596154 | 103 | 0.688277 |
901d9efd3ffd97f58dce5dfd6bef6e4deb4fda71 | 1,308 | py | Python | tests/caliban/util/test_tqdm.py | Anon-Artist/caliban | 6d12561f735fe1f9157e45b06d9da002ee5059e5 | [
"ECL-2.0",
"Apache-2.0"
] | 425 | 2020-06-02T18:14:09.000Z | 2022-03-30T17:32:08.000Z | tests/caliban/util/test_tqdm.py | Anon-Artist/caliban | 6d12561f735fe1f9157e45b06d9da002ee5059e5 | [
"ECL-2.0",
"Apache-2.0"
] | 95 | 2020-06-11T22:50:29.000Z | 2022-01-10T06:10:30.000Z | tests/caliban/util/test_tqdm.py | Anon-Artist/caliban | 6d12561f735fe1f9157e45b06d9da002ee5059e5 | [
"ECL-2.0",
"Apache-2.0"
] | 50 | 2020-06-11T22:01:18.000Z | 2022-01-12T19:54:10.000Z | #!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
from tqdm.utils import _term_move_up
import caliban.util.tqdm as ut
def test_carriage_return():
    def run_through(chunks):
        """Write every chunk through a TqdmFile backed by a StringIO, return the sink contents."""
        sink = io.StringIO()
        tqdm_file = ut.TqdmFile(file=sink)
        for chunk in chunks:
            tqdm_file.write(chunk)
            tqdm_file.flush()
        return sink.getvalue()

    # Plain strings pass through with no newline appended.
    assert run_through(["Yo!"]) == "Yo!"
    # Empty writes produce nothing.
    assert run_through(["", "", ""]) == ""
    # A trailing carriage return is converted into a newline...
    assert run_through(["Yo!\r"]) == "Yo!\n"
    # ...and the next write is prefixed so the terminal overwrites the previous line.
    assert run_through(["Yo!\r", "continue"]) == f"Yo!\n{_term_move_up()}\rcontinue"
| 27.25 | 78 | 0.692661 |
62daca9b6f092568638bf3886f9f14ecdd051875 | 6,930 | py | Python | kolibri/core/device/translation.py | yujinyuz/kolibri | 4d5c6c87679f01c0bbb53bb850b8f4c8041ed860 | [
"MIT"
] | null | null | null | kolibri/core/device/translation.py | yujinyuz/kolibri | 4d5c6c87679f01c0bbb53bb850b8f4c8041ed860 | [
"MIT"
] | null | null | null | kolibri/core/device/translation.py | yujinyuz/kolibri | 4d5c6c87679f01c0bbb53bb850b8f4c8041ed860 | [
"MIT"
] | null | null | null | """
Modified from django.utils.translation.trans_real
"""
from __future__ import unicode_literals
import re
from django.conf import settings
from django.core.cache import cache
from django.db.utils import OperationalError
from django.db.utils import ProgrammingError
from django.urls import resolve
from django.urls import Resolver404
from django.urls.resolvers import RegexURLResolver
from django.utils.translation import get_language
from django.utils.translation import LANGUAGE_SESSION_KEY
from django.utils.translation.trans_real import check_for_language
from django.utils.translation.trans_real import get_language_from_path
from django.utils.translation.trans_real import get_languages
from django.utils.translation.trans_real import get_supported_language_variant
from django.utils.translation.trans_real import language_code_re
from django.utils.translation.trans_real import parse_accept_lang_header
# Cache key under which the device-wide language setting is memoized.
DEVICE_LANGUAGE_CACHE_KEY = "DEVICE_LANGUAGE_CACHE_KEY"
def get_device_language():
    """
    Return the supported variant of the device-wide language setting, or None
    when no device setting exists or the database is unavailable.
    """
    from .models import DeviceSettings

    try:
        language_id = cache.get(DEVICE_LANGUAGE_CACHE_KEY)
        if language_id is None:
            # Use a relatively short expiry, in case the device setting is changed in another
            # thread and this cache does not get invalidated.
            language_id = DeviceSettings.objects.get().language_id
            cache.set(DEVICE_LANGUAGE_CACHE_KEY, language_id, 600)
        # Fix: the previous version re-read the key from the cache here; with a
        # dummy or evicting cache backend (or a concurrent invalidation) the
        # freshly loaded DB value was discarded and None was returned.  Using
        # the local value is race-free and backend-independent.
        return get_supported_language_variant(language_id)
    except (
        DeviceSettings.DoesNotExist,
        LookupError,
        OperationalError,
        ProgrammingError,
    ):
        return None
def get_accept_headers_language(request):
    """
    Return the first supported language variant found in the request's
    Accept-Language header, or None when no entry matches.
    """
    header = request.META.get("HTTP_ACCEPT_LANGUAGE", "")
    for code, _priority in parse_accept_lang_header(header):
        if code == "*":
            # Wildcard means "anything" — no explicit preference, stop scanning.
            break
        if not language_code_re.search(code):
            continue
        try:
            return get_supported_language_variant(code)
        except LookupError:
            continue
    return None
def get_settings_language():
    """Return the supported variant of settings.LANGUAGE_CODE, falling back to the raw code."""
    try:
        return get_supported_language_variant(settings.LANGUAGE_CODE)
    except LookupError:
        return settings.LANGUAGE_CODE
def get_language_from_request_and_is_from_path(request):  # noqa complexity-16
    """
    Analyzes the request to find what language the user wants the system to
    show. Only languages listed in settings.LANGUAGES are taken into account.
    If the user requests a sublanguage where we have a main language, we send
    out the main language. It also returns a value to determine if the language code
    was derived from a language code in the URL, or inferred from some other source.

    :returns: tuple of language code, boolean. The former can be None if the url being
    requested does not require translation, otherwise it should be a language code
    from the values in settings.LANGUAGES. The boolean should indicate whether the
    language code was calculated by reading a language code from the requested URL.
    In the case that it was, True should be returned, in the case where the URL language
    code was not used or not present, False is returned.
    """
    try:
        # If this is not a view that needs to be translated, return None, and be done with it!
        # (The "translated" flag is stamped onto callbacks by i18n_patterns below.)
        if not getattr(resolve(request.path_info).func, "translated", False):
            return None, False
    except Resolver404:
        # If this is an unrecognized URL, it may be redirectable to a language prefixed
        # URL, so let the language code setting carry on from here.
        pass
    # Precedence: URL prefix > session > cookie > device setting > Accept-Language > settings.
    supported_lang_codes = get_languages()

    lang_code = get_language_from_path(request.path_info)
    if lang_code in supported_lang_codes and lang_code is not None:
        return lang_code, True

    if hasattr(request, "session"):
        lang_code = request.session.get(LANGUAGE_SESSION_KEY)
        if (
            lang_code in supported_lang_codes
            and lang_code is not None
            and check_for_language(lang_code)
        ):
            return lang_code, False

    lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)
    try:
        return get_supported_language_variant(lang_code), False
    except LookupError:
        pass

    device_language = get_device_language()

    if device_language is not None:
        return device_language, False

    headers_language = get_accept_headers_language(request)

    if headers_language is not None:
        return headers_language, False

    return get_settings_language(), False
def i18n_patterns(urls, prefix=None):
    """
    Add the language code prefix to every URL pattern within this function.

    Vendored from https://github.com/django/django/blob/stable/1.11.x/django/conf/urls/i18n.py
    to allow use of this outside of the root URL conf to prefix plugin non-api urls.
    """
    if not settings.USE_I18N:
        return list(urls)

    def _mark_translated(patterns):
        # Tag every leaf view callback; the flag is consumed by
        # get_language_from_request_and_is_from_path in this module.
        for pattern in patterns:
            sub_patterns = getattr(pattern, "urlpatterns", None)
            if sub_patterns:
                _mark_translated(sub_patterns)
                continue
            callback = getattr(pattern, "callback", None)
            if callback:
                callback.translated = True

    _mark_translated(urls)
    return [LocaleRegexURLResolver(list(urls), prefix=prefix)]
class LocaleRegexURLResolver(RegexURLResolver):
    """
    A URL resolver that always matches the active language code as URL prefix.

    Rather than taking a regex argument, we just override the ``regex``
    function to always return the active language-code as regex.

    Vendored from https://github.com/django/django/blob/stable/1.11.x/django/urls/resolvers.py
    As using the Django internal version inside included URL configs is disallowed.
    Rather than monkey patch Django to allow this for our use case, make a copy of this here
    and use this instead.
    """

    def __init__(
        self,
        urlconf_name,
        default_kwargs=None,
        app_name=None,
        namespace=None,
        prefix_default_language=True,
        prefix=None,
    ):
        super(LocaleRegexURLResolver, self).__init__(
            None, urlconf_name, default_kwargs, app_name, namespace
        )
        # When False, URLs in the default (device/settings) language carry no prefix.
        self.prefix_default_language = prefix_default_language
        # Optional extra prefix appended after the language code.
        self._prefix = prefix

    @property
    def regex(self):
        """Compile (and memoize per language) the prefix regex for the active language."""
        device_language = get_device_language() or get_settings_language()
        language_code = get_language() or device_language
        if language_code not in self._regex_dict:
            # The default language may be served unprefixed; all others get "^<code>/".
            if language_code == device_language and not self.prefix_default_language:
                regex_string = self._prefix or ""
            else:
                regex_string = ("^%s/" % language_code) + (self._prefix or "")
            self._regex_dict[language_code] = re.compile(regex_string, re.UNICODE)
        return self._regex_dict[language_code]
| 37.258065 | 94 | 0.71443 |
810c51b498ed1ce277311369fa429420cbf1efb3 | 18,032 | py | Python | svutil/gen/LatexGen.py | enhoshen/SVutil | 6fff5a4d1f53519e1806f20371ef99cdd350e3e5 | [
"MIT"
] | 2 | 2019-10-03T06:27:02.000Z | 2019-12-27T15:58:24.000Z | svutil/gen/LatexGen.py | enhoshen/SVutil | 6fff5a4d1f53519e1806f20371ef99cdd350e3e5 | [
"MIT"
] | null | null | null | svutil/gen/LatexGen.py | enhoshen/SVutil | 6fff5a4d1f53519e1806f20371ef99cdd350e3e5 | [
"MIT"
] | null | null | null | import os
import sys
from svutil.SVparse import *
from svutil.SVgen import *
from svutil.SVclass import *
import itertools
import numpy as np
import re
@SVgen.user_class
class LatexGen(SVgen):
# TODO $clog2 in latex
def __init__(self, session=None, regbk=None, dut=None):
    """
    LaTeX documentation generator.

    :param session: SVparse session forwarded to the SVgen base.
    :param regbk: register-bank hierarchy name (str) or object to wrap in SVRegbk.
    :param dut: design under test; falls back to the inherited default.
    """
    super().__init__(session=session)
    self.customlst = ["default_input_delay", "struct_lvl"]
    self.default_input_delay = 30
    self.struct_lvl = 2
    if regbk and type(regbk) == str:
        # A string names a hierarchy in the session; resolve it first.
        self.regbk = SVRegbk(self.session.hiers.get(regbk))
    else:
        # assumes self.regbk / self.dut already exist from the SVgen base — TODO confirm
        self.regbk = SVRegbk(self.regbk) if self.regbk else None
    self.dut = dut if dut else self.dut
def reload(self):
    """Reload the parse session, refresh cached state and re-wrap the register bank."""
    self.session.reload()
    self.refresh()
    self.regbk = SVRegbk(self.regbk) if self.regbk else None
def l_(self, s):
    # NOTE(review): this definition is shadowed by the later `l_` near the end
    # of the class (which also handles lists and falsy values); only that later
    # definition is effective at runtime.
    return s.replace("_", "\_")
def parameter_str(self, param):
    """
    Render one module parameter as a LaTeX \\parameter/\\parameterDES entry.
    If the parameter's type resolves to an enum, its literals are appended to
    the description.
    """
    ind = Ind(1)
    param = SVParam(param)
    param_tp = param.tp
    s = ""
    desp = "\n"
    memblist = []  # NOTE(review): unused in this method
    tp = self.cur_module.AllType.get(param_tp)
    if tp:
        memb_tp = [SVType(i) for i in tp]
        if memb_tp[0].tp == "enum":
            en = SVEnums(self.cur_module.AllEnums[memb_tp[0].name])
            desp += self.enuml_desp_str(en, ind)
    name = self.l_(param.name)
    num = self.l_(param.numstr)
    s = f"{ind.b}\\parameter{{ {name} }} {{ \n"
    s += (
        f"{ind[1]}\\parameterDES{{ }} {{ {num} }} {{ None }} {{ {desp}{ind[1]}}}\n"
    )
    s += f"{ind.b}}}\n"
    # "$" (e.g. from $clog2 expressions) must be escaped for LaTeX.
    s = s.replace("$", "\\$")
    return s
def signal_str(self, sig, clk=None, ind=None):
    """
    Render one port as LaTeX \\signal/\\signalDES entries.

    Struct-typed ports are flattened via memlist_append (one entry per leaf
    member, dot-joined names); plain logic/enum ports yield a single entry.

    :param sig: raw port record, wrapped in SVPort.
    :param clk: escaped name of the associated clock (or None / "\\TODO").
    :param ind: Ind indentation helper; defaults to Ind(1).
    """
    ind = Ind(1) if ind is None else ind
    tpfield = SVhier.typefield
    sig = SVPort(sig)
    sig_tp = sig.tp
    s = ""
    memblist = []
    if self.cur_module.AllType.get(sig_tp):
        tp = [SVType(i) for i in self.cur_module.AllType.get(sig_tp)]
    else:
        tp = [None]
    if sig_tp != "logic" and sig_tp != "logic signed" and len(tp) != 1:
        # Multi-member struct port: flatten it member-by-member.
        # for memb in self.cur_module.AllType.get(sig_tp):
        #    memb = SVType(memb)
        #    name = self.l_(sig.name+'.'+memb.name +' '+sig.dimstr)
        #    width = str(memb.bw)
        #    active = 'LOW' if name[-2:] == '_n' else ( 'HIGH' if width =='1' else 'N/A' )
        #    if '-1:0' in width:
        #        width = width.split('-')[0][1:]
        #    io = sig.direction
        #    io = 'Input' if io =='input' else 'Output'
        #    memb_tp = self.cur_module.AllType.get(memb.tp)
        #    desp='\\TODO\n'
        #    if memb_tp :
        #        memb_tp = [SVType(i) for i in memb_tp]
        #        if memb_tp[0].tp =='enum':
        #            desp += self.enuml_desp_str(memb_tp[0].enumliteral, ind )
        #    memblist.append( (name, io, desp, width, active, clk) )
        sig_struct = self.cur_module.AllType.get(sig_tp)
        memblist = self.memlist_append(
            self.cur_module, sig, sig_struct, ind, clk, self.struct_lvl
        )
        memblist = [(self.l_(sig.name + ".") + name, *_) for name, *_ in memblist]
    else:
        # Scalar / enum / single-member port: one entry.
        desp = f"\n{ind[2]}\\TODO\\\\\n"
        if sig_tp == "enum":
            en = SVEnums(self.cur_module.AllEnums[sig.tp])
            desp += self.enuml_desp_str(en, ind)
        if sig_tp != "logic" and sig_tp != "logic signed" and len(tp) == 1:
            try:
                en = SVEnums(self.cur_module.AllEnums[tp[0].name])
                desp += self.enuml_desp_str(en, ind)
            except:
                self.print(tp, sig_tp)
        name = self.l_(sig.name + " " + sig.dimstr)
        width = "1" if sig.bwstr == "" else sig.bwstr.replace("_", "\_")
        # "_n" suffix marks an active-low signal.
        active = (
            "LOW"
            if name[-2:] == "_n"
            else ("HIGH" if width == "1" and not "clk" in sig.name else "N/A")
        )
        if "-1:0" in width:
            # turn "[W-1:0]"-style widths into just "W"
            width = width.split("-")[0][1:]
        io = sig.direction
        io = "Input" if io == "input" else "Output"
        memblist.append((name, io, desp, width, active, clk))
    reged = "Yes" if sig.name in self.cur_module.regs else "No"
    for name, io, desp, width, active, clk in memblist:
        delay = self.default_input_delay if not "clk" in sig.name else "N/A"
        s += f"{ind.b}\\signal{{ {name} }} {{{io}}} {{\n"
        s += f"{ind[1]}\\signalDES{{ {desp} {ind[1]}}} {{ {width} }} {{ {active} }} {{ {clk} }} {{{reged}}} {{ {delay}\\%}} }}\n"
    return s
def memlist_append(self, module, sig, struct, ind, clk, lvl=1):
    """
    Recursively flatten a struct-typed port into a list of
    (name, io, desp, width, active, clk) tuples, one per leaf member.
    Nested struct member names are dot-joined.

    NOTE(review): ``lvl`` is decremented on recursion but never tested, so the
    struct_lvl depth limit is currently not enforced — confirm intent.
    """
    memlist = []
    for memb in struct:
        memb = SVType(memb)
        sub_tp = module.AllType.get(memb.tp)
        if memb.tp != "logic" and memb.tp != "logic signed" and len(sub_tp) != 1:
            # Nested struct: recurse and prefix the member name.
            sub_memlist = [
                (self.l_(memb.name + ".") + name, *_)
                for name, *_ in self.memlist_append(
                    module, sig, sub_tp, ind, clk, lvl - 1
                )
            ]
            self.print(sub_memlist, verbose=2)
        else:
            name = self.l_(memb.name + " " + sig.dimstr)
            width = str(memb.bw)
            # "_n" suffix marks an active-low signal.
            active = (
                "LOW" if name[-2:] == "_n" else ("HIGH" if width == "1" else "N/A")
            )
            if "-1:0" in width:
                # turn "[W-1:0]"-style widths into just "W"
                width = width.split("-")[0][1:]
            io = sig.direction
            io = "Input" if io == "input" else "Output"
            memb_tp = self.cur_module.AllType.get(memb.tp)
            desp = f"\n{ind[2]}\\TODO\\\\\n"
            if memb_tp:
                memb_tp = [SVType(i) for i in memb_tp]
                if memb_tp[0].tp == "enum":
                    en = SVEnums(self.cur_module.AllEnums[memb_tp[0].name])
                    desp += self.enuml_desp_str(en, ind)
            sub_memlist = [(name, io, desp, width, active, clk)]
        memlist += sub_memlist
    return memlist
def reg_mem_map_str(self, reg, regdesp):  # reg is a SVEnuml object
    """
    corresponds to latex macro memmap, the register memory map
    lists

    :param reg: SVEnuml address entry (``reg.num`` is the register index).
    :param regdesp: RegDesp attribute bag prepared by reg_mem_map_desp.
    """
    reg_bsize = regdesp.reg_bsize
    reg_slices = regdesp.reg_slices
    reg_defaults = regdesp.reg_defaults
    reg_bw = regdesp.reg_bw
    reg_bw_str = regdesp.reg_bw_str
    rw = regdesp.rw
    arr = regdesp.arr
    desp = regdesp.desp
    ind = Ind(1)
    name = self.l_(reg.name)
    arr_suf = self.l_(self.cur_regbk.arr_num_suf)
    # byte offset = register index * bytes per register
    ofs = reg.num * reg_bsize
    arr = "" if arr == "" else f" [{name}{arr_suf}]"
    s = f"{ind.b}\\memmap{{\\hyperref[subsubsec:{reg.name.lower()}]{{{name}}}{arr}}}"
    s += f'{{{hex(ofs).upper().replace("X","x")}}}{{{reg_bw}}}{{{rw}}}{{\n'
    s += f"{ind[1]}\\memDES{{\n"
    if desp is None:
        s += (
            f"{ind[2]}\\TODO\n"
            if arr == ""
            else f"{ind[2]}Array register of size {name}{arr_suf}.\n"
        )
    else:
        s += desp + "\n"
    s += f"{ind[1]}}}{{\n"
    reg_slices = self.reg_slice_list(reg_slices) if reg_slices else None
    # RESERVED fields carry no default worth listing; drop the leading one.
    if reg_slices and reg_slices[0][0] == self.cur_regbk.reserved_name:
        reg_slices.pop(0)
    if reg_defaults:
        if reg_slices:
            # one "[slice]: default" line per field
            for _slice, _default in zip(reg_slices, reg_defaults):
                s += f"{ind[2]}{{[{_slice[1]}]}}: {_default.__str__()}\\\\\n"
            s = s[:-2] + "\n"
        else:
            # single-field register: derive the "[msb:0]" range from the bit width
            try:
                reg_bw_str = int(reg_bw_str) - 1
                reg_bw_str = "0" if reg_bw_str == 0 else f"{reg_bw_str}:0"
            except:
                reg_bw_str = f"{reg_bw_str}-1:0"
            s += f"{ind[2]}{{[{reg_bw_str}]}}: {reg_defaults[0].__str__()}\\\\\n"
    else:
        # no parsed defaults: fall back to the third comment field, if any
        reset = "\\TODO" if len(reg.cmt) < 3 else reg.cmt[2]
        reset = self.l_(self.lbrac(reset))
        s += f"{ind[2]}{reset}\n"
    s += f"{ind[1]}}}\n"
    s += f"{ind.b}}}\n"
    return s
def reg_field_str(
    self, reg_name, regdesp
):  # str, self.cur_regbk.regslices, self.cur_regbk.regtypes; align slices to types!
    """
    corresponds to latex macro regfieldtable, the register
    field description.

    :param reg_name: register name (str).
    :param regdesp: RegDesp attribute bag prepared by reg_field_desp; its
        slice/type/default/description lists are zipped positionally.
    """
    reg_slices = regdesp.reg_slices
    reg_types = regdesp.reg_types
    reg_membtypes = regdesp.reg_membtypes
    reg_defaults = regdesp.reg_defaults
    rw = regdesp.rw
    desp = regdesp.desp
    ind = Ind(0)
    _name = reg_name.replace("_", "\_")
    s = f"{ind.b}\\begin{{regfieldtable}}{{{reg_name.lower()}}}{{{_name} register field}}\n"
    reg_slices = self.reg_slice_list(reg_slices)
    # A leading RESERVED field gets a fixed row, then is dropped from the zip below.
    if reg_slices[0][0] == self.cur_regbk.reserved_name:
        s += f"{ind[1]}\\regfield{{{reg_slices[0][1]}}}{{RESERVED}}{{N/A}}{{reserved}}\n"
        reg_slices.pop(0)
    self.print(reg_types, verbose="reg_field_str")
    for _slice, _type, _membtype, _default, _desp in zip(
        reg_slices, reg_types, reg_membtypes, reg_defaults, desp
    ):
        _slice_name = _slice[0].replace("_", "\_")
        s += f"{ind[1]}\\regfield{{{_slice[1]}}}{{{_slice_name}}}{{{rw}}}{{\n"
        s += f"{ind[2]}\\regDES{{\n"
        s += f"{ind[3]}{_desp}\n"
        # Enum-typed fields list their literals inside the description.
        if _membtype and _membtype[0].tp == "enum":
            en = SVEnums(self.cur_regbk.pkg.enums[_membtype[0].name])
            s += self.enuml_desp_str(en, ind=ind + 1)
        s += f"{ind[3]}}}{{{_default.__str__()}}}{{N/A}}\n"
        s += f"{ind[2]}}}\n"
    s += f"{ind.b}\\end{{regfieldtable}}\n"
    return s
def reg_field_sub_sec(self, reg, regdesp):
    """
    Emit the \\subsubsection header and the address/size/access item list
    for one register, labelled for \\hyperref from the memory map.
    """
    ofs = regdesp.ofs
    size = regdesp.size
    rw = regdesp.rw
    arr = regdesp.arr
    reg_bsize = regdesp.reg_bsize
    ind = Ind(0)
    _name = self.l_(reg)
    arr_suf = self.l_(self.cur_regbk.arr_num_suf)
    # Array registers advertise an indexed offset range "+:<bsize><NAME><suffix>".
    arr_ofs = "" if not arr else f"+:{reg_bsize}{_name}{arr_suf}"
    s = f"{ind.b}\\subsubsection{{{_name}}} \\label{{subsubsec:{reg.lower()}}}\n"
    s += f"{ind[1]}\\begin{{paragitemize}}\n"
    s += f"{ind[2]}\\item \\textbf{{Address Offset:}} {ofs}{arr_ofs}\n"
    s += (
        ""
        if arr == ""
        else f"{ind[2]}\\item \\textbf{{Register array size:}} {_name}{arr_ofs}\n"
    )
    s += f"{ind[2]}\\item \\textbf{{Size:}} {size}\n"
    s += f"{ind[2]}\\item \\textbf{{read/write Access:}} {rw}\n"
    s += f"{ind[1]}\\end{{paragitemize}}\n"
    return s
def enuml_desp_str(self, en: SVEnums, ind):
    """
    Return the enum literals and optionally their values
    on separate lines. Useful for struct type members,
    parameter enum values etc.
    """
    desp = f"{ind[2]}\\\\\n"
    for l, n in zip(en.names, en.nums):
        l = self.l_(l)
        n = self.l_(n) if type(n) == str else n
        desp += f"{ind[2]}{l}({n}):\\\\\n"
    # drop the trailing "\\" + newline of the last literal, keep a bare newline
    desp = desp[:-3] + "\n"
    return desp
@SVgen.user_method
def signal_desp(self, module=None, sel=None):
    """
    Render the signal-description table for every port of ``module``
    (default: self.dut); the result is also copied to the clipboard.

    :param sel: optional collection of port names to include; others skipped.
    """
    module = self.dut if not module else module
    self.cur_module = module
    pfield = SVhier.portfield
    s = ""
    clk = None
    last_gp = None
    for p in module.ports:
        _p = SVPort(p)
        if sel and _p.name not in sel:
            continue
        name = _p.name
        # Start of a new port group: emit a bold group header row.
        if _p.group != [] and _p.group[0] != last_gp:
            last_gp = _p.group[0]
            s += f"\n\\emptyrowbold{{3}}\n"
            s += f"\\ganzinmergerowbold{{1.2}}{{3}}{{\\centering \\textbf{{{last_gp}}}}}\n"
        # Reset/clock ports have no associated clock of their own.
        if "rst" in name or "clk" in name:
            s += self.signal_str(p, "\\TODO")
        else:
            s += self.signal_str(p, clk)
        # Remember the most recent clock; following signals are attributed to it.
        if "clk" in name:
            clk = name.replace("_", "\_")
    to_clip(s)
    return s
@SVgen.user_method
def parameter_desp(self, module=None, local=True):
    """
    Emit a LaTeX \\parameter entry for every non-localparam of ``module``
    (default: self.dut); the result is also copied to the clipboard.
    """
    module = self.dut if not module else module
    self.cur_module = module
    # NOTE(review): when local=True this iterates module.paramports, otherwise
    # paramsdetail — the argument name reads inverted; confirm intent.
    param = module.paramsdetail if not local else module.paramports
    s = ""
    for p in param.keys():
        p = module.paramsdetail[p]
        if SVParam(p).paramtype != "localparam":
            s += self.parameter_str(p)
    to_clip(s)
    return s
@SVgen.user_method
def reg_mem_map_desp(self, pkg=None):
    """
    Render the whole register memory map (\\memmap rows with group headers)
    for the register bank ``pkg`` (a hierarchy name) or self.regbk; the
    result is also copied to the clipboard.
    """
    s = ""
    regbk = SVRegbk(pkg) if pkg and type(pkg) == str else self.regbk
    self.cur_regbk = regbk
    last_gp = None
    for reg in regbk.addrs.enumls:
        reg_slices = regbk.regslices.get(reg.name)
        defaults = self.l_(regbk.GetDefaultsStr(reg.name, lst=True))
        self.print(defaults, verbose="RegMemMap")
        if defaults:
            # defaults are listed MSB-first; align them with the slice order
            defaults.reverse()
        reg_bw = regbk.params.get(f"{reg.name}_BW")
        reg_bw = reg_bw.num if reg_bw else None
        reg_bw_str = self.l_(regbk.GetBWStr(reg.name))
        width, rw, arr, *_ = regbk.GetAddrCmt(reg.name)
        # fall back to the width from the address comment when no _BW parameter exists
        reg_bw = width if not reg_bw else reg_bw
        reg_bw_str = width if not reg_bw_str else reg_bw_str
        tps = [i for i in regbk.regtypes.get(reg.name, [None])]
        tps.reverse()
        if len(tps) == 1 and tps[0] is not None and tps[0].tp == "enum":
            # single enum-typed register: list its literals in the description
            desp = f"{Ind(3).b}\\TODO\\\\\n"
            en = SVEnums(self.cur_regbk.pkg.enums[tps[0].name])
            desp += self.enuml_desp_str(en, Ind(1))
        else:
            desp = None
        regdesp = RegDesp(
            regbk.regbsize,
            reg_slices,
            defaults,
            reg_bw,
            reg_bw_str,
            rw,
            arr,
            desp,
            memblst=[
                "reg_bsize",
                "reg_slices",
                "reg_defaults",
                "reg_bw",
                "reg_bw_str",
                "rw",
                "arr",
                "desp",
            ],
        )
        s += self.reg_mem_map_str(reg, regdesp)
        # Start of a new register group: emit a bold group header row.
        if reg.group != [] and reg.group[0] != last_gp:
            last_gp = reg.group[0]
            s += f"\n{Ind(1).b}\\emptyrowbold{{5}}\n"
            s += f"{Ind(1).b}\\ganzinmergerowbold{{1.2}}{{5}}{{\\centering \\textbf{{{last_gp}}}}}\n"
    to_clip(s)
    return s
@SVgen.user_method
def reg_field_desp(self, pkg=None):
    """
    Render one subsection plus a regfieldtable per register of the register
    bank ``pkg`` (a hierarchy name) or self.regbk; the result is also copied
    to the clipboard.
    """
    s = ""
    regbk = SVRegbk(pkg) if pkg and type(pkg) == str else self.regbk
    self.cur_regbk = regbk
    for reg in regbk.regslices:
        # byte offset, rendered as lowercase-x hex
        ofs = regbk.addrsdict[reg].num * regbk.regbsize
        ofs = hex(ofs).upper().replace("X", "x")
        reg_bw = self.l_(regbk.GetBWStr(reg))
        width, rw, arr, *_ = regbk.GetAddrCmt(reg)
        reg_bw = width if not reg_bw else reg_bw
        # First RegDesp feeds the subsection header (offset/size/access).
        regdesp = RegDesp(
            ofs,
            reg_bw + "b",
            rw,
            arr,
            regbk.regbsize,
            memblst=["ofs", "size", "rw", "arr", "reg_bsize"],
        )
        s += self.reg_field_sub_sec(reg, regdesp)
        defaults = self.l_(regbk.GetDefaultsStr(reg, lst=True))
        if defaults:
            # defaults are listed MSB-first; align them with the slice order
            defaults.reverse()
        else:
            defaults = ["" for i in regbk.regtypes[reg]]
        tps = [i for i in regbk.regtypes[reg]]
        tps.reverse()
        membtypes = [i for i in regbk.regmembtypes[reg]]
        membtypes.reverse()
        desp = [f"\\TODO\\\\" for i in membtypes]
        # Second RegDesp feeds the per-field table body.
        regdesp = RegDesp(
            regbk.regslices[reg],
            tps,
            membtypes,
            defaults,
            rw,
            desp,
            memblst=[
                "reg_slices",
                "reg_types",
                "reg_membtypes",
                "reg_defaults",
                "rw",
                "desp",
            ],
        )
        s += self.reg_field_str(reg, regdesp)
        s += "\n"
    to_clip(s)
    return s
def extract_desp(self, s):
    """
    Extract \\regDES{...} description bodies back out of previously
    generated LaTeX. Not implemented yet; currently returns None.
    """
    # Fix: the original pattern r"(\regDES{)([^}]*)(})" was broken — inside a
    # raw string "\r" reaches `re` unescaped, where it matches a carriage
    # return, and "{"/"}" should be escaped.  Corrected pattern kept for the
    # eventual implementation.
    regfpat = r"(\\regDES\{)([^}]*)(\})"
    pass
def reg_slice_str(self, _slice):
    """
    Render the bit-slice list of one register field as a comma-separated
    string such as "7:4, 2".  A pair with equal endpoints prints as a single
    bit index, otherwise as "msb:lsb".  Meant for fields owning multiple
    slices, such as RESERVED.
    """
    parts = []
    for _ss in _slice[1]:
        if _ss[0] == _ss[1]:
            parts.append(f"{_ss[0]}")
        else:
            parts.append(f"{_ss[0]}:{_ss[1]}")
    return ", ".join(parts)
def reg_slice_list(self, slices):
    # Pair each field name with its rendered slice string, e.g. ("FIELD", "7:4, 0").
    return [(_slice[0], self.reg_slice_str(_slice)) for _slice in slices]
def lbrac(self, s):
    """Brace every square bracket for LaTeX: "[" -> "{[" and "]" -> "]}"."""
    table = str.maketrans({"[": "{[", "]": "]}"})
    return s.translate(table)
def l_(self, s):
    """
    LaTeX-escape underscores ("_" -> "\\_").

    Accepts a str (returns the escaped string, or None for an empty string)
    or a list (escaped element-wise, recursively).  Any other input — including
    None — yields None.
    """
    if type(s) is str:
        return None if not s else s.replace("_", "\\_")
    if type(s) is list:
        return [self.l_(item) for item in s]
    return None
def str2_lst(self, s):
    """
    LaTeX-escape ``s`` then split it on ",", "'{", "{" and "}" via
    SVstr.replace_split.  Falsy input is returned unchanged.
    """
    if not s:
        return s
    s = self.l_(s)
    s = SVstr(s).replace_split([",", "'{", "{", "}"])
    return s
class RegDesp:
    """
    Lightweight attribute bag describing one register for the LaTeX
    generator methods.

    Positional values are bound, in order, to the attribute names listed in
    ``memblst``; keyword arguments become attributes directly.

    Raises:
        IndexError: if more positional values are supplied than names in
            ``memblst`` (unchanged from the original behaviour).
    """

    def __init__(self, *arg, memblst=(), **kwargs):
        # ``memblst`` previously defaulted to a mutable list ([]); an empty
        # tuple avoids the shared-mutable-default pitfall and indexes the same.
        for i, v in enumerate(arg):
            setattr(self, memblst[i], v)
        for k, v in kwargs.items():
            setattr(self, k, v)
if __name__ == "__main__":
    # Manual smoke entry point: build the generator with default session/regbk/dut.
    g = LatexGen()
| 37.802935 | 134 | 0.481755 |
96e08a40e55218f1e7ae62b37394da859bd1d6eb | 195 | py | Python | fileupload/constants.py | myacera100/ai-django-fileupload | 25c0cbd10c93a4556cec82cc5ccaf3e21e0335ec | [
"MIT"
] | null | null | null | fileupload/constants.py | myacera100/ai-django-fileupload | 25c0cbd10c93a4556cec82cc5ccaf3e21e0335ec | [
"MIT"
] | null | null | null | fileupload/constants.py | myacera100/ai-django-fileupload | 25c0cbd10c93a4556cec82cc5ccaf3e21e0335ec | [
"MIT"
] | null | null | null | from django.templatetags.static import static
# Fallback thumbnail used when an attachment has no preview image of its own.
UPLOADER_DEFAULT_THUMBNAIL = static('img/default-thumbnail.png')
# Presumably gates uploading behind authentication when True — confirm in views.
UPLOADER_LOGIN_REQUIRED = False
# Relative directory where uploaded attachments are stored.
UPLOADER_UPLOAD_DIRECTORY = 'media/attachments/'
| 24.375 | 64 | 0.835897 |
5e24bc733f0b142ea9c64d15ee861780d5bceab7 | 1,167 | py | Python | raporty_siecobywatelska_pl/ranking/views.py | kbiernat/raporty_siecobywatelska_pl | 6657f21f5668411f5ae3e2c13effd0d7cf42035f | [
"MIT"
] | null | null | null | raporty_siecobywatelska_pl/ranking/views.py | kbiernat/raporty_siecobywatelska_pl | 6657f21f5668411f5ae3e2c13effd0d7cf42035f | [
"MIT"
] | null | null | null | raporty_siecobywatelska_pl/ranking/views.py | kbiernat/raporty_siecobywatelska_pl | 6657f21f5668411f5ae3e2c13effd0d7cf42035f | [
"MIT"
] | null | null | null | from audioop import reverse
from django.db.models import Count
from django.utils.functional import cached_property
from django.views.generic import ListView, DetailView, RedirectView
from raporty_siecobywatelska_pl.ranking import models
from raporty_siecobywatelska_pl.ranking.models import Ranking
class RankingRedirect(RedirectView):
    """Permanently redirect to the canonical URL of the request's ranking."""

    permanent = True

    def get_redirect_url(self, *args, **kwargs):
        # request.ranking is presumably attached by middleware — confirm.
        return self.request.ranking.get_absolute_url()
class RankingList(ListView):
    """Paginated list of rankings, 10 per page."""

    model = models.Ranking
    paginate_by = 10
class RankingDetail(DetailView):
    """Detail page for one ranking, with aggregate counts exposed as ``stats``."""

    model = models.Ranking
    slug_url_kwarg = "ranking_slug"

    def get_queryset(self):
        # Prefetch groups to avoid per-group queries when rendering the template.
        return super(RankingDetail, self).get_queryset()\
            .prefetch_related('group_set')

    def get_context_data(self, **kwargs):
        return super().get_context_data(**kwargs, stats=self.stats)

    @cached_property
    def stats(self):
        """Counts of institutions, articles and groups for the request's ranking (one query, cached)."""
        return Ranking.objects.filter(pk=self.request.ranking.pk)\
            .annotate(num_institution=Count('institutions'))\
            .annotate(num_article=Count('article'))\
            .annotate(num_group=Count('group')).first()
aab4e2eff247aac34c8623da6afee9ad63010590 | 2,080 | py | Python | rltoolkit/algorithms/a2c/test/test_a2c.py | raznem/rlex | d24b964d80067becc81d86f6ce87e5be413b7049 | [
"MIT"
] | null | null | null | rltoolkit/algorithms/a2c/test/test_a2c.py | raznem/rlex | d24b964d80067becc81d86f6ce87e5be413b7049 | [
"MIT"
] | null | null | null | rltoolkit/algorithms/a2c/test/test_a2c.py | raznem/rlex | d24b964d80067becc81d86f6ce87e5be413b7049 | [
"MIT"
] | null | null | null | import copy
import numpy as np
import pytest
import torch
from rltoolkit.algorithms.a2c.a2c import A2C
from rltoolkit.buffer import Memory
@pytest.fixture()
def memory():
    """
    Build a Memory buffer holding 4 rollouts over 12 timesteps of a 2-d
    environment.  Episodes end (``ends``) at steps 3, 6, 9 and 11; ``done``
    is set at 3, 6 and 11, so the episode ending at step 9 is a truncation
    without a terminal flag.
    """
    buffer = Memory()
    observations = np.ones((13, 2))
    actions = np.ones((12, 2))
    rewards = np.array([i for i in range(10)] + [1, 2])
    dones = np.zeros(12)
    # Actions are scaled per-step by the reward so each step is distinguishable.
    actions = (actions.T * rewards).T
    dones[3] = 1
    dones[6] = 1
    dones[11] = 1
    ends = copy.deepcopy(dones)
    ends[9] = 1
    rollouts = 0
    i = 0
    while rollouts < 4:
        rollouts += 1
        obs = observations[i]
        end = False
        prev_idx = buffer.add_obs(torch.tensor(obs).unsqueeze(dim=0))
        while not end:
            action = actions[i]
            obs, rew, done, end = observations[i + 1], rewards[i], dones[i], ends[i]
            next_idx = buffer.add_obs(torch.tensor(obs).unsqueeze(dim=0))
            buffer.add_timestep(prev_idx, next_idx, action, actions, rew, done, end)
            prev_idx = next_idx
            i += 1
        buffer.end_rollout()
    buffer.update_obs_mean_std()
    return buffer
def test_calculate_q_value(memory):
    """
    With a constant critic of 10 and gamma = 0.5, the expected q value is
    r + 0.5 * 10 = r + 5 on non-terminal steps and just r on terminal ones
    (done at steps 3, 6 and 11 of the fixture).
    """
    expected_result = torch.tensor(
        [5.0, 6.0, 7.0, 3.0, 9.0, 10.0, 6.0, 12.0, 13.0, 14.0, 6.0, 2.0]
    )

    def critic_func(*args):
        # Stub critic: every next observation is valued at 10.
        return torch.tensor([10])

    a2c = A2C(gamma=0.5)
    a2c._critic = critic_func
    next_obs = memory.norm_next_obs
    reward = torch.tensor(memory.rewards, dtype=torch.float32, device=a2c.device)
    done = torch.tensor(memory.done, dtype=torch.float32, device=a2c.device)
    result = a2c.calculate_q_val(reward, done, next_obs)
    assert torch.equal(expected_result, result)
def test_get_obs_mean_std():
    """Initial obs statistics are None without normalization, else zeros/ones of ob_dim."""
    model = A2C(obs_norm_alpha=None)
    mean, std = model._get_initial_obs_mean_std(None)
    assert mean is None
    assert std is None

    model = A2C(obs_norm_alpha=0.9)
    model.ob_dim = 2
    mean, std = model._get_initial_obs_mean_std(0.9)
    np.testing.assert_array_equal(mean.numpy(), np.zeros(2))
    np.testing.assert_array_equal(std.numpy(), np.ones(2))
| 28.493151 | 84 | 0.635577 |
75ccf99f9fde0ccad0914678f429c04f5d2da35f | 10,512 | py | Python | timezonefinder/helpers_numba.py | Tmdean/timezonefinder | 7ddfd1c1eea459336b76673122665afd5284a2ca | [
"MIT"
] | null | null | null | timezonefinder/helpers_numba.py | Tmdean/timezonefinder | 7ddfd1c1eea459336b76673122665afd5284a2ca | [
"MIT"
] | null | null | null | timezonefinder/helpers_numba.py | Tmdean/timezonefinder | 7ddfd1c1eea459336b76673122665afd5284a2ca | [
"MIT"
] | null | null | null | from __future__ import absolute_import, division, print_function, unicode_literals
from math import asin, atan2, ceil, cos, degrees, radians, sin, sqrt
from numba import b1, f8, i2, i4, jit, typeof, u2, u8
# # for Ahead-Of-Time Compilation:
# from numba.pycc import CC
# cc = CC('compiled_helpers', )
# # Uncomment the following line to print out the compilation steps
# # cc.verbose = True
# Numba type signatures for functions returning 3- and 2-tuples of float64.
dtype_3floattuple = typeof((1.0, 1.0, 1.0))
dtype_2floattuple = typeof((1.0, 1.0))
# @cc.export('inside_polygon', 'b1(i4, i4, i4[:, :])')
@jit(b1(i4, i4, i4[:, :]), nopython=True, cache=True)
def inside_polygon(x, y, coords):
    """
    Even-odd (crossing number) point-in-polygon test.

    ``coords`` stores the polygon as two parallel rows: ``coords[0]`` holds
    the vertex x coordinates and ``coords[1]`` the y coordinates.  Each edge
    that crosses the horizontal ray running right from (x, y) toggles
    ``contained``.
    """
    contained = False
    # the edge from the last to the first point is checked first
    i = -1
    y1 = coords[1][-1]
    y_gt_y1 = y > y1
    for y2 in coords[1]:
        y_gt_y2 = y > y2
        if y_gt_y1:
            if not y_gt_y2:
                # endpoints straddle the ray height (y1 < y <= y2)
                x1 = coords[0][i]
                x2 = coords[0][i + 1]
                # only crossings "right" of the point should be counted
                x1GEx = x <= x1
                x2GEx = x <= x2
                # compare the slope of the lines [p1-p2] and [p-p2]
                # depending on the position of p2 this determines whether the polygon edge is right or left of the point
                # to avoid expensive division the divisors (of the slope dy/dx) are brought to the other side
                # ( dy/dx > a == dy > a * dx )
                if (x1GEx and x2GEx) or ((x1GEx or x2GEx) and (y2 - y) * (x2 - x1) <= (y2 - y1) * (x2 - x)):
                    contained = not contained
        else:
            if y_gt_y2:
                # same crossing test for an edge straddling downward (y2 < y <= y1)
                x1 = coords[0][i]
                x2 = coords[0][i + 1]
                # only crossings "right" of the point should be counted
                x1GEx = x <= x1
                x2GEx = x <= x2
                if (x1GEx and x2GEx) or ((x1GEx or x2GEx) and (y2 - y) * (x2 - x1) >= (y2 - y1) * (x2 - x)):
                    contained = not contained

        y1 = y2
        y_gt_y1 = y_gt_y2
        i += 1

    return contained
# @cc.export('all_the_same', i2(u8, u8, u2[:]))
@jit(i2(u8, u8, u2[:]), nopython=True, cache=True)
def all_the_same(pointer, length, id_list):
    """
    :param pointer: from that element the list is checked for equality
    :param length: exclusive upper index of the range being checked
    :param id_list: list mustn't be empty or Null; there has to be at least one element
    :return: the first encountered element if, starting from ``pointer``, all
        elements up to ``length`` are equal; otherwise -1
    """
    first = id_list[pointer]
    for idx in range(pointer + 1, length):
        if id_list[idx] != first:
            return -1
    return first
# @cc.export('cartesian2rad', dtype_2floattuple(f8, f8, f8))
@jit(dtype_2floattuple(f8, f8, f8), nopython=True, cache=True)
def cartesian2rad(x, y, z):
    """Convert a unit-sphere cartesian point to (longitude, latitude) in radians."""
    lng_rad = atan2(y, x)
    lat_rad = asin(z)
    return lng_rad, lat_rad
# @cc.export('cartesian2coords', dtype_2floattuple(f8, f8, f8))
@jit(dtype_2floattuple(f8, f8, f8), nopython=True, cache=True)
def cartesian2coords(x, y, z):
    """Convert a unit-sphere cartesian point to (longitude, latitude) in degrees."""
    lng_deg = degrees(atan2(y, x))
    lat_deg = degrees(asin(z))
    return lng_deg, lat_deg
# @cc.export('x_rotate', dtype_3floattuple(f8, dtype_3floattuple))
@jit(dtype_3floattuple(f8, dtype_3floattuple), nopython=True, cache=True)
def x_rotate(rad, point):
    """Rotate ``point`` (x, y, z) about the x-axis by ``rad`` radians; x is unchanged.

    Attention: this rotation uses radians!
    """
    px, py, pz = point[0], point[1], point[2]
    s = sin(rad)
    c = cos(rad)
    return px, py * c + pz * s, pz * c - py * s
# @cc.export('y_rotate', dtype_3floattuple(f8, dtype_3floattuple))
@jit(dtype_3floattuple(f8, dtype_3floattuple), nopython=True, cache=True)
def y_rotate(rad, point):
    """Rotate ``point`` (x, y, z) about the y-axis; y is unchanged.

    This is actually a rotation by -rad (exploits the sin/cos symmetry).
    """
    px, py, pz = point[0], point[1], point[2]
    s = sin(rad)
    c = cos(rad)
    return px * c + pz * s, py, pz * c - px * s
# @cc.export('coords2cartesian', dtype_3floattuple(f8, f8))
@jit(dtype_3floattuple(f8, f8), nopython=True, cache=True)
def coords2cartesian(lng_rad, lat_rad):
    """Convert (longitude, latitude) in radians to a cartesian point on the unit sphere."""
    cos_lat = cos(lat_rad)
    return cos(lng_rad) * cos_lat, sin(lng_rad) * cos_lat, sin(lat_rad)
# @cc.export('distance_to_point_on_equator', f8(f8, f8, f8))
@jit(f8(f8, f8, f8), nopython=True, cache=True)
def distance_to_point_on_equator(lng_rad, lat_rad, lng_rad_p1):
    """
    uses the simplified haversine formula for this special case (lat_p1 = 0)
    :param lng_rad: the longitude of the point in radians
    :param lat_rad: the latitude of the point
    :param lng_rad_p1: the latitude of the point1 on the equator (lat=0)
    :return: distance between the point and p1 (lng_rad_p1,0) in km
    this is only an approximation since the earth is not a real sphere
    """
    # 2* for the distance in rad and * 12742 (mean diameter of earth) for the distance in km
    # (the factor 2 of the haversine formula is folded into 12742 = 2 * 6371 km)
    return 12742 * asin(sqrt(((sin(lat_rad / 2)) ** 2 + cos(lat_rad) * (sin((lng_rad - lng_rad_p1) / 2)) ** 2)))
# @cc.export('haversine', f8(f8, f8, f8, f8))
@jit(f8(f8, f8, f8, f8), nopython=True, cache=True)
def haversine(lng_p1, lat_p1, lng_p2, lat_p2):
    """
    great-circle distance between two points via the haversine formula
    :param lng_p1: the longitude of point 1 in radians
    :param lat_p1: the latitude of point 1 in radians
    :param lng_p2: the longitude of point 1 in radians
    :param lat_p2: the latitude of point 1 in radians
    :return: distance between p1 and p2 in km
    this is only an approximation since the earth is not a real sphere
    """
    # 2* for the distance in rad and * 12742(mean diameter of earth) for the distance in km
    return 12742 * asin(
        sqrt(((sin((lat_p1 - lat_p2) / 2)) ** 2 + cos(lat_p2) * cos(lat_p1) * (sin((lng_p1 - lng_p2) / 2)) ** 2)))
# @cc.export('compute_min_distance', f8(f8, f8, f8, f8, f8, f8, f8, f8))
@jit(f8(f8, f8, f8, f8, f8, f8, f8, f8), nopython=True, cache=True)
def compute_min_distance(lng_rad, lat_rad, p0_lng, p0_lat, pm1_lng, pm1_lat, p1_lng, p1_lat):
    """
    :param lng_rad: lng of px in radians
    :param lat_rad: lat of px in radians
    :param p0_lng: lng of p0 in radians
    :param p0_lat: lat of p0 in radians
    :param pm1_lng: lng of pm1 in radians
    :param pm1_lat: lat of pm1 in radians
    :param p1_lng: lng of p1 in radians
    :param p1_lat: lat of p1 in radians
    :return: shortest distance between pX and the polygon section (pm1---p0---p1) in radians
    """
    # rotate coordinate system (= all the points) so that p0 would have lat_rad=lng_rad=0 (=origin)
    # z rotation is simply subtracting the lng_rad
    # convert the points to the cartesian coordinate system
    px_cartesian = coords2cartesian(lng_rad - p0_lng, lat_rad)
    p1_cartesian = coords2cartesian(p1_lng - p0_lng, p1_lat)
    pm1_cartesian = coords2cartesian(pm1_lng - p0_lng, pm1_lat)
    # y-rotation brings p0 onto the origin of the (lng, lat) system
    px_cartesian = y_rotate(p0_lat, px_cartesian)
    p1_cartesian = y_rotate(p0_lat, p1_cartesian)
    pm1_cartesian = y_rotate(p0_lat, pm1_cartesian)
    # for both p1 and pm1 separately do:
    # rotate coordinate system so that this point also has lat_p1_rad=0 and lng_p1_rad>0 (p0 does not change!)
    rotation_rad = atan2(p1_cartesian[2], p1_cartesian[1])
    p1_cartesian = x_rotate(rotation_rad, p1_cartesian)
    lng_p1_rad = atan2(p1_cartesian[1], p1_cartesian[0])
    px_retrans_rad = cartesian2rad(*x_rotate(rotation_rad, px_cartesian))
    # if lng_rad of px is between 0 (<-point1) and lng_rad of point 2:
    # the distance between point x and the 'equator' is the shortest
    # if the point is not between p0 and p1 the distance to the closest of the two points should be used
    # so clamp/clip the lng_rad of px to the interval of [0; lng_rad p1] and compute the distance with it
    temp_distance = distance_to_point_on_equator(px_retrans_rad[0], px_retrans_rad[1],
                                                 max(min(px_retrans_rad[0], lng_p1_rad), 0))
    # ATTENTION: vars are being reused. p1 is actually pm1 here!
    rotation_rad = atan2(pm1_cartesian[2], pm1_cartesian[1])
    p1_cartesian = x_rotate(rotation_rad, pm1_cartesian)
    lng_p1_rad = atan2(p1_cartesian[1], p1_cartesian[0])
    px_retrans_rad = cartesian2rad(*x_rotate(rotation_rad, px_cartesian))
    # the overall minimum is the smaller of the two edge distances
    return min(temp_distance, distance_to_point_on_equator(px_retrans_rad[0], px_retrans_rad[1],
                                                           max(min(px_retrans_rad[0], lng_p1_rad), 0)))
# @cc.export('int2coord', f8(i4))
@jit(f8(i4), nopython=True, cache=True)
def int2coord(i4):
    # Convert a fixed-point int32 coordinate (degrees * 10**7) to float degrees.
    # NOTE(review): the parameter name `i4` shadows the numba type `i4` used in
    # the signature above -- harmless at runtime, but worth renaming.
    return float(i4 / 10 ** 7)
# @cc.export('coord2int', i4(f8))
@jit(i4(f8), nopython=True, cache=True)
def coord2int(double):
    # Inverse of int2coord: float degrees -> fixed-point int32 (degrees * 10**7).
    return int(double * 10 ** 7)
# @cc.export('distance_to_polygon_exact', f8(f8, f8, i4, i4[:, :], f8[:, :]))
@jit(f8(f8, f8, i4, i4[:, :], f8[:, :]), nopython=True, cache=True)
def distance_to_polygon_exact(lng_rad, lat_rad, nr_points, points, trans_points):
    # Exact min distance from (lng_rad, lat_rad) to a polygon boundary:
    # checks edge segments (pm1---p0---p1) via compute_min_distance, walking
    # the vertex list two at a time.  trans_points is a caller-provided
    # scratch buffer reused to hold the converted radian coordinates.
    # transform all points (int) to coords (float)
    for i in range(nr_points):
        trans_points[0][i] = radians(int2coord(points[0][i]))
        trans_points[1][i] = radians(int2coord(points[1][i]))
    # check points -2, -1, 0 first
    pm1_lng = trans_points[0][0]
    pm1_lat = trans_points[1][0]
    p1_lng = trans_points[0][-2]
    p1_lat = trans_points[1][-2]
    min_distance = compute_min_distance(lng_rad, lat_rad, trans_points[0][-1], trans_points[1][-1], pm1_lng, pm1_lat,
                                        p1_lng, p1_lat)
    index_p0 = 1
    index_p1 = 2
    # advance p0/p1 by two each step; pm1 trails behind as the previous p1
    for i in range(int(ceil((nr_points / 2) - 1))):
        p1_lng = trans_points[0][index_p1]
        p1_lat = trans_points[1][index_p1]
        min_distance = min(min_distance,
                           compute_min_distance(lng_rad, lat_rad, trans_points[0][index_p0], trans_points[1][index_p0],
                                                pm1_lng, pm1_lat, p1_lng, p1_lat))
        index_p0 += 2
        index_p1 += 2
        pm1_lng = p1_lng
        pm1_lat = p1_lat
    return min_distance
# @cc.export('distance_to_polygon', f8(f8, f8, i4, i4[:, :]))
@jit(f8(f8, f8, i4, i4[:, :]), nopython=True, cache=True)
def distance_to_polygon(lng_rad, lat_rad, nr_points, points):
    # Approximate min distance: vertex-to-point haversine only (no edge
    # projection) -- cheaper than distance_to_polygon_exact.
    # the maximum possible distance is half the perimeter of earth pi * 12743km = 40,054.xxx km
    min_distance = 40100000
    for i in range(nr_points):
        min_distance = min(min_distance, haversine(lng_rad, lat_rad, radians(int2coord(points[0][i])),
                                                   radians(int2coord(points[1][i]))))
    return min_distance
# if __name__ == "__main__":
# cc.compile()
| 41.223529 | 120 | 0.641457 |
201be016bf89436624195239791807e2dbca7e4a | 1,887 | py | Python | q2_types/distance_matrix/tests/test_format.py | Leo-Simpson/q2-types | 2946fda4fe2817fde1646ddcdc8e8ebf41abe5a4 | [
"BSD-3-Clause"
] | null | null | null | q2_types/distance_matrix/tests/test_format.py | Leo-Simpson/q2-types | 2946fda4fe2817fde1646ddcdc8e8ebf41abe5a4 | [
"BSD-3-Clause"
] | null | null | null | q2_types/distance_matrix/tests/test_format.py | Leo-Simpson/q2-types | 2946fda4fe2817fde1646ddcdc8e8ebf41abe5a4 | [
"BSD-3-Clause"
] | null | null | null | # ----------------------------------------------------------------------------
# Copyright (c) 2016-2020, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import os.path
import shutil
import unittest
from q2_types.distance_matrix import LSMatFormat, DistanceMatrixDirectoryFormat
from qiime2.plugin.testing import TestPluginBase
from qiime2.plugin import ValidationError
class TestFormats(TestPluginBase):
    """Validation tests for the LSMat distance-matrix file format and its
    single-file directory format wrapper."""

    # QIIME 2 test plugin package providing the data files used below.
    package = 'q2_types.distance_matrix.tests'

    def test_lsmat_format_validate_positive(self):
        # Well-formed matrices of several sizes must all pass validation.
        filenames = ('distance-matrix-1x1.tsv', 'distance-matrix-2x2.tsv',
                     'distance-matrix-NxN.tsv')
        for filename in filenames:
            filepath = self.get_data_path(filename)
            format = LSMatFormat(filepath, mode='r')
            # Should not error.
            format.validate()

    def test_lsmat_format_validate_negative(self):
        # A non-LSMat file must raise a ValidationError mentioning 'LSMat'.
        filepath = self.get_data_path('not-lsmat')
        format = LSMatFormat(filepath, mode='r')
        with self.assertRaisesRegex(ValidationError, 'LSMat'):
            format.validate()

    def test_distance_matrix_directory_format(self):
        # This test exists mainly to assert that the single-file directory
        # format is defined and functional. More extensive testing is performed
        # on its underlying format (LSMatFormat).
        filepath = self.get_data_path('distance-matrix-NxN.tsv')
        shutil.copy(filepath,
                    os.path.join(self.temp_dir.name, 'distance-matrix.tsv'))
        format = DistanceMatrixDirectoryFormat(self.temp_dir.name, mode='r')
        # Should not error.
        format.validate()
# Allow running this test module directly (outside the pytest/QIIME runner).
if __name__ == "__main__":
    unittest.main()
| 35.603774 | 79 | 0.63487 |
ce71aa19be52b6003a704ffc8686cbeedf1a7b16 | 1,161 | py | Python | src/olympia/addons/api_urls.py | makyen/Mozilla-addons-server | 555d9f31cc4b00799466f16c8809edd5f1858ab8 | [
"BSD-3-Clause"
] | 1 | 2020-12-03T10:02:15.000Z | 2020-12-03T10:02:15.000Z | src/olympia/addons/api_urls.py | makyen/Mozilla-addons-server | 555d9f31cc4b00799466f16c8809edd5f1858ab8 | [
"BSD-3-Clause"
] | null | null | null | src/olympia/addons/api_urls.py | makyen/Mozilla-addons-server | 555d9f31cc4b00799466f16c8809edd5f1858ab8 | [
"BSD-3-Clause"
] | null | null | null | from django.conf.urls import include, url
from rest_framework.routers import SimpleRouter
from rest_framework_nested.routers import NestedSimpleRouter
from olympia.activity.views import VersionReviewNotesViewSet
from .views import (
AddonFeaturedView, AddonSearchView, AddonVersionViewSet, AddonViewSet,
StaticCategoryView)
# Top-level router: /addon/{pk}/ endpoints.
addons = SimpleRouter()
addons.register(r'addon', AddonViewSet)

# Router for children of /addons/addon/{addon_pk}/.
sub_addons = NestedSimpleRouter(addons, r'addon', lookup='addon')
sub_addons.register('versions', AddonVersionViewSet, base_name='addon-version')

# Review notes nested one level deeper, under a specific version.
sub_versions = NestedSimpleRouter(sub_addons, r'versions', lookup='version')
sub_versions.register(r'reviewnotes', VersionReviewNotesViewSet,
                      base_name='version-reviewnotes')

# Router-generated URLs first, then the standalone class-based views.
urlpatterns = [
    url(r'', include(addons.urls)),
    url(r'', include(sub_addons.urls)),
    url(r'', include(sub_versions.urls)),
    url(r'^search/$', AddonSearchView.as_view(), name='addon-search'),
    url(r'^featured/$', AddonFeaturedView.as_view(), name='addon-featured'),
    url(r'^categories/$', StaticCategoryView.as_view(), name='category-list'),
]
| 37.451613 | 79 | 0.752799 |
594d795ecd54c724bde8f6c67c16cc8f07734722 | 3,959 | py | Python | qa/rpc-tests/maxblocksinflight.py | mirzaei-ce/linux-ausbit | 6d4f8ba69c46ce5c03c79fbaecaec69c6a54acd5 | [
"MIT"
] | null | null | null | qa/rpc-tests/maxblocksinflight.py | mirzaei-ce/linux-ausbit | 6d4f8ba69c46ce5c03c79fbaecaec69c6a54acd5 | [
"MIT"
] | null | null | null | qa/rpc-tests/maxblocksinflight.py | mirzaei-ce/linux-ausbit | 6d4f8ba69c46ce5c03c79fbaecaec69c6a54acd5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.mininode import *
from test_framework.test_framework import AusbitTestFramework
from test_framework.util import *
import logging
'''
In this test we connect to one node over p2p, send it numerous inv's, and
compare the resulting number of getdata requests to a max allowed value. We
test for exceeding 128 blocks in flight, which was the limit an 0.9 client will
reach. [0.10 clients shouldn't request more than 16 from a single peer.]
'''
MAX_REQUESTS = 128
class TestManager(NodeConnCB):
    """P2P callback handler (Python 2): floods a node with block invs and
    counts the resulting getdata requests per block hash."""

    # set up NodeConnCB callbacks, overriding base class
    def on_getdata(self, conn, message):
        self.log.debug("got getdata %s" % repr(message))
        # Log the requests
        for inv in message.inv:
            if inv.hash not in self.blockReqCounts:
                self.blockReqCounts[inv.hash] = 0
            self.blockReqCounts[inv.hash] += 1

    def on_close(self, conn):
        # A disconnect before run() finished is a test failure.
        if not self.disconnectOkay:
            raise EarlyDisconnectError(0)

    def __init__(self):
        NodeConnCB.__init__(self)
        self.log = logging.getLogger("BlockRelayTest")

    def add_new_connection(self, connection):
        # Reset per-connection state before a test run.
        self.connection = connection
        self.blockReqCounts = {}
        self.disconnectOkay = False

    def run(self):
        try:
            fail = False
            self.connection.rpc.generate(1) # Leave IBD
            # Each round sends this many fake block invs in one batch.
            numBlocksToGenerate = [ 8, 16, 128, 1024 ]
            for count in range(len(numBlocksToGenerate)):
                current_invs = []
                for i in range(numBlocksToGenerate[count]):
                    current_invs.append(CInv(2, random.randrange(0, 1<<256)))
                    if len(current_invs) >= 50000:
                        # Flush: a single inv message holds at most 50000 entries.
                        self.connection.send_message(msg_inv(current_invs))
                        current_invs = []
                if len(current_invs) > 0:
                    self.connection.send_message(msg_inv(current_invs))
                # Wait and see how many blocks were requested
                time.sleep(2)
                total_requests = 0
                with mininode_lock:
                    for key in self.blockReqCounts:
                        total_requests += self.blockReqCounts[key]
                        if self.blockReqCounts[key] > 1:
                            raise AssertionError("Error, test failed: block %064x requested more than once" % key)
                if total_requests > MAX_REQUESTS:
                    raise AssertionError("Error, too many blocks (%d) requested" % total_requests)
                print "Round %d: success (total requests: %d)" % (count, total_requests)
        except AssertionError as e:
            print "TEST FAILED: ", e.args
        # Mark the teardown disconnect as expected before closing.
        self.disconnectOkay = True
        self.connection.disconnect_node()
class MaxBlocksInFlightTest(AusbitTestFramework):
    """Test-framework wrapper: starts one node and drives TestManager."""

    def add_options(self, parser):
        parser.add_option("--testbinary", dest="testbinary",
                          default=os.getenv("AUSBITD", "ausbitd"),
                          help="Binary to test max block requests behavior")

    def setup_chain(self):
        print "Initializing test directory "+self.options.tmpdir
        initialize_chain_clean(self.options.tmpdir, 1)

    def setup_network(self):
        # Whitelist localhost so our p2p test connection is not rate-limited.
        self.nodes = start_nodes(1, self.options.tmpdir,
                                 extra_args=[['-debug', '-whitelist=127.0.0.1']],
                                 binary=[self.options.testbinary])

    def run_test(self):
        test = TestManager()
        test.add_new_connection(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test))
        NetworkThread().start() # Start up network handling in another thread
        test.run()
# Script entry point: run the whole p2p in-flight-blocks test.
if __name__ == '__main__':
    MaxBlocksInFlightTest().main()
| 39.19802 | 114 | 0.608234 |
fe91f28ddad3f6a55822aeb8bf0de8ea1d52fddd | 7,895 | py | Python | Projects/PlantMaker/archive/20100520/src/scheduler.py | fredmorcos/attic | 0da3b94aa525df59ddc977c32cb71c243ffd0dbd | [
"Unlicense"
] | 2 | 2021-01-24T09:00:51.000Z | 2022-01-23T20:52:17.000Z | Projects/PlantMaker/archive/20100520/src/scheduler.py | fredmorcos/attic | 0da3b94aa525df59ddc977c32cb71c243ffd0dbd | [
"Unlicense"
] | 6 | 2020-02-29T01:59:03.000Z | 2022-02-15T10:25:40.000Z | Projects/PlantMaker/archive/20100520/src/scheduler.py | fredmorcos/attic | 0da3b94aa525df59ddc977c32cb71c243ffd0dbd | [
"Unlicense"
] | 1 | 2019-03-22T14:41:21.000Z | 2019-03-22T14:41:21.000Z | """
This module provides the main Scheduler logic of the program.
"""
from constraint import Problem
from constraints import MachineBreaksConstraint
from printer import pprint, BLUE, YELLOW, RED
class Scheduler(object):
    """
    This class provides the constraint-based Scheduler.

    It models order/machine scheduling as a python-constraint Problem:
    one variable per (order, machine) entry time, plus machine-quantity,
    capacity, precedence, break and deadline constraints.
    """
    def __init__(self, plant, orderList):
        """
        plant is a Plant instance to run the Scheduler on.
        orderList is the OrderList instance of incoming orders to the Plant.
        problem is a python-constraint Problem instance where solver is used as
        the constraint solver.
        """
        assert plant != None
        assert orderList != None
        self.printing = True
        self.plant = plant
        self.orderList = orderList
        self.problem = Problem()
        # Slack margins (in time units) around deadlines/machine windows;
        # widened progressively by start() until a solution is found.
        self.endMargin = 1
        self.machineMargin = 1

    def createMachineQuantityVarName(self, machine):
        """
        Creates and returns a python-constraint Variable name from a Machine
        instance.
        """
        # NOTE(review): this assert is vacuous -- `!= str or != unicode` is
        # always True (should presumably be `and`); confirm intent.
        assert type(machine) != str or type(machine) != unicode
        return str(machine.name) + "-quantity"

    def createEnterTimeVarName(self, order, machine):
        """
        Creates and returns a python-constraint Variable name from an Order
        instance and a Machine instance.
        """
        # Accept either a Machine instance or a machine name string.
        if type(machine) == str or type(machine) == unicode:
            machineName = machine
        else:
            machineName = machine.name
        return str(str(order.id) + "-enter-" + machineName)

    def createTimeAtMachineVarName(self, order, machine):
        """
        Creates and returns a python-constraint Variable name from an Order
        instance and a Machine instance.
        """
        if type(machine) == str or type(machine) == unicode:
            machineName = machine
        else:
            machineName = machine.name
        return str(str(order.id) + "-spend-" + machineName)

    def addPrecedenceConstraint(self, enterVar, order, machineIndex):
        """
        Adds a python-constraint Variable and Constraint to an order for the
        precedence of Machine instances. Meaning that an order cannot get into
        Machine 2 before getting into Machine 1. The sequence is determined by
        the Plant instance.
        """
        prevMachine = self.plant.machines[machineIndex - 1]
        enterVar2 = self.createEnterTimeVarName(order, prevMachine)
        spendVar2 = self.createTimeAtMachineVarName(order, prevMachine)
        if order.recipe[prevMachine.name] != 0:
            # If the previous machine cannot buffer (no unhook, not larger
            # capacity), the order must move on exactly when done; otherwise
            # it may wait (>=).
            if prevMachine.quantity <= \
               self.plant.machines[machineIndex].quantity \
               and prevMachine.canUnhook == False:
                self.problem.addConstraint(lambda x, y, yt: x == y + yt + \
                    self.plant.craneMoveTime, [enterVar, enterVar2, spendVar2])
            else:
                self.problem.addConstraint(lambda x, y, yt: x >= y + yt + \
                    self.plant.craneMoveTime, [enterVar, enterVar2, spendVar2])

    def addFinishTimeVar(self, order):
        """
        Adds a python-constraint Variable and Constraint to an order for the
        finish time on the Plant.
        """
        var = str(order.id) + "-finish"
        lastMachine = self.plant.machines[-1]
        # Finish time is allowed within +/- endMargin of the deadline.
        self.problem.addVariable(var, range(order.deadline - self.endMargin,
                                            order.deadline + self.endMargin))
        # finish == enter(last machine) + time spent there
        self.problem.addConstraint(lambda x, y, yt: x == y + yt,
            [var, self.createEnterTimeVarName(order, lastMachine),
             self.createTimeAtMachineVarName(order, lastMachine)])

    def addOrderEnterTimeAtMachineVar(self, order, machineName, machineIndex):
        """
        Adds a python-constraint Variable and Constraint to an order for the
        entrance time at a Machine instance.
        """
        var = self.createEnterTimeVarName(order, machineName)
        if order.recipe[machineName] != 0:
            # Latest feasible window derived by working backwards from the
            # deadline through the minimal remaining processing time.
            machineStart = (order.deadline + self.endMargin) - \
                order.recipe.calcMinProcTime(self.plant, machineName) - \
                self.machineMargin
            machineEnd = machineStart + self.machineMargin + \
                min(self.endMargin, self.machineMargin)
            variableRange = range(max(machineStart, 0), machineEnd)
        else:
            # Order skips this machine: pin its entry time to 0.
            variableRange = range(0, 1)
        self.problem.addVariable(var, variableRange)
        if machineIndex != 0:
            self.addPrecedenceConstraint(var, order, machineIndex)

    def machineQuantityConstraintFunc(self, *args):
        # args layout: [quantity, enterTimes..., spendTimes...]
        quantity = args[0]
        argsMiddle = (len(args) - 1) / 2
        enterTimes = args[1:argsMiddle + 1]
        spendTimes = args[argsMiddle + 1:]
        assert len(enterTimes) == len(spendTimes)
        numberOfCommons = 0
        for i, et in enumerate(enterTimes):
            range1 = range(et, et + spendTimes[i])
            # NOTE(review): numberOfCommons is reset every outer iteration and
            # only the last order's overlap count reaches the return below --
            # confirm this is the intended semantics.
            numberOfCommons = 0
            for j, et2 in enumerate(enterTimes):
                if i != j:
                    range2 = range(et2, et2 + spendTimes[j])
                    for v1 in range1:
                        if v1 in range2:
                            numberOfCommons += 1
                            break
        return not (numberOfCommons >= quantity)

    def addMachineQuantityConstraint(self, machine):
        # Collect enter/spend variables for every order at this machine and
        # constrain concurrent occupancy by the machine's quantity.
        enterVars = []
        spendVars = []
        for order in self.orderList.orders:
            enterVars.append(self.createEnterTimeVarName(order, machine))
            spendVars.append(self.createTimeAtMachineVarName(order, machine))
        vars = [self.createMachineQuantityVarName(machine)] + \
            enterVars + spendVars
        self.problem.addConstraint(self.machineQuantityConstraintFunc, vars)

    def machineCapacityConstraintFunc(self, *args):
        # args layout: [enterTimes..., nextEnterTimes...]; orders must leave
        # this machine in the same relative sequence they entered (FIFO).
        argsMiddle = len(args) / 2
        enterTimes = args[0:argsMiddle]
        nextEnterTimes = args[argsMiddle:]
        for i, et in enumerate(enterTimes):
            for j, et2 in enumerate(enterTimes):
                if i != j:
                    if et > et2 and nextEnterTimes[i] < nextEnterTimes[j]:
                        return False
        return True

    def addCapacityConstraint(self, machine, machineIndex):
        # Tie entry order at this machine to entry order at the next one.
        enterVars = []
        nextEnterVars = []
        nextMachine = self.plant.machines[machineIndex + 1]
        for order in self.orderList.orders:
            enterVars.append(self.createEnterTimeVarName(order, machine))
            nextEnterVars.append(self.createEnterTimeVarName(order,
                                                             nextMachine))
        self.problem.addConstraint(self.machineCapacityConstraintFunc,
                                   enterVars + nextEnterVars)

    def run(self):
        """
        Runs the main Scheduler logic.
        """
        # 1) machine quantity variables (fixed to the plant's configuration)
        for machine in self.plant.machines:
            var = self.createMachineQuantityVarName(machine)
            self.problem.addVariable(var, [machine.quantity])
        # 2) time-spent variables (fixed by each order's recipe)
        for machine in self.plant.machines:
            for order in self.orderList.orders:
                var = self.createTimeAtMachineVarName(order, machine)
                self.problem.addVariable(var, [order.recipe[machine.name]])
        # 3) entry-time variables plus precedence constraints
        for machineIndex, machine in enumerate(self.plant.machines):
            for order in self.orderList.orders:
                self.addOrderEnterTimeAtMachineVar(order, machine.name,
                                                   machineIndex)
        # 4) capacity (ordering) and quantity (occupancy) constraints
        for machineIndex, machine in enumerate(self.plant.machines):
            if machine.precedence == True and \
               machineIndex != len(self.plant.machines) - 1:
                self.addCapacityConstraint(machine, machineIndex)
            self.addMachineQuantityConstraint(machine)
        # 5) forbid entering a machine during its scheduled breaks
        for machineIndex, machine in enumerate(self.plant.machines):
            if len(machine.setOfBreaks()) != 0:
                for order in self.orderList.orders:
                    enterVar = self.createEnterTimeVarName(order, machine)
                    self.problem.addConstraint(
                        MachineBreaksConstraint(order, machine), [enterVar])
        # 6) deadline/finish constraints per order
        for order in self.orderList.orders:
            self.addFinishTimeVar(order)
        pprint("SCHED Computing solutions...", BLUE, self.printing)
        solutions = self.problem.getSolutions()
        return solutions, len(solutions)

    def start(self, endMarginLimit = 10, machineMarginLimit = 10):
        # Iterative deepening over the slack margins: try the tightest
        # schedule first and widen until the CSP becomes satisfiable.
        pprint("SCHED Started...", BLUE, self.printing)
        self.endMargin = 1
        while self.endMargin <= endMarginLimit:
            self.machineMargin = 1
            # machineMargin never exceeds the current endMargin
            machineMarginLimit = self.endMargin
            while self.machineMargin <= machineMarginLimit:
                try:
                    pprint("SCHED End Margin: %d, Machine Margin: %d" % \
                        (self.endMargin, self.machineMargin), YELLOW, self.printing)
                    self.problem.reset()
                    solutions, numberOfSolutions = self.run()
                    if numberOfSolutions > 0:
                        return solutions
                except Exception as e:
                    # Best effort: log and widen the search instead of dying.
                    pprint("SCHED Exception " + str(e), RED)
                    pprint("SCHED Trying new value for End Margin.", RED)
                    endMarginLimit += 1
                self.machineMargin += 1
            self.endMargin += 1
        pprint("SCHED No solutions found.", RED, self.printing)
        return None
| 33.739316 | 75 | 0.717796 |
850ceb07c85f337c1606f876f5ea09488d925719 | 7,685 | py | Python | MoSwA.py | macelik/MoSwA | 92fc74a207cbdc309eeac337dac1528a0596f564 | [
"MIT"
] | 3 | 2021-08-31T12:37:11.000Z | 2022-01-12T09:33:40.000Z | MoSwA.py | macelik/MoSwA | 92fc74a207cbdc309eeac337dac1528a0596f564 | [
"MIT"
] | 1 | 2021-08-05T09:35:42.000Z | 2021-10-02T16:22:54.000Z | MoSwA.py | macelik/MoSwA | 92fc74a207cbdc309eeac337dac1528a0596f564 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from argparse import ArgumentParser
from build import RunAndCompare,SplitsMergers,BuildConsensus,Clustering,PlotHotClusters
from build import NetworkPlot,report,align
import os,random,json
def is_valid_file(parser, arg):
    """Argparse helper: return *arg* unchanged when it names an existing
    file; otherwise abort argument parsing via ``parser.error``."""
    if os.path.isfile(arg):
        return arg  # return an open file handle
    parser.error("The file %s does not exist!" % arg)
def check_positive(value):
    """Argparse ``type=`` callable: parse *value* as a strictly positive int.

    :param value: command-line token (string, or anything ``int()`` accepts)
    :return: the parsed positive integer
    :raises argparse.ArgumentTypeError: if the value is zero or negative
    """
    import argparse  # local import: keeps this helper free of module-level state
    ivalue = int(value)
    if ivalue <= 0:
        # BUG FIX: the old code did `raise parser.error(...)`, which depended
        # on the global `parser` (defined later in the module); and since
        # parser.error() exits the process itself, the `raise` was
        # unreachable.  ArgumentTypeError is the documented contract for
        # type= callables -- argparse converts it into a clean usage error.
        raise argparse.ArgumentTypeError(
            "%s is an invalid positive int value" % value)
    return ivalue
def create_dir(out):
    """Create output directory *out* under the current working directory.

    If it already exists, fall back to ``<out>_<rand>`` with a random
    four-digit suffix.  Returns the absolute path actually created.
    """
    path = os.path.join(os.getcwd(), out)
    try:
        os.mkdir(path)
        print("Directory {} created successfully".format(out))
    except OSError as error:
        print("Directory '%s' exists" % out)
        # NOTE(review): the random-suffix fallback can itself collide and is
        # not retried -- a second OSError here would propagate.
        rand = random.randint(1000, 5000)
        path = path + "_" + str(rand)
        os.mkdir(path)
        print("Directory {}_{} created instead" .format(out, rand))
    return path
# Command-line interface: -i/-o are required; -m selects which motif site
# classes to analyse; -t is the minimum per-position sequence support; -k is
# the k-mer window size passed to the analysis as dima_kmer.
parser = ArgumentParser(description="Motif finder")
parser.add_argument("-i", dest="filename", required=True,
                    help="json file input", metavar="FILE",
                    type=lambda x: is_valid_file(parser, x))
parser.add_argument("-m",
                    choices=["index", "major", "minor", "unique", "all"],
                    nargs="+",
                    default="all", type=str, help="motif sites to analyze")
parser.add_argument("-t", type=check_positive, default=30,
                    help="Minimum number of sequences at a position")
# BUG FIX: the help text for -k was a copy-paste of -t's ("Minimum number of
# seqeunces at a position", typo included); -k is actually the k-mer size, as
# the original commented-out definition and its use as dima_kmer show.
parser.add_argument("-k", type=check_positive, default=9,
                    help="This is the k-mer size")
parser.add_argument("-o", help="Output directory name", required=True)
def create_dic():
    """Assemble the motif-transition count table for the network plot.

    Keys are "<from> => <to>" transition labels (I=index, M=major, Mi=minor,
    U=unique, +=gain, -=loss); each value combines the plain-transition count
    with the matching split/merge count computed earlier at module level.
    """
    return {
        'I => M': indextomajor + split_itom,
        'M => I': mtoi + merge_mtoi,
        'I => Mi': indextominor + split_itomi,
        'Mi => I': mitoi + merge_mitoi,
        'I => U': indextounique + split_itou,
        'U => I': utoi + merge_utoi,
        'I => +': igain,
        'I => -': indexloss,
        'M => Mi': majortominor + split_mtomi,
        'Mi => M': mitom + merge_mitom,
        'M => U': majortounique + split_mtou,
        'U => M': utom + merge_utom,
        'M => +': mgain,
        'M => -': majorloss,
        'Mi => U': minortounique + split_mitou,
        'U => Mi': utomi + merge_utomi,
        'Mi => +': migain,
        'Mi => -': minorloss,
        'U => +': ugain,
        'U => -': uniqueloss,
    }
# ---- Main pipeline ---------------------------------------------------------
# Parse CLI arguments and create the output directory.
args = parser.parse_args()
dima_kmer = args.k
which = args.m
thold = args.t
fname = args.filename
out = args.o
path = create_dir(out)
if 'all' in which:
    which = 'index', 'major', 'minor', 'unique'

# Stage 1: extract motifs and per-position raw/filtered tables.
RunAndCompare.path = path
analyse = RunAndCompare.GetMotifs(fname, dima_kmer)
common_pos, lengthofswitches, Raw_Indexes, Filtered_Indexes, Raw_Majors, Filtered_Majors, Raw_Minors, Filtered_Minors, Raw_Uniques, Filtered_Uniques = RunAndCompare.runit(which, dima_kmer, analyse)
common_pos.sort()

# Stage 2: classify transitions (losses, switches, splits, merges, gains)
# between motif classes; `vis` accumulates the JSON report as it goes.
SplitsMergers.analyse = analyse
vis = SplitsMergers.view_json()
indexloss, indextomajor, indextominor, indextounique, vis = SplitsMergers.analyze_index(Raw_Indexes, vis)
majorloss, majortoindex, majortominor, majortounique, vis = SplitsMergers.analyze_majors(Raw_Majors, vis)
minorloss, minortoindex, minortomajor, minortounique, vis = SplitsMergers.analyze_minors(Raw_Minors, vis)
uniqueloss, uniquetoindex, uniquetomajor, uniquetominor, vis = SplitsMergers.analyze_uniques(Raw_Uniques, vis)
mtoi, mitoi, utoi, merge_mtoi, merge_mitoi, merge_utoi, vis = SplitsMergers.i_splits_mergers(Filtered_Indexes, Raw_Indexes, Raw_Majors, Raw_Minors, Raw_Uniques, majortoindex, minortoindex, uniquetoindex, vis)
igain, vis = SplitsMergers.gain('Index', vis, Raw_Indexes, Raw_Majors)
mgain, vis = SplitsMergers.gain('Major', vis, Raw_Indexes, Raw_Majors)
split_itom, itom, mitom, merge_mitom, utom, merge_utom, vis = SplitsMergers.m_splits_mergers(Filtered_Majors,
                                                                                             Raw_Majors,
                                                                                             Raw_Indexes,
                                                                                             Raw_Minors,
                                                                                             Raw_Uniques,
                                                                                             indextomajor,
                                                                                             minortomajor,
                                                                                             uniquetomajor,
                                                                                             vis)
split_itomi, split_mtomi, mtomi, utomi, merge_utomi, migain, vis = SplitsMergers.mi_splits_mergers(analyse, indextominor, majortominor, uniquetominor, Raw_Indexes, Raw_Majors, Raw_Minors, Raw_Uniques, vis)
split_itou, split_mtou, split_mitou, ugain, vis = SplitsMergers.u_splits_mergers(analyse, indextounique, majortounique, minortounique, Raw_Indexes, Raw_Majors, Raw_Minors, Raw_Uniques, vis)

# Stage 3: consensus building and support filtering.
to_build, no_sup, low_sup, highest, average = BuildConsensus.building_index(analyse, thold, dima_kmer)
cons = BuildConsensus.ConsensusInput.parse_consensus_input(to_build)
cons = BuildConsensus.calculate_consensus(consensus_input=cons)
unable_to_analyze = BuildConsensus.unable_to_analyze(analyse, Filtered_Indexes, Filtered_Majors, dima_kmer, cons)
Filtered_Uniques, Filtered_Minors, Filtered_Majors, Filtered_Indexes = BuildConsensus.check_which(which, Filtered_Indexes, Filtered_Majors, Filtered_Minors, Filtered_Uniques)
consensus = BuildConsensus.build_consensus(Filtered_Indexes, Filtered_Majors, Filtered_Minors, Filtered_Uniques, to_build, thold, os.path.join(path, "Consensus.aln"))

# Stage 4: clustering, hotspot detection, and plots.
cluster, r_cluster = Clustering.cluster_it(common_pos, no_sup, low_sup)
Clustering.CnandCs(r_cluster, dima_kmer)
print("Plotting Clusters and Hotspots")
hots, r_hots = Clustering.hotspots(r_cluster)
plotin = PlotHotClusters.plotit(r_cluster, dima_kmer)
fig = PlotHotClusters.draw(r_cluster, r_hots, plotin, path)
print("Plotting Network Graph")
dict1 = create_dic()
dict1 = NetworkPlot.check_arg(dict1, which)
repos, D, connection = NetworkPlot.network(dict1)
# Split/merge counts passed separately so the plot can style those edges.
sandm = {'split_itom': split_itom, 'split_itomi': split_itomi, 'split_itou': split_itou,
         'split_mtomi': split_mtomi, 'split_mtou': split_mtou, 'split_mitou': split_mitou,
         'merge_mtoi': merge_mtoi, 'merge_mitoi': merge_mitoi, 'merge_utoi': merge_utoi,
         'merge_mitom': merge_mitom, 'merge_utom': merge_utom, 'merge_utomi': merge_utomi}
s_edges, s_nodes = NetworkPlot.plotly_edges(repos, D, connection, sandm)
data = NetworkPlot.plotly_info(s_edges, s_nodes)
NetworkPlot.out_plotly(data, path)
NetworkPlot.fix_html(path)

# Stage 5: text report, alignment, and final JSON output.
alignment_length = len(analyse.results) + dima_kmer - 1
summed = report.write_report(average, thold, no_sup, low_sup, common_pos, lengthofswitches, unable_to_analyze, alignment_length, highest, path)
vis = report.report_json(average, thold, no_sup, low_sup, common_pos, lengthofswitches, unable_to_analyze, alignment_length, highest, summed, dima_kmer, vis)
# NOTE(review): this assignment rebinds the imported `align` module name to a
# result value -- any later use of the module would break.
align, fix_align, vis = align.align_i(analyse, Filtered_Indexes, indextomajor, merge_mtoi, indextominor, merge_mitoi, indextounique, majortoindex, minortoindex, uniquetoindex, igain, vis, path)
# Drop report sections for motif classes the user did not request.
if "index" not in which:
    del vis['MoSwa_Output']['Report']['Results']['Index_Motifs']
if "major" not in which:
    del vis['MoSwa_Output']['Report']['Results']['Major_Motifs']
if "minor" not in which:
    del vis['MoSwa_Output']['Report']['Results']['Minor_Motifs']
if "unique" not in which:
    del vis['MoSwa_Output']['Report']['Results']['Unique_Motifs']
f = open(os.path.join(path, "Results.json"), 'w')
json.dump(vis, f)
f.close()
| 50.228758 | 194 | 0.690696 |
0e3f3910b5ac236f25134b82c56d14691c26bb66 | 847 | py | Python | tncProject/wsgi.py | haueyne/tnc-site | 8e5c853b5f48598210c2733a33039f77bd97762a | [
"MIT"
] | null | null | null | tncProject/wsgi.py | haueyne/tnc-site | 8e5c853b5f48598210c2733a33039f77bd97762a | [
"MIT"
] | 3 | 2020-06-05T18:08:54.000Z | 2021-06-10T20:15:38.000Z | tncProject/wsgi.py | haueyne/tnc-site | 8e5c853b5f48598210c2733a33039f77bd97762a | [
"MIT"
] | null | null | null | """
WSGI config for tncProject project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
import threading
import requests
import time
from whitenoise.django import DjangoWhiteNoise
from django.core.wsgi import get_wsgi_application
# Standard Django WSGI bootstrap, wrapped in WhiteNoise for static files.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tncProject.settings")
application = DjangoWhiteNoise(get_wsgi_application())

def awake():
    """Keep-alive pinger: periodically GETs AWAKE_URL so a free-tier host
    (e.g. Heroku) does not put the dyno to sleep.  Loops forever while the
    environment variable is set; exits immediately when it is unset."""
    minute = 60
    awake_url = os.environ.get('AWAKE_URL')
    while awake_url:
        try:
            print('Start Awaking')
            requests.get(awake_url)
            print("End")
        except Exception:
            # Best-effort: a failed ping should never kill the thread.
            print("error")
        time.sleep(5 * minute)

# Pinger thread is created but intentionally NOT started (see commented call).
t = threading.Thread(target=awake)
# t.start()
| 21.175 | 78 | 0.698937 |
8a77de9c326d98c2045a1274f54128e3da4b41d1 | 5,478 | py | Python | django/contrib/gis/geos/tests/test_geos_mutation.py | vsajip/django | bb41826571f5e12f6f60daf3639e7d8c063d163e | [
"BSD-3-Clause"
] | 3 | 2015-10-14T09:13:48.000Z | 2021-01-01T06:31:25.000Z | django/contrib/gis/geos/tests/test_geos_mutation.py | vsajip/django | bb41826571f5e12f6f60daf3639e7d8c063d163e | [
"BSD-3-Clause"
] | null | null | null | django/contrib/gis/geos/tests/test_geos_mutation.py | vsajip/django | bb41826571f5e12f6f60daf3639e7d8c063d163e | [
"BSD-3-Clause"
] | 1 | 2015-10-14T09:13:48.000Z | 2015-10-14T09:13:48.000Z | # Copyright (c) 2008-2009 Aryeh Leib Taurog, all rights reserved.
# Modified from original contribution by Aryeh Leib Taurog, which was
# released under the New BSD license.
from django.contrib.gis.geos import *
from django.contrib.gis.geos.error import GEOSIndexError
from django.utils import unittest
from django.utils import six
# Small wrappers so indexing operations can be passed around as callables
# (e.g. to assertRaises) in the tests below.
def getItem(o,i): return o[i]
def delItem(o,i): del o[i]
def setItem(o,i,v): o[i] = v

# One accessor per GEOS geometry API member under test.  The `api_get_`
# name prefix is load-bearing: the vars() scan below collects exactly the
# functions with this prefix, so do not rename them.
def api_get_distance(x): return x.distance(Point(-200,-200))
def api_get_buffer(x): return x.buffer(10)
def api_get_geom_typeid(x): return x.geom_typeid
def api_get_num_coords(x): return x.num_coords
def api_get_centroid(x): return x.centroid
def api_get_empty(x): return x.empty
def api_get_valid(x): return x.valid
def api_get_simple(x): return x.simple
def api_get_ring(x): return x.ring
def api_get_boundary(x): return x.boundary
def api_get_convex_hull(x): return x.convex_hull
def api_get_extent(x): return x.extent
def api_get_area(x): return x.area
def api_get_length(x): return x.length

# Collect every api_get_* callable defined above into one test list.
geos_function_tests = [ val for name, val in vars().items()
                        if hasattr(val, '__call__')
                        and name.startswith('api_get_') ]
class GEOSMutationTest(unittest.TestCase):
    """
    Tests Pythonic Mutability of Python GEOS geometry wrappers
    get/set/delitem on a slice, normal list methods
    """
    def test00_GEOSIndexException(self):
        'Testing Geometry GEOSIndexError'
        # A 2D Point accepts indices -2..1; anything outside must raise.
        p = Point(1,2)
        for i in range(-2,2): p._checkindex(i)
        self.assertRaises(GEOSIndexError, p._checkindex, 2)
        self.assertRaises(GEOSIndexError, p._checkindex, -3)
    def test01_PointMutations(self):
        'Testing Point mutations'
        # Exercise both a directly-constructed Point and one parsed from WKT.
        for p in (Point(1,2,3), fromstr('POINT (1 2 3)')):
            self.assertEqual(p._get_single_external(1), 2.0, 'Point _get_single_external')
            # _set_single
            p._set_single(0,100)
            self.assertEqual(p.coords, (100.0,2.0,3.0), 'Point _set_single')
            # _set_list
            p._set_list(2,(50,3141))
            self.assertEqual(p.coords, (50.0,3141.0), 'Point _set_list')
    def test02_PointExceptions(self):
        'Testing Point exceptions'
        # Points cannot be constructed from 1- or 4-element sequences.
        self.assertRaises(TypeError, Point, six.lrange(1))
        self.assertRaises(TypeError, Point, six.lrange(4))
    def test03_PointApi(self):
        'Testing Point API'
        q = Point(4,5,3)
        for p in (Point(1,2,3), fromstr('POINT (1 2 3)')):
            # Slice-assign the first two coordinates, then verify p now agrees
            # with q under every registered api_get_* accessor.
            p[0:2] = [4,5]
            for f in geos_function_tests:
                self.assertEqual(f(q), f(p), 'Point ' + f.__name__)
    def test04_LineStringMutations(self):
        'Testing LineString mutations'
        for ls in (LineString((1,0),(4,1),(6,-1)),
                   fromstr('LINESTRING (1 0,4 1,6 -1)')):
            self.assertEqual(ls._get_single_external(1), (4.0,1.0), 'LineString _get_single_external')
            # _set_single
            ls._set_single(0,(-50,25))
            self.assertEqual(ls.coords, ((-50.0,25.0),(4.0,1.0),(6.0,-1.0)), 'LineString _set_single')
            # _set_list
            ls._set_list(2, ((-50.0,25.0),(6.0,-1.0)))
            self.assertEqual(ls.coords, ((-50.0,25.0),(6.0,-1.0)), 'LineString _set_list')
            # A LineString rebuilt from the mutated coords must match the
            # mutated original under every api_get_* accessor.
            lsa = LineString(ls.coords)
            for f in geos_function_tests:
                self.assertEqual(f(lsa), f(ls), 'LineString ' + f.__name__)
    def test05_Polygon(self):
        'Testing Polygon mutations'
        for pg in (Polygon(((1,0),(4,1),(6,-1),(8,10),(1,0)),
                           ((5,4),(6,4),(6,3),(5,4))),
                   fromstr('POLYGON ((1 0,4 1,6 -1,8 10,1 0),(5 4,6 4,6 3,5 4))')):
            # Index 0 is the exterior ring, index 1 the first interior ring.
            self.assertEqual(pg._get_single_external(0),
                             LinearRing((1,0),(4,1),(6,-1),(8,10),(1,0)),
                             'Polygon _get_single_external(0)')
            self.assertEqual(pg._get_single_external(1),
                             LinearRing((5,4),(6,4),(6,3),(5,4)),
                             'Polygon _get_single_external(1)')
            # _set_list
            pg._set_list(2, (((1,2),(10,0),(12,9),(-1,15),(1,2)),
                             ((4,2),(5,2),(5,3),(4,2))))
            self.assertEqual(pg.coords,
                             (((1.0,2.0),(10.0,0.0),(12.0,9.0),(-1.0,15.0),(1.0,2.0)),
                              ((4.0,2.0),(5.0,2.0),(5.0,3.0),(4.0,2.0))),
                             'Polygon _set_list')
            lsa = Polygon(*pg.coords)
            for f in geos_function_tests:
                self.assertEqual(f(lsa), f(pg), 'Polygon ' + f.__name__)
    def test06_Collection(self):
        'Testing Collection mutations'
        for mp in (MultiPoint(*six.lmap(Point,((3,4),(-1,2),(5,-4),(2,8)))),
                   fromstr('MULTIPOINT (3 4,-1 2,5 -4,2 8)')):
            self.assertEqual(mp._get_single_external(2), Point(5,-4), 'Collection _get_single_external')
            mp._set_list(3, six.lmap(Point,((5,5),(3,-2),(8,1))))
            self.assertEqual(mp.coords, ((5.0,5.0),(3.0,-2.0),(8.0,1.0)), 'Collection _set_list')
            lsa = MultiPoint(*six.lmap(Point,((5,5),(3,-2),(8,1))))
            for f in geos_function_tests:
                self.assertEqual(f(lsa), f(mp), 'MultiPoint ' + f.__name__)
def suite():
    """Build the TestSuite holding every GEOSMutationTest case."""
    mutation_cases = unittest.makeSuite(GEOSMutationTest)
    # TestSuite(iterable) adds each contained test, same as addTest().
    return unittest.TestSuite([mutation_cases])
def run(verbosity=2):
    """Execute the mutation test suite with a text runner at the given verbosity."""
    runner = unittest.TextTestRunner(verbosity=verbosity)
    runner.run(suite())
# Allow this test module to be executed directly from the command line.
if __name__ == '__main__':
    run()
| 39.985401 | 104 | 0.581964 |
1507296711d111655ac4311d4b56d93d6c880ed8 | 127 | py | Python | finite-mdp/finite_mdp/__init__.py | DinisMoreira/Dissert | d59a810992a32a0e033a58204afbbc386958dd32 | [
"MIT"
] | null | null | null | finite-mdp/finite_mdp/__init__.py | DinisMoreira/Dissert | d59a810992a32a0e033a58204afbbc386958dd32 | [
"MIT"
] | null | null | null | finite-mdp/finite_mdp/__init__.py | DinisMoreira/Dissert | d59a810992a32a0e033a58204afbbc386958dd32 | [
"MIT"
] | 2 | 2021-09-21T14:53:34.000Z | 2022-01-04T09:39:37.000Z | from gym.envs.registration import register
register(
id='finite-mdp-v0',
entry_point='finite_mdp.envs:FiniteMDPEnv',
) | 21.166667 | 47 | 0.748031 |
e211a84bcde5ea452ce7ae0d26dfd93c779afdab | 358 | py | Python | hottbox/core/__init__.py | bscalzo/hottbox | 332620b34971511a3b59df4dee7f9d9b17cb4b46 | [
"Apache-2.0"
] | null | null | null | hottbox/core/__init__.py | bscalzo/hottbox | 332620b34971511a3b59df4dee7f9d9b17cb4b46 | [
"Apache-2.0"
] | null | null | null | hottbox/core/__init__.py | bscalzo/hottbox | 332620b34971511a3b59df4dee7f9d9b17cb4b46 | [
"Apache-2.0"
] | null | null | null | """
This module contains methods for the most common operations within multilinear algebra and
classes for the tensors represented through various tensor decompositions
"""
from .structures import Tensor, TensorCPD, TensorTKD, TensorTT, super_diag_tensor, residual_tensor
from .operations import khatri_rao, hadamard, kronecker, mode_n_product, unfold, fold | 51.142857 | 98 | 0.832402 |
7ee48f5bf930758f217f70d2afa5a744f06da8f0 | 9,105 | py | Python | sdk/lusid/models/bucketed_cash_flow_response.py | finbourne/lusid-sdk-python-preview | 6b1ae2c634f4b9a816e070470e9c8e6e06eae0ee | [
"MIT"
] | 5 | 2019-06-01T11:37:48.000Z | 2021-08-23T17:37:33.000Z | sdk/lusid/models/bucketed_cash_flow_response.py | finbourne/lusid-sdk-python-preview | 6b1ae2c634f4b9a816e070470e9c8e6e06eae0ee | [
"MIT"
] | 98 | 2020-04-15T06:05:43.000Z | 2022-03-01T10:25:25.000Z | sdk/lusid/models/bucketed_cash_flow_response.py | finbourne/lusid-sdk-python-preview | 6b1ae2c634f4b9a816e070470e9c8e6e06eae0ee | [
"MIT"
] | 9 | 2019-09-30T11:19:25.000Z | 2021-11-17T19:49:59.000Z | # coding: utf-8
"""
LUSID API
FINBOURNE Technology # noqa: E501
The version of the OpenAPI document: 0.11.3648
Contact: info@finbourne.com
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from lusid.configuration import Configuration
class BucketedCashFlowResponse(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
      required_map (dict): The key is attribute name
                           and the value is whether it is 'required' or 'optional'.
    """
    # Declared OpenAPI type of every model attribute.
    openapi_types = {
        'href': 'str',
        'data': 'list[dict(str, object)]',
        'report_currency': 'str',
        'data_schema': 'ResultDataSchema',
        'links': 'list[Link]'
    }
    # Python attribute name -> JSON key used on the wire.
    attribute_map = {
        'href': 'href',
        'data': 'data',
        'report_currency': 'reportCurrency',
        'data_schema': 'dataSchema',
        'links': 'links'
    }
    # All fields of this model are optional in the API definition.
    required_map = {
        'href': 'optional',
        'data': 'optional',
        'report_currency': 'optional',
        'data_schema': 'optional',
        'links': 'optional'
    }
    def __init__(self, href=None, data=None, report_currency=None, data_schema=None, links=None, local_vars_configuration=None):  # noqa: E501
        """BucketedCashFlowResponse - a model defined in OpenAPI"
        :param href:
        :type href: str
        :param data: List of dictionary bucketed cash flow result set.  Each dictionary represent a bucketed cashflow result set keyed by AddressKeys.  e.g.      dictionary[\"Valuation/CashFlowAmount\"] for the aggregated cash flow amount for the bucket.      e.g. suppose \"RoundUp\" method, then dictionary[\"Valuation/CashFlowDate/RoundUp\"] returns the bucketed cashflow date.
        :type data: list[dict(str, object)]
        :param report_currency: Three letter ISO currency string indicating what currency to report in for ReportCcy denominated queries.  If not present, then the currency of the relevant portfolio will be used in its place where relevant.
        :type report_currency: str
        :param data_schema:
        :type data_schema: lusid.ResultDataSchema
        :param links: Collection of links.
        :type links: list[lusid.Link]
        """  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration.get_default_copy()
        self.local_vars_configuration = local_vars_configuration
        self._href = None
        self._data = None
        self._report_currency = None
        self._data_schema = None
        self._links = None
        self.discriminator = None
        self.href = href
        self.data = data
        self.report_currency = report_currency
        # Generated pattern: data_schema is only assigned when supplied, while
        # the other fields are routed through their setters unconditionally.
        if data_schema is not None:
            self.data_schema = data_schema
        self.links = links
    @property
    def href(self):
        """Gets the href of this BucketedCashFlowResponse.  # noqa: E501
        :return: The href of this BucketedCashFlowResponse.  # noqa: E501
        :rtype: str
        """
        return self._href
    @href.setter
    def href(self, href):
        """Sets the href of this BucketedCashFlowResponse.
        :param href: The href of this BucketedCashFlowResponse.  # noqa: E501
        :type href: str
        """
        self._href = href
    @property
    def data(self):
        """Gets the data of this BucketedCashFlowResponse.  # noqa: E501
        List of dictionary bucketed cash flow result set.  Each dictionary represent a bucketed cashflow result set keyed by AddressKeys.  e.g.      dictionary[\"Valuation/CashFlowAmount\"] for the aggregated cash flow amount for the bucket.      e.g. suppose \"RoundUp\" method, then dictionary[\"Valuation/CashFlowDate/RoundUp\"] returns the bucketed cashflow date.  # noqa: E501
        :return: The data of this BucketedCashFlowResponse.  # noqa: E501
        :rtype: list[dict(str, object)]
        """
        return self._data
    @data.setter
    def data(self, data):
        """Sets the data of this BucketedCashFlowResponse.
        List of dictionary bucketed cash flow result set.  Each dictionary represent a bucketed cashflow result set keyed by AddressKeys.  e.g.      dictionary[\"Valuation/CashFlowAmount\"] for the aggregated cash flow amount for the bucket.      e.g. suppose \"RoundUp\" method, then dictionary[\"Valuation/CashFlowDate/RoundUp\"] returns the bucketed cashflow date.  # noqa: E501
        :param data: The data of this BucketedCashFlowResponse.  # noqa: E501
        :type data: list[dict(str, object)]
        """
        self._data = data
    @property
    def report_currency(self):
        """Gets the report_currency of this BucketedCashFlowResponse.  # noqa: E501
        Three letter ISO currency string indicating what currency to report in for ReportCcy denominated queries.  If not present, then the currency of the relevant portfolio will be used in its place where relevant.  # noqa: E501
        :return: The report_currency of this BucketedCashFlowResponse.  # noqa: E501
        :rtype: str
        """
        return self._report_currency
    @report_currency.setter
    def report_currency(self, report_currency):
        """Sets the report_currency of this BucketedCashFlowResponse.
        Three letter ISO currency string indicating what currency to report in for ReportCcy denominated queries.  If not present, then the currency of the relevant portfolio will be used in its place where relevant.  # noqa: E501
        :param report_currency: The report_currency of this BucketedCashFlowResponse.  # noqa: E501
        :type report_currency: str
        """
        self._report_currency = report_currency
    @property
    def data_schema(self):
        """Gets the data_schema of this BucketedCashFlowResponse.  # noqa: E501
        :return: The data_schema of this BucketedCashFlowResponse.  # noqa: E501
        :rtype: lusid.ResultDataSchema
        """
        return self._data_schema
    @data_schema.setter
    def data_schema(self, data_schema):
        """Sets the data_schema of this BucketedCashFlowResponse.
        :param data_schema: The data_schema of this BucketedCashFlowResponse.  # noqa: E501
        :type data_schema: lusid.ResultDataSchema
        """
        self._data_schema = data_schema
    @property
    def links(self):
        """Gets the links of this BucketedCashFlowResponse.  # noqa: E501
        Collection of links.  # noqa: E501
        :return: The links of this BucketedCashFlowResponse.  # noqa: E501
        :rtype: list[lusid.Link]
        """
        return self._links
    @links.setter
    def links(self, links):
        """Sets the links of this BucketedCashFlowResponse.
        Collection of links.  # noqa: E501
        :param links: The links of this BucketedCashFlowResponse.  # noqa: E501
        :type links: list[lusid.Link]
        """
        self._links = links
    def to_dict(self, serialize=False):
        """Returns the model properties as a dict"""
        result = {}
        # When serialize=True, keys are the wire (JSON) names from
        # attribute_map; otherwise the python attribute names are used.
        def convert(x):
            if hasattr(x, "to_dict"):
                args = getfullargspec(x.to_dict).args
                if len(args) == 1:
                    return x.to_dict()
                else:
                    return x.to_dict(serialize)
            else:
                return x
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            attr = self.attribute_map.get(attr, attr) if serialize else attr
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: convert(x),
                    value
                ))
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], convert(item[1])),
                    value.items()
                ))
            else:
                result[attr] = convert(value)
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, BucketedCashFlowResponse):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, BucketedCashFlowResponse):
            return True
        return self.to_dict() != other.to_dict()
| 34.751908 | 372 | 0.630203 |
b20337576934644d88b1848ffb1b624bff01e682 | 5,874 | py | Python | homeassistant/components/mitemp_bt/sensor.py | domwillcode/home-assistant | f170c80bea70c939c098b5c88320a1c789858958 | [
"Apache-2.0"
] | 4 | 2020-07-29T17:47:10.000Z | 2020-09-16T13:39:13.000Z | homeassistant/components/mitemp_bt/sensor.py | domwillcode/home-assistant | f170c80bea70c939c098b5c88320a1c789858958 | [
"Apache-2.0"
] | 6 | 2020-11-08T19:40:10.000Z | 2022-03-01T11:11:07.000Z | homeassistant/components/mitemp_bt/sensor.py | klauern/home-assistant-core | c18ba6aec0627e6afb6442c678edb5ff2bb17db6 | [
"Apache-2.0"
] | 5 | 2020-03-29T00:29:13.000Z | 2021-09-06T20:58:40.000Z | """Support for Xiaomi Mi Temp BLE environmental sensor."""
import logging
import btlewrap
from btlewrap.base import BluetoothBackendException
from mitemp_bt import mitemp_bt_poller
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_FORCE_UPDATE,
CONF_MAC,
CONF_MONITORED_CONDITIONS,
CONF_NAME,
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_TEMPERATURE,
TEMP_CELSIUS,
UNIT_PERCENTAGE,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
try:
import bluepy.btle # noqa: F401 pylint: disable=unused-import
BACKEND = btlewrap.BluepyBackend
except ImportError:
BACKEND = btlewrap.GatttoolBackend
_LOGGER = logging.getLogger(__name__)
# Configuration keys specific to this platform.
CONF_ADAPTER = "adapter"
CONF_CACHE = "cache_value"
CONF_MEDIAN = "median"
CONF_RETRIES = "retries"
CONF_TIMEOUT = "timeout"
# Defaults: poll via hci0, cache readings for 300 s, 3-sample median filter,
# 10 s BLE timeout with 2 retries.
DEFAULT_ADAPTER = "hci0"
DEFAULT_UPDATE_INTERVAL = 300
DEFAULT_FORCE_UPDATE = False
DEFAULT_MEDIAN = 3
DEFAULT_NAME = "MiTemp BT"
DEFAULT_RETRIES = 2
DEFAULT_TIMEOUT = 10
# Sensor types are defined like: Name, units
SENSOR_TYPES = {
    "temperature": [DEVICE_CLASS_TEMPERATURE, "Temperature", TEMP_CELSIUS],
    "humidity": [DEVICE_CLASS_HUMIDITY, "Humidity", UNIT_PERCENTAGE],
    "battery": [DEVICE_CLASS_BATTERY, "Battery", UNIT_PERCENTAGE],
}
# Extend the shared sensor platform schema with this platform's options; only
# the MAC address is mandatory.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_MAC): cv.string,
        vol.Optional(CONF_MONITORED_CONDITIONS, default=list(SENSOR_TYPES)): vol.All(
            cv.ensure_list, [vol.In(SENSOR_TYPES)]
        ),
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
        vol.Optional(CONF_MEDIAN, default=DEFAULT_MEDIAN): cv.positive_int,
        vol.Optional(CONF_FORCE_UPDATE, default=DEFAULT_FORCE_UPDATE): cv.boolean,
        vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
        vol.Optional(CONF_RETRIES, default=DEFAULT_RETRIES): cv.positive_int,
        vol.Optional(CONF_CACHE, default=DEFAULT_UPDATE_INTERVAL): cv.positive_int,
        vol.Optional(CONF_ADAPTER, default=DEFAULT_ADAPTER): cv.string,
    }
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the MiTempBt sensor."""
    backend = BACKEND
    _LOGGER.debug("MiTempBt is using %s backend.", backend.__name__)
    # One poller instance is shared by every entity created below.
    poller = mitemp_bt_poller.MiTempBtPoller(
        config.get(CONF_MAC),
        cache_timeout=config.get(CONF_CACHE),
        adapter=config.get(CONF_ADAPTER),
        backend=backend,
    )
    poller.ble_timeout = config.get(CONF_TIMEOUT)
    poller.retries = config.get(CONF_RETRIES)
    force_update = config.get(CONF_FORCE_UPDATE)
    median = config.get(CONF_MEDIAN)
    prefix = config.get(CONF_NAME)
    entities = []
    for parameter in config[CONF_MONITORED_CONDITIONS]:
        device, name, unit = SENSOR_TYPES[parameter]
        if prefix:
            name = f"{prefix} {name}"
        entities.append(
            MiTempBtSensor(poller, parameter, device, name, unit, force_update, median)
        )
    add_entities(entities)
class MiTempBtSensor(Entity):
    """Implementing the MiTempBt sensor."""
    def __init__(self, poller, parameter, device, name, unit, force_update, median):
        """Initialize the sensor."""
        self.poller = poller
        self.parameter = parameter
        self._device = device
        self._unit = unit
        self._name = name
        self._state = None
        # Rolling buffer of recent readings fed to the median filter.
        self.data = []
        self._force_update = force_update
        # Median is used to filter out outliers. median of 3 will filter
        # single outliers, while median of 5 will filter double outliers
        # Use median_count = 1 if no filtering is required.
        self.median_count = median
    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name
    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state
    @property
    def unit_of_measurement(self):
        """Return the units of measurement."""
        return self._unit
    @property
    def device_class(self):
        """Device class of this entity."""
        return self._device
    @property
    def force_update(self):
        """Force update."""
        return self._force_update
    def update(self):
        """
        Update current conditions.
        This uses a rolling median over ``median_count`` values to filter
        out outliers.
        """
        try:
            _LOGGER.debug("Polling data for %s", self.name)
            data = self.poller.parameter_value(self.parameter)
        except OSError as ioerr:
            _LOGGER.warning("Polling error %s", ioerr)
            return
        except BluetoothBackendException as bterror:
            _LOGGER.warning("Polling error %s", bterror)
            return
        if data is not None:
            _LOGGER.debug("%s = %s", self.name, data)
            self.data.append(data)
        else:
            _LOGGER.warning(
                "Did not receive any data from Mi Temp sensor %s", self.name
            )
            # Remove old data from median list or set sensor value to None
            # if no data is available anymore
            if self.data:
                self.data = self.data[1:]
            else:
                self._state = None
            return
        if len(self.data) > self.median_count:
            self.data = self.data[1:]
        if len(self.data) == self.median_count:
            # Middle element of the sorted buffer (median_count is expected
            # to be odd for a true median).
            median = sorted(self.data)[int((self.median_count - 1) / 2)]
            _LOGGER.debug("Median is: %s", median)
            self._state = median
        else:
            _LOGGER.debug("Not yet enough data for median calculation")
| 30.915789 | 87 | 0.655601 |
9a1165dfdb989fc6937714a8a2bb01ee352db77f | 31,319 | py | Python | Valkyries/FASTQ_Tools.py | Gaorav-Gupta-Lab/Volundr | 0142c03b233e15609ad0688ef918aecc45522430 | [
"MIT"
] | 1 | 2020-10-28T15:36:57.000Z | 2020-10-28T15:36:57.000Z | Valkyries/FASTQ_Tools.py | pkMyt1/Volundr | 0142c03b233e15609ad0688ef918aecc45522430 | [
"MIT"
] | 6 | 2019-05-07T16:47:06.000Z | 2020-10-21T13:05:57.000Z | Valkyries/FASTQ_Tools.py | Gaorav-Gupta-Lab/Volundr | 0142c03b233e15609ad0688ef918aecc45522430 | [
"MIT"
] | 2 | 2020-03-28T02:39:12.000Z | 2020-10-26T14:42:27.000Z | """
Some helpers to make it easier to manipulate FASTQ files.
@author: Dennis A. Simpson
University of North Carolina at Chapel Hill
Chapel Hill, NC 27517
@copyright: 2020
"""
import pathlib
import gzip
import ntpath
import itertools
from operator import add
import collections
import time
import natsort
import pathos
import Levenshtein
import magic
from Valkyries import Tool_Box, FileWriter
__author__ = 'Dennis A. Simpson'
__version__ = "0.17.7"
class FastqSplitter:
    """
    This class will chop up a FASTQ file or a pair of FASTQ files for use in the aligner or quality analysis.
    The class name here no longer reflects the primary function of this class.  The file split function is not needed
    when using the parallel version of the aligners.
    """
    def __init__(self, args, log, fastq1, fastq2, index1=None, index2=None, paired_end=False):
        # fastq1/fastq2 (and optional index1/index2) are FASTQ_Reader-style
        # objects exposing a seq_read() generator of read objects.
        self.log = log
        self.args = args
        self.fastq1_file = fastq1
        self.fastq2_file = fastq2
        self.index1_file = index1
        self.index2_file = index2
        self.paired_end = paired_end
        # Total read count; populated by new_file_size().
        self.read_count = None
    def new_file_size(self, line_count):
        """
        Compute how many reads each of the args.Split output files should hold.
        :param line_count: total number of lines in FASTQ1 (4 lines per read).
        :return: reads per output file (rounded up so the last file may be short).
        """
        # Adjust the number of lines to be written in each file.
        self.log.info("Begin computing file size of new FASTQ files.")
        self.read_count = line_count/4
        file_size = self.read_count/int(self.args.Split)
        # Round any fractional file size up to the next whole read.
        if not file_size % 1 == 0:
            file_size = 1 + int(file_size)
        self.log.info("FASTQ file size computation done. {0} files containing {1} reads will be written, last file "
                      "may differ.".format(self.args.Split, file_size))
        return file_size
    def file_line_counter(self):
        """
        This function counts the number of lines in FASTQ1.  This information is needed to accurately split the FASTQ
        file(s) for parallel alignment.
        :return: number of newline characters found in args.FASTQ1.
        """
        infile = FASTQ_Reader(self.args.FASTQ1, self.log)
        start_time = time.time()
        self.log.info("Begin counting lines in {}.".format(self.args.FASTQ1))
        # Stream the file in 1 MiB chunks and sum newline counts; avoids
        # holding the (potentially huge) file in memory.
        bufgen = \
            itertools.takewhile(lambda x: x, (infile.fq_file.read(1024 * 1024) for _ in itertools.repeat(None)))
        line_count = sum(buf.count('\n') for buf in bufgen)
        self.log.info("Found {0} reads in {1}.  Count took {2} seconds"
                      .format(line_count/4, infile.file_name, int(time.time() - start_time)))
        infile.fq_file.close()
        return line_count
    def temp_file_writer(self, limit):
        """
        Write the temporary FASTQ files.  Also create list of temporary BAM file names for use later.
        :param limit: number of reads to place in each temporary file pair.
        :return: (list of (R1, R2) temp FASTQ name tuples, list of temp BAM names)
        """
        self.log.info("Begin writing temporary FASTQ files.")
        i = 0
        temp_file1 = None
        temp_file2 = None
        fastq_file_list = []
        bam_file_list = []
        read_count = 0
        limit_counter = 0
        while read_count <= self.read_count:
            try:
                # This generator is returning actual reads not lines.
                fastq1_read = next(self.fastq1_file.seq_read())
                fastq2_read = next(self.fastq2_file.seq_read())
                # NOTE(review): fastq3_read is only bound when index1_file is
                # supplied, but it is used below whenever args.HaloPLEX is set
                # -- HaloPLEX runs without an index file would raise
                # NameError.  Confirm callers always pass index1 for HaloPLEX.
                if self.index1_file is not None:
                    fastq3_read = next(self.index1_file.seq_read())
            except StopIteration:
                read_count += 1
                continue
            read_count += 1
            try:
                fastq1_n_frac = fastq1_read.seq.count("N")/len(fastq1_read.seq)
                fastq2_n_frac = fastq2_read.seq.count("N")/len(fastq2_read.seq)
            except ZeroDivisionError:
                # Zero-length read; skip it.
                continue
            # Apply Filters
            if (len(fastq1_read.seq) < int(self.args.Minimum_Length) or
                    len(fastq2_read.seq) < int(self.args.Minimum_Length) or
                    fastq1_n_frac >= float(self.args.N_Limit) or
                    fastq2_n_frac >= float(self.args.N_Limit)):
                continue
            # Roll over to a new file pair every `limit` retained reads.
            if limit_counter % limit == 0:
                if temp_file1:
                    temp_file1.close()
                    limit_counter = 0
                if temp_file2:
                    temp_file2.close()
                file1 = "{0}{1}_R1_tmp_{2}.fastq.gz".format(self.args.WorkingFolder, self.args.Job_Name, i)
                file2 = "{0}{1}_R2_tmp_{2}.fastq.gz".format(self.args.WorkingFolder, self.args.Job_Name, i)
                bam_file_list.append("{0}{1}_R1_tmp_{2}.bam".format(self.args.WorkingFolder, self.args.Job_Name, i))
                fastq_file_list.append((file1, file2))
                temp_file1 = Writer(self.log, file1)
                temp_file2 = Writer(self.log, file2)
                self.log.info("Writing {0} and {1}".format(file1, file2))
                i += 1
            limit_counter += 1
            # BAM files are missing the barcodes because of a space in some of the header files.
            # fastq1_read.name = fastq1_read.name.replace(" ", ":")
            # fastq2_read.name = fastq2_read.name.replace(" ", ":")
            # Add the UMT's to the header.
            if self.args.HaloPLEX:
                umi = fastq3_read.seq
                header1 = "{0}|{1}:{2}".format(fastq1_read.name.split(":")[-1], umi, fastq1_read.name)
                header2 = "{0}|{1}:{2}".format(fastq2_read.name.split(":")[-1], umi, fastq2_read.name)
            elif self.args.ThruPLEX:
                # header1 = "{0}|{1}".format(fastq1_read.name.split(":")[-1], fastq1_read.name)
                # ThruPLEX UMTs are the first 6 bases of each mate.
                umt1 = fastq1_read.seq[:6]
                umt2 = fastq2_read.seq[:6]
                header1 = "{0}|{1}:{2}".format(umt1, umt2, fastq1_read.name)
                header2 = "{0}|{1}:{2}".format(umt1, umt2, fastq2_read.name)
            else:
                Tool_Box.debug_messenger("Only HaloPLEX or ThruPLEX currently enabled.")
                self.log.error("Only HaloPLEX or ThruPLEX currently enabled.")
                raise SystemExit(1)
            # Trim adapter sequences from 5' end if needed.
            if int(self.args.trim) > 0:
                fastq1_read.seq = fastq1_read.seq[int(self.args.trim):]
                fastq1_read.qual = fastq1_read.qual[int(self.args.trim):]
                fastq2_read.seq = fastq2_read.seq[int(self.args.trim):]
                fastq2_read.qual = fastq2_read.qual[int(self.args.trim):]
            fastq1_read.name = header1
            fastq2_read.name = header2
            # NOTE(review): these pass the reader objects, not the freshly
            # modified reads (file_writer() below passes read lists instead).
            # Verify against Writer.write() that this is intentional and not
            # meant to be temp_file1.write(fastq1_read).
            temp_file1.write(self.fastq1_file)
            temp_file2.write(self.fastq2_file)
        if temp_file1:
            temp_file1.close()
        if temp_file2:
            temp_file2.close()
        self.log.info("All temporary FASTQ files written")
        return fastq_file_list, bam_file_list
    def file_writer(self):
        """
        Process FASTQ file(s) and write new version(s) suitable for aligners.  Return file name.
        :return: (R1 output file name, R2 output file name)
        """
        self.log.info("Begin writing temporary FASTQ files.")
        current_read_count = 0
        file1 = "{0}{1}_R1_processed.fastq.gz".format(self.args.WorkingFolder, self.args.Job_Name)
        file2 = "{0}{1}_R2_processed.fastq.gz".format(self.args.WorkingFolder, self.args.Job_Name)
        temp_file1 = Writer(self.log, file1)
        temp_file2 = Writer(self.log, file2)
        self.log.info("Writing {0} and {1}".format(file1, file2))
        fastq1_list = []
        fastq2_list = []
        eof = False
        Read = collections.namedtuple('Read', 'name, seq, index, qual')
        # This generator returns objects not lines.
        while not eof:
            try:
                fastq1_read = next(self.fastq1_file.seq_read())
                fastq2_read = next(self.fastq2_file.seq_read())
                # NOTE(review): index1_read/index2_read are only bound when
                # the corresponding index file was supplied, yet they are used
                # in the HaloPLEX and FASTQ_PreProcess branches below --
                # confirm those modes always provide index files.
                if self.index1_file is not None:
                    index1_read = next(self.index1_file.seq_read())
                if self.index2_file is not None:
                    index2_read = next(self.index2_file.seq_read())
            except StopIteration:
                eof = True
                continue
            current_read_count += 1
            # Apply Filters
            trim_5 = int(self.args.Trim5)
            trim_3 = int(self.args.Trim3)
            min_length = int(self.args.Minimum_Length) + trim_5 + trim_3
            # Filter reads based on length and number of N's.
            if (len(fastq1_read.seq) < min_length or len(fastq2_read.seq) < min_length
                    or fastq1_read.seq.count("N") / len(fastq1_read.seq) >= float(self.args.N_Limit)
                    or fastq2_read.seq.count("N")/len(fastq2_read.seq) >= float(self.args.N_Limit)):
                continue
            # Add the UMT's to the header.
            if self.args.HaloPLEX:
                header1 = "{0}|{0}:{1}".format(index1_read.seq, fastq1_read.name)
                header2 = "{0}|{0}:{1}".format(index1_read.seq, fastq2_read.name)
                # Fixme: This needs to be exposed to the user.
                # Short HaloPLEX reads have issues.  Found that reads <= 100 all show a 3' -1 or -2 error
                if len(fastq1_read.seq) <= 100:
                    read_trim(fastq1_read, trim5=0, trim3=3)
                if len(fastq2_read.seq) <= 100:
                    read_trim(fastq2_read, trim5=0, trim3=3)
            elif self.args.ThruPLEX:
                # header1 = "{0}|{1}".format(fastq1_read.name.split(":")[-1], fastq1_read.name)
                umt1 = fastq1_read.seq[:6]
                umt2 = fastq2_read.seq[:6]
                header1 = "{0}|{1}:{2}".format(umt1, umt2, fastq1_read.name)
                header2 = "{0}|{1}:{2}".format(umt1, umt2, fastq2_read.name)
                read_trim(fastq1_read, trim5=len(umt1), trim3=0)
                read_trim(fastq2_read, trim5=len(umt2), trim3=0)
            elif self.args.FASTQ_PreProcess:
                # The indices are after the last ":" in the header.
                header1 = "{}:{}+{}".format(fastq1_read.name, index1_read.seq, index2_read.seq)
                header2 = "{}:{}+{}".format(fastq2_read.name, index1_read.seq, index2_read.seq)
            else:
                self.log.error("Only HaloPLEX or ThruPLEX currently enabled.")
                raise SystemExit(1)
            # Trim sequences from ends if needed.
            if trim_5 > 0 or trim_3 > 0:
                read_trim(fastq1_read, trim_5, trim_3)
                read_trim(fastq2_read, trim_5, trim_3)
            fastq1_read.name = header1
            fastq2_read.name = header2
            fastq1_list.append(Read(fastq1_read.name, fastq1_read.seq, fastq1_read.index, fastq1_read.qual))
            fastq2_list.append(Read(fastq2_read.name, fastq2_read.seq, fastq2_read.index, fastq2_read.qual))
            # empirically determined for UNC Longleaf cluster.  May need to expose this to user.  Writes blocks of data
            # to disk speeding up entire process.
            if current_read_count % 1000000 == 0:
                temp_file1.write(fastq1_list)
                temp_file2.write(fastq2_list)
                fastq1_list.clear()
                fastq2_list.clear()
        # Cleans up any writes still needed and closes files
        if fastq1_list:
            temp_file1.write(fastq1_list)
            fastq1_list.clear()
        if fastq2_list:
            temp_file2.write(fastq2_list)
            fastq2_list.clear()
        if temp_file1:
            temp_file1.close()
        if temp_file2:
            temp_file2.close()
        self.log.info("Modified FASTQ file(s) written")
        if self.args.FASTQ_PreProcess:
            Tool_Box.compress_files(file1, self.log)
            Tool_Box.compress_files(file2, self.log)
        return file1, file2
class FastqProcessing:
    """Demultiplexing support: maps sample indices to output files and routes
    per-worker FASTQ batches into temporary per-sample files."""
    def __init__(self, args, log, paired_end=False):
        self.log = log
        self.args = args
        self.fq1_batch = None
        self.fq2_batch = None
        self.fastq3_file = None
        self.paired_end = paired_end
        # index sequence -> [field1, field2] from the master index file.
        self.master_index_dict = {}
        self.sample_manifest_list = None
        # sample index -> [sample name, replicate, extra field].
        self.sample_manifest_dictionary = collections.defaultdict(list)
        self.demultiplex_file_names = collections.defaultdict(list)
        # sample index -> [sample name, replicate, read count].
        self.summary_data = collections.defaultdict(list)
    def dataframe_build(self):
        """
        Build the dataframes containing the worker ID, indices, and file names.
        """
        self.log.info("Begin building dataframes")
        # Master index file: tab-delimited, '#' lines are comments.
        with open(self.args.Master_Index_File) as f:
            for l in f:
                if "#" in l or not l:
                    continue
                l_list = [x for x in l.strip("\n").split("\t")]
                self.master_index_dict[l_list[0]] = [l_list[1], l_list[2]]
        self.sample_manifest_list = Tool_Box.FileParser.indices(self.log, self.args.SampleManifest)
        outfile_list_dict = collections.defaultdict(list)
        '''
        self.demultiplex_file_names["Unknown"] = \
            [Writer(self.log, "{}{}_Unknown_R1.fastq.gz".format(self.args.WorkingFolder, self.args.JobName), "Unknown"),
             Writer(self.log, "{}{}_Unknown_R2.fastq.gz".format(self.args.WorkingFolder, self.args.JobName), "Unknown")]
        self.demultiplex_file_names["GhostIndex"] = \
            [Writer(self.log, "{}{}_GhostIndex_R1.fastq.gz".format(self.args.WorkingFolder, self.args.JobName), "GhostIndex"),
             Writer(self.log, "{}{}_GhostIndex_R2.fastq.gz".format(self.args.WorkingFolder, self.args.JobName), "GhostIndex")]
        '''
        # Initialize our summary dataframe
        self.summary_data["Unknown"] = ["", "", 0]
        self.summary_data["GhostIndex"] = ["", "", 0]
        self.summary_data['total_reads'] = ["", "", 0]
        # One Unknown/GhostIndex temp-file pair per parallel worker.
        for i in range(int(self.args.Spawn)):
            outfile_list_dict["Unknown"].append(
                ["{}{}_Unknown_R1.fastq.gz".format(self.args.WorkingFolder, i),
                 "{}{}_Unknown_R2.fastq.gz".format(self.args.WorkingFolder, i)])
            '''
            I noticed with Nextera Libraries we occasionally found an index in a read that was valid but never used
            in the library prep.  I don't know, yet, how prevalent these Ghost Indices are so I am capturing them for
            later analysis.
            '''
            outfile_list_dict["GhostIndex"].append(
                ["{}{}_GhostIndex_R1.fastq.gz".format(self.args.WorkingFolder, i),
                 "{}{}_GhostIndex_R2.fastq.gz".format(self.args.WorkingFolder, i)])
        for sample in self.sample_manifest_list:
            sample_index = sample[0]
            self.sample_manifest_dictionary[sample_index] = [sample[1], sample[2], sample[3]]
            # Check for errors in the sample manifest
            if sample_index not in self.master_index_dict:
                self.log.error("Sample index {} for sample {}, replicate {} is not in Master Index File.".
                               format(sample_index, sample[1], sample[2]))
                raise SystemExit(1)
            for i in range(int(self.args.Spawn)):
                sample_key = "{}|{}".format(i, sample_index)
                # Check for more errors in the sample manifest
                if sample_key in self.demultiplex_file_names:
                    self.log.error("The index {} is duplicated.  Correct the error in {} and try again."
                                   .format(sample_index, self.args.SampleManifest))
                    raise SystemExit(1)
                # Get a list of the temporary FASTQ file names generated.
                outfile_list_dict[sample_index].append(
                    ["{}{}_{}_R1.fastq.gz".format(self.args.WorkingFolder, i, sample_index)])
                # Initialize FASTQ file objects
                # tmp1 = gzip.open("{}{}_{}_R1.fastq.gz".format(self.args.WorkingFolder, i, sample_index), "wb")
                # tmp2 = gzip.open("{}{}_{}_R2.fastq.gz".format(self.args.WorkingFolder, i, sample_index), "wb")
                # tmp1.close()
                # tmp2.close()
                '''
                self.demultiplex_file_names[sample_key] = \
                    [Writer(self.log,
                            "{}{}_{}_R1.fq.gz".format(self.args.WorkingFolder, i, sample_index),
                            sample_index),
                     Writer(self.log,
                            "{}{}_{}_R2.fq.gz".format(self.args.WorkingFolder, i, sample_index),
                            sample_index)]
                '''
            # Add sample tracking framework
            self.summary_data[sample_index] = [sample[1], sample[2], 0]
        self.log.info("Dataframes built")
        return
    def file_writer(self, worker_id, fq1_batch):
        """
        Called by multiprocessor in MainEntry.  Process FASTQ file(s) and write temp version(s).
        :param worker_id: identifier of the parallel worker handling this batch.
        :param fq1_batch: batch of FASTQ reads to demultiplex.
        :return:
        """
        self.output_process(worker_id, FileWriter.file_writer(self, fq1_batch))
    def output_process(self, worker_id, temp_data_dict):
        """
        Write data to the appropriate temporary compressed file.
        :param worker_id: identifier of the parallel worker; part of temp file names.
        :param temp_data_dict: per-sample dict with "QC" mismatch counts and "R1" read blocks.
        """
        for sample_index in temp_data_dict:
            # Append this batch's QC counts to the worker/sample mismatch file.
            tmp_count_file = open("{}{}_{}_mismatch.tmp".format(self.args.WorkingFolder, worker_id, sample_index), "a")
            tmp_count_file.write("{}\t{}\t{}\n"
                                 .format(temp_data_dict[sample_index]["QC"][0], temp_data_dict[sample_index]["QC"][1],
                                         temp_data_dict[sample_index]["QC"][2]))
            tmp_count_file.close()
            # Concatenate all read blocks and append them in a single write.
            outstring = ""
            for block in temp_data_dict[sample_index]["R1"]:
                outstring += "{}".format(block)
            outfile = open("{}{}_{}.tmp".format(self.args.WorkingFolder, worker_id, sample_index), "a")
            outfile.write(outstring)
            outfile.close()
        # Not clearing this results in duplications in the output.  Strange since the dictionary is rebuilt everytime
        # within the job.
        temp_data_dict.clear()
class FastqQuality:
    """
    This class will look at various aspects of the library. As of Aug. 22 2017 it will analyze ThruPLEX data only.
    """
    # ToDo: Add ability to look at average read quality and positional read qualities grouped by index.
    def __init__(self, args, log, paired_end):
        # args: parsed options namespace; log: shared logger;
        # paired_end: True when R1/R2 read pairs are analyzed together.
        self.index_list = Tool_Box.FileParser.indices(log, args.Index_File)
        # Sentinel bucket so reads matching no declared index can still be tallied.
        self.index_list.append(("Unknown", "Unknown"))
        self.log = log
        self.args = args
        self.paired_end = paired_end
        # Expected anchor (stem) sequence at positions 7-17 of each read
        # — presumably the ThruPLEX stem; TODO confirm against the kit docs.
        self.file1_anchor_seq = "TCAGTAGCTCA"
        self.file2_anchor_seq = "TCAGTAGCTCA"
        # Filled in by module_director after the parallel jobs complete.
        self.anchor_dict = None
        self.umt_counts_dict = None
    def module_director(self, splitter_data):
        """
        Parallel job coordination and data processing call.
        """
        self.log.info("Spawning {0} parallel jobs for quality analysis of {1} temporary FASTQ files."
                      .format(self.args.Spawn, len(splitter_data.fastq_file_list)))
        dict_list = []
        # Read-only inputs shared by every worker; tupled so starmap can repeat it.
        data_bundle = int(self.args.prog_check), self.index_list, self.file1_anchor_seq, self.file2_anchor_seq
        p = pathos.multiprocessing.Pool(int(self.args.Spawn))
        # Each worker returns (anchor_dict, umt_dict) for one FASTQ file pair.
        dict_list += p.starmap(self.quality_check, zip(itertools.repeat(data_bundle), splitter_data.fastq_file_list))
        # Data captured from the multiprocessing pool in this manner is messy. This sorts it.
        anchor_dict = collections.defaultdict(lambda: collections.defaultdict(list))
        umt_counts_dict = collections.defaultdict(lambda: collections.defaultdict(int))
        for data_dicts in dict_list:
            for dd in data_dicts:
                for index_key in dd:
                    for key2 in dd[index_key]:
                        if key2 in ("R1", "R2"):
                            # Total the anchor seq lengths.
                            # Element-wise sum of this worker's mismatch bins into the totals;
                            # defaultdict(list) yields [] on first touch, and map() stops at the
                            # shorter sequence, so the first merge seeds the lists.
                            r2_list = anchor_dict[index_key]["R2"]
                            r1_list = anchor_dict[index_key]["R1"]
                            anchor_dict[index_key]["R1"] = list(map(add, data_dicts[0][index_key]["R1"], r1_list))
                            anchor_dict[index_key]["R2"] = list(map(add, data_dicts[0][index_key]["R2"], r2_list))
                            # if index_key in anchor_dict:
                            #     r2_list = anchor_dict[index_key]["R2"]
                            #     r1_list = anchor_dict[index_key]["R1"]
                            #     anchor_dict[index_key]["R1"] = list(map(add, data_dicts[0][index_key]["R1"], r1_list))
                            #     anchor_dict[index_key]["R2"] = list(map(add, data_dicts[0][index_key]["R2"], r2_list))
                            # else:
                            #     anchor_dict[index_key]["R1"] = data_dicts[0][index_key]["R1"]
                            #     anchor_dict[index_key]["R2"] = data_dicts[0][index_key]["R2"]
                        else:
                            # Total the UMT count data
                            umt_counts_dict[index_key][key2] += data_dicts[1][index_key][key2]
        self.anchor_dict = anchor_dict
        self.umt_counts_dict = umt_counts_dict
        self.log.info("All parallel quality analysis jobs complete. Begin compiling data.")
        self.data_processing()
    def data_processing(self):
        """
        Gather the data from all the dictionaries and format it for the output files.
        """
        # Build the header string and write it to the output file.
        anchor_outstring = "Sample_Name\tIndex\tTotal_Reads"
        if self.paired_end:
            anchor_outstring = "Sample_Name\tIndex\tTotal_Reads\tPair"
        # One column per possible Levenshtein distance (0..len(anchor)).
        for i in range(len(self.file1_anchor_seq) + 1):
            anchor_outstring += "\t{0}_Mismatch".format(i)
        # Process anchor sequence data and write file.
        for index in self.index_list:
            for key in natsort.natsorted(self.anchor_dict[index[0]]):
                tmp_string = "\t".join(str(x) for x in self.anchor_dict[index[0]][key])
                # NOTE(review): the * 0.5 presumably converts a per-mate count into
                # a per-pair total — confirm against how quality_check increments.
                total_reads = sum(self.anchor_dict[index[0]][key]) * 0.5
                anchor_outstring += "\n{0}\t{1}\t{2}\t{3}\t{4}".format(index[1], index[0], total_reads, key, tmp_string)
        # Initialize the output file.
        outfile = "{0}{1}_FASTQ_QualityAssessment.txt".format(self.args.WorkingFolder, self.args.Job_Name)
        quality_outfile = open(outfile, "w")
        quality_outfile.write(anchor_outstring)
        quality_outfile.close()
        self.log.info("{0}_FASTQ_QualityAssessment.txt".format(self.args.Job_Name))
        # Free the (potentially large) accumulated data once written.
        self.anchor_dict.clear()
    @staticmethod
    def quality_check(data_bundle, fastq_files):
        """
        Called by the multiprocessor pool. Examines the indices and determines the mismatches and N counts.
        :param data_bundle: (prog_check interval, index list, R1 anchor seq, R2 anchor seq).
        :param fastq_files: pair of temporary FASTQ file paths (R1, R2).
        :return: (anchor_dict, umt_dict) tallies for this file pair.
        """
        prog_check = data_bundle[0]
        index_list = data_bundle[1]
        file1_anchor_seq = data_bundle[2]
        file2_anchor_seq = data_bundle[3]
        fastq1 = FASTQ_Reader(fastq_files[0])
        fastq2 = FASTQ_Reader(fastq_files[1])
        umt_dict = collections.defaultdict(lambda: collections.defaultdict(int))
        anchor_dict = Tool_Box.VivifiedDictionary()
        read_count = 0
        try:
            # Loop until one reader raises StopIteration at end of file.
            while True:
                fastq1_read = next(fastq1.seq_read())
                fastq2_read = next(fastq2.seq_read())
                read_count += 1
                if read_count % int(prog_check) == 0:
                    print(" -->Processed {0} reads in file {1} and {2}."
                          .format(read_count, fastq_files[0], fastq_files[1]))
                # Get read index and UMT.
                umt = "{0}{1}".format(fastq1_read.name.split("|")[0], fastq2_read.name.split("|")[1].split(":")[0])
                read_index = fastq1_read.name.split(":")[-1]
                # Quantify anchor lengths.
                # Positions 7-17 of each mate are compared against the expected anchor.
                unknown_anchor1 = fastq1_read.seq[7: 18]
                unknown_anchor2 = fastq2_read.seq[7: 18]
                match1 = Levenshtein.distance(file1_anchor_seq, unknown_anchor1)
                match2 = Levenshtein.distance(file2_anchor_seq, unknown_anchor2)
                for index in index_list:
                    # Allow at most one mismatch against the first 6 bases of the index.
                    index_match = Levenshtein.distance(read_index, index[0][: 6])
                    # Add anchor and UMT data to dictionaries.
                    if index[0] in anchor_dict and index_match < 2:
                        anchor_dict[index[0]]["R1"][match1] += 1
                        anchor_dict[index[0]]["R2"][match2] += 1
                        umt_dict[index[0]][umt] += 1
                        # if umt in umt_dict[index[0]]:
                        #     umt_dict[index[0]][umt] += 1
                        # else:
                        #     umt_dict[index[0]][umt] = 1
                    elif index_match < 2:
                        # First read for this index: seed the mismatch bins then count.
                        anchor_dict[index[0]]["R1"] = [0] * len(file1_anchor_seq)
                        anchor_dict[index[0]]["R2"] = [0] * len(file2_anchor_seq)
                        anchor_dict[index[0]]["R1"][match1] += 1
                        anchor_dict[index[0]]["R2"][match2] += 1
                        umt_dict[index[0]][umt] = 1
        except StopIteration:
            # End of either FASTQ file terminates the tally for this pair.
            return anchor_dict, umt_dict
class Writer:
    """
    Writes reads out to a new FASTQ file.
    """
    __slots__ = ['log', 'file']

    def __init__(self, log, out_file_string):
        """
        :param log: logger used for error reporting.
        :param out_file_string: path of the FASTQ file to create.
        """
        self.file = open(out_file_string, "w")
        # self.file = gzip.open(out_file_string, "rb")
        self.log = log

    def lethal_write(self, read):
        """
        This is potentially dead code.
        :param read: read object with name/seq/index/qual attributes.
        :return: True on success; exits the program on a malformed read.
        """
        # ToDo: Potentially dead code block.
        try:
            assert len(read.seq) == len(read.qual)
        except AssertionError:
            self.log.error("Sequence and quality scores of different lengths! Read Name {0}; Seq Length {1}; Qual "
                           "Length {2}".format(read.name, len(read.seq), len(read.qual)))
            raise SystemExit(1)
        record = "@{0}\n{1}\n{2}\n{3}\n".format(read.name, read.seq, read.index, read.qual)
        self.file.write(record)
        return True

    def write(self, read_list):
        """
        Writes our new FASTQ file.
        :param read_list: list of (name, sequence, quality) records; cleared on return.
        :return: True on success; exits the program on a malformed read.
        """
        records = []
        for read in read_list:
            name, seq, qual = read[0], read[1], read[2]
            try:
                assert len(seq) == len(qual)
            except AssertionError:
                self.log.error("Sequence and quality scores of different lengths! Read Name {0}; Seq Length {1}; Qual "
                               "Length {2}".format(name, len(seq), len(qual)))
                raise SystemExit(1)
            records.append("@{}\n{}\n+\n{}\n".format(name, seq, qual))
        self.file.write("".join(records))
        read_list.clear()
        return True

    def close(self):
        """
        Closes FASTQ file
        :return: True
        """
        self.file.close()
        return True
class FASTQ_Reader:
    """
    Main class that creates FASTQ reads using a generator.
    """
    __slots__ = ['input_file', 'log', 'name', 'seq', 'index', 'qual', 'read_block', 'file_name', 'fq_file']

    def __init__(self, input_file, log=None):
        """
        Splits the FASTQ read list from the FASTQ Iterator into the lines to be manipulated. Also does a check to make
        sure the sequence length = quality string length.
        :param input_file: path to a text or gzip FASTQ file.
        :param log: logger used for warnings/errors.
        """
        self.name = None
        self.seq = None
        self.index = None
        self.qual = None
        self.input_file = input_file
        self.log = log
        self.read_block = []
        self.file_name = ntpath.basename(input_file)
        self.fq_file = self.__fastq_file()

    def __fastq_file(self):
        """
        Create FASTQ file object, transparently handling gzip'd input.
        :return: open text-mode file object.
        """
        if len(self.input_file) < 3:
            self.log.warning("FASTQ file parameter missing from options file. Correct error and try again.")
            raise SystemExit(1)
        if not pathlib.Path(self.input_file).is_file():
            self.log.warning("FASTQ file {} not found. Correct error and run again.".format(self.input_file))
            raise SystemExit(1)
        # python-magic returns bytes in some versions and str in others.
        try:
            mime_type = magic.from_file(self.input_file, mime=True).decode()
        except AttributeError:
            mime_type = magic.from_file(self.input_file, mime=True)
        if "text" in mime_type:
            # Bug fix: mode 'rU' was deprecated and removed in Python 3.11
            # (raises ValueError); plain 'r' already gives universal newlines.
            fq_file = open(self.input_file, 'r')
        elif "gzip" in mime_type:
            fq_file = gzip.open(self.input_file, 'rt', encoding='utf-8')
        else:
            self.log.warning("Unsupported file-type for {}. Only TEXT or GZIP Allowed.".format(self.input_file))
            raise SystemExit(1)
        return fq_file

    def line_reader(self):
        """
        Generator over the lines of the FASTQ file.
        """
        # Bug fix: the previous body wrapped the yield in 'while True', which
        # yields the SAME line forever; it only worked because callers created
        # a fresh generator for every line.  Yield each line exactly once.
        for line in self.fq_file:
            yield line

    def seq_read(self):
        """
        Generator that assembles one four-line FASTQ record and yields self
        with name/seq/index/qual populated; ends without yielding at EOF.
        """
        read_block = []
        eof = False
        lines = self.line_reader()
        try:
            for _ in range(4):
                read_block.append(next(lines))
        except StopIteration:
            eof = True
        if len(read_block) == 4 and not eof:
            self.name = read_block[0].strip("\n").strip("@")
            self.seq = read_block[1].strip("\n").strip()
            self.index = read_block[2].strip("\n").strip()
            self.qual = read_block[3].strip("\n").strip()
            if len(self.seq) != len(self.qual):
                self.log.error("Sequence and quality scores of different lengths! \n{0:s}\n{1:s}\n{2:s}\n{3:s}"
                               .format(self.name, self.seq, self.index, self.qual))
                raise ValueError("Sequence and quality scores of different lengths! \n{0:s}\n{1:s}\n{2:s}\n{3:s}"
                                 .format(self.name, self.seq, self.index, self.qual))
            yield self
        # I am using this as my EOF. Not so sure the code ever reaches this.
        self.name = None
def read_trim(fastq_read, trim5=None, trim3=None):
    """
    Provide additional trimming to reads beyond the adaptor trim.  Mutates
    fastq_read in place, keeping seq and qual the same length.
    :param fastq_read: read object with ``seq`` and ``qual`` string attributes.
    :param trim5: number of bases to drop from the 5' (left) end.
    :param trim3: number of bases to drop from the 3' (right) end.
    """
    if trim5 and trim3:
        # Bug fix: the sequence was previously left untrimmed in this branch
        # (only qual was sliced), desynchronizing seq/qual lengths downstream.
        fastq_read.seq = fastq_read.seq[trim5:-trim3]
        fastq_read.qual = fastq_read.qual[trim5:-trim3]
    elif trim5:
        fastq_read.seq = fastq_read.seq[trim5:]
        fastq_read.qual = fastq_read.qual[trim5:]
    elif trim3:
        fastq_read.seq = fastq_read.seq[:-trim3]
        fastq_read.qual = fastq_read.qual[:-trim3]
| 40.307593 | 126 | 0.570229 |
8a53151e0c799d627c477ec7623bbef6ae8b1979 | 4,628 | py | Python | moto/ec2/responses/vpcs.py | gvlproject/moto | b1c51faaf5dbf79a76eca29724b7d22b87e27502 | [
"Apache-2.0"
] | 1 | 2021-06-14T18:05:40.000Z | 2021-06-14T18:05:40.000Z | moto/ec2/responses/vpcs.py | gvlproject/moto | b1c51faaf5dbf79a76eca29724b7d22b87e27502 | [
"Apache-2.0"
] | 1 | 2021-12-13T20:51:54.000Z | 2021-12-13T20:51:54.000Z | moto/ec2/responses/vpcs.py | gvlproject/moto | b1c51faaf5dbf79a76eca29724b7d22b87e27502 | [
"Apache-2.0"
] | 1 | 2019-06-19T06:56:40.000Z | 2019-06-19T06:56:40.000Z | from __future__ import unicode_literals
from moto.core.responses import BaseResponse
from moto.core.utils import camelcase_to_underscores
from moto.ec2.utils import filters_from_querystring
class VPCs(BaseResponse):
    def create_vpc(self):
        """Handle CreateVpc: create the VPC in the backend and render it."""
        cidr_block = self._get_param('CidrBlock')
        instance_tenancy = self._get_param('InstanceTenancy', if_none='default')
        new_vpc = self.ec2_backend.create_vpc(cidr_block, instance_tenancy)
        return self.response_template(CREATE_VPC_RESPONSE).render(vpc=new_vpc)

    def delete_vpc(self):
        """Handle DeleteVpc: remove the VPC and confirm deletion."""
        deleted = self.ec2_backend.delete_vpc(self._get_param('VpcId'))
        return self.response_template(DELETE_VPC_RESPONSE).render(vpc=deleted)

    def describe_vpcs(self):
        """Handle DescribeVpcs, honouring VpcId and Filter parameters."""
        requested_ids = self._get_multi_param('VpcId')
        query_filters = filters_from_querystring(self.querystring)
        matching = self.ec2_backend.get_all_vpcs(vpc_ids=requested_ids,
                                                 filters=query_filters)
        return self.response_template(DESCRIBE_VPCS_RESPONSE).render(vpcs=matching)

    def describe_vpc_attribute(self):
        """Handle DescribeVpcAttribute for a single named attribute."""
        vpc_id = self._get_param('VpcId')
        attribute = self._get_param('Attribute')
        value = self.ec2_backend.describe_vpc_attribute(
            vpc_id, camelcase_to_underscores(attribute))
        return self.response_template(DESCRIBE_VPC_ATTRIBUTE_RESPONSE).render(
            vpc_id=vpc_id, attribute=attribute, value=value)

    def modify_vpc_attribute(self):
        """Handle ModifyVpcAttribute for the DNS support/hostname flags."""
        vpc_id = self._get_param('VpcId')
        for attribute in ('EnableDnsSupport', 'EnableDnsHostnames'):
            raw_value = self.querystring.get('%s.Value' % attribute)
            if raw_value:
                self.ec2_backend.modify_vpc_attribute(
                    vpc_id, camelcase_to_underscores(attribute), raw_value[0])
        return MODIFY_VPC_ATTRIBUTE_RESPONSE
# Jinja2 XML template rendered by VPCs.create_vpc; context: `vpc`.
CREATE_VPC_RESPONSE = """
<CreateVpcResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<vpc>
<vpcId>{{ vpc.id }}</vpcId>
<state>pending</state>
<cidrBlock>{{ vpc.cidr_block }}</cidrBlock>
<dhcpOptionsId>{% if vpc.dhcp_options %}{{ vpc.dhcp_options.id }}{% else %}dopt-1a2b3c4d2{% endif %}</dhcpOptionsId>
<instanceTenancy>{{ vpc.instance_tenancy }}</instanceTenancy>
<tagSet>
{% for tag in vpc.get_tags() %}
<item>
<resourceId>{{ tag.resource_id }}</resourceId>
<resourceType>{{ tag.resource_type }}</resourceType>
<key>{{ tag.key }}</key>
<value>{{ tag.value }}</value>
</item>
{% endfor %}
</tagSet>
</vpc>
</CreateVpcResponse>"""
# Jinja2 XML template rendered by VPCs.describe_vpcs; context: `vpcs` iterable.
DESCRIBE_VPCS_RESPONSE = """
<DescribeVpcsResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<vpcSet>
{% for vpc in vpcs %}
<item>
<vpcId>{{ vpc.id }}</vpcId>
<state>{{ vpc.state }}</state>
<cidrBlock>{{ vpc.cidr_block }}</cidrBlock>
<dhcpOptionsId>{% if vpc.dhcp_options %}{{ vpc.dhcp_options.id }}{% else %}dopt-7a8b9c2d{% endif %}</dhcpOptionsId>
<instanceTenancy>{{ vpc.instance_tenancy }}</instanceTenancy>
<isDefault>{{ vpc.is_default }}</isDefault>
<tagSet>
{% for tag in vpc.get_tags() %}
<item>
<resourceId>{{ tag.resource_id }}</resourceId>
<resourceType>{{ tag.resource_type }}</resourceType>
<key>{{ tag.key }}</key>
<value>{{ tag.value }}</value>
</item>
{% endfor %}
</tagSet>
</item>
{% endfor %}
</vpcSet>
</DescribeVpcsResponse>"""
# Static XML confirmation returned by VPCs.delete_vpc.
DELETE_VPC_RESPONSE = """
<DeleteVpcResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<return>true</return>
</DeleteVpcResponse>
"""
# Jinja2 XML template rendered by VPCs.describe_vpc_attribute;
# context: `vpc_id`, `attribute`, `value`.
DESCRIBE_VPC_ATTRIBUTE_RESPONSE = """
<DescribeVpcAttributeResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<vpcId>{{ vpc_id }}</vpcId>
<{{ attribute }}>
<value>{{ value }}</value>
</{{ attribute }}>
</DescribeVpcAttributeResponse>"""
# Static XML confirmation returned by VPCs.modify_vpc_attribute.
MODIFY_VPC_ATTRIBUTE_RESPONSE = """
<ModifyVpcAttributeResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<return>true</return>
</ModifyVpcAttributeResponse>"""
| 38.890756 | 123 | 0.654927 |
f41f65e93674ff34d66bc80f53248d89896d17bc | 2,414 | py | Python | data/p4VQE/R4/benchmark/startQiskit_noisy425.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p4VQE/R4/benchmark/startQiskit_noisy425.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p4VQE/R4/benchmark/startQiskit_noisy425.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=3
# total number=11
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
    """Build the benchmark circuit on an n-qubit register.

    NOTE(review): relies on the module-level globals E, V, gamma and beta
    being defined before the call (they are set in the __main__ block), and
    indexes input_qubit[3], so it requires n >= 4 — confirm before reuse.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    prog = QuantumCircuit(input_qubit)
    prog.h(input_qubit[0]) # number=1
    prog.h(input_qubit[1]) # number=2
    prog.h(input_qubit[2]) # number=8
    prog.h(input_qubit[2]) # number=3
    prog.x(input_qubit[2]) # number=7
    prog.h(input_qubit[3]) # number=4
    # One controlled-phase + single-qubit phases per weighted edge (k, l, w).
    for edge in E:
        k = edge[0]
        l = edge[1]
        prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
        prog.p(gamma, k)
        prog.p(gamma, l)
    # Mixing rotation on every vertex qubit.
    prog.rx(2 * beta, range(len(V)))
    prog.swap(input_qubit[1],input_qubit[0]) # number=6
    prog.x(input_qubit[1]) # number=9
    prog.x(input_qubit[1]) # number=10
    # circuit end
    return prog
if __name__ == '__main__':
    # Build a 4-vertex weighted graph for the benchmark problem.
    n = 4
    V = np.arange(0, n, 1)
    E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
    G = nx.Graph()
    G.add_nodes_from(V)
    G.add_weighted_edges_from(E)
    # Grid-search gamma/beta over [0, pi) for the maximum of the analytic
    # objective F1, then use the maximizing angles in make_circuit.
    step_size = 0.1
    a_gamma = np.arange(0, np.pi, step_size)
    a_beta = np.arange(0, np.pi, step_size)
    a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
    F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
        1 + np.cos(4 * a_gamma) ** 2)
    result = np.where(F1 == np.amax(F1))
    a = list(zip(result[0], result[1]))[0]
    gamma = a[0] * step_size
    beta = a[1] * step_size
    # Build, transpile for the noisy fake backend, and sample the circuit.
    prog = make_circuit(4)
    sample_shot =5600
    writefile = open("../data/startQiskit_noisy425.csv", "w")
    # prog.draw('mpl', filename=(kernel + '.png'))
    backend = FakeYorktown()
    circuit1 = transpile(prog, FakeYorktown())
    circuit1.measure_all()
    prog = circuit1
    info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()
    # Dump measurement counts, depth and circuit drawing to the CSV file.
    print(info, file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(), file=writefile)
    print(circuit1, file=writefile)
    writefile.close()
476e7f1c75c9d6dffeea266de39268e2a0b825ea | 14,521 | py | Python | llvm/utils/UpdateTestChecks/asm.py | dongkyunahn-intel/llvm | a3bbc1105cf41809e6ea546ee1c541e8522db6d6 | [
"Apache-2.0"
] | null | null | null | llvm/utils/UpdateTestChecks/asm.py | dongkyunahn-intel/llvm | a3bbc1105cf41809e6ea546ee1c541e8522db6d6 | [
"Apache-2.0"
] | null | null | null | llvm/utils/UpdateTestChecks/asm.py | dongkyunahn-intel/llvm | a3bbc1105cf41809e6ea546ee1c541e8522db6d6 | [
"Apache-2.0"
] | 1 | 2021-11-23T17:16:34.000Z | 2021-11-23T17:16:34.000Z | from __future__ import print_function
import re
import sys
from . import common
if sys.version_info[0] > 2:
    # Python 3 removed string.expandtabs(); provide a minimal stand-in so the
    # scrubbers can call string.expandtabs(s, n) on both major versions.
    # NOTE: deliberately shadows the stdlib `string` module name here.
    class string:
        expandtabs = str.expandtabs
else:
    import string
# RegEx: this is where the magic happens.

##### Assembly parser
#
# One ASM_FUNCTION_*_RE per target: each captures a function's name as
# `func` and its instruction text as `body` from `llc`-style output, keyed
# on that target's label/comment/epilogue conventions.

ASM_FUNCTION_X86_RE = re.compile(
    r'^_?(?P<func>[^:]+):[ \t]*#+[ \t]*(@"?(?P=func)"?| -- Begin function (?P=func))\n(?:\s*\.?Lfunc_begin[^:\n]*:\n)?[^:]*?'
    r'(?P<body>^##?[ \t]+[^:]+:.*?)\s*'
    r'^\s*(?:[^:\n]+?:\s*\n\s*\.size|\.cfi_endproc|\.globl|\.comm|\.(?:sub)?section|#+ -- End function)',
    flags=(re.M | re.S))

ASM_FUNCTION_ARM_RE = re.compile(
    r'^(?P<func>[0-9a-zA-Z_]+):\n' # f: (name of function)
    r'\s+\.fnstart\n' # .fnstart
    r'(?P<body>.*?)\n' # (body of the function)
    r'.Lfunc_end[0-9]+:', # .Lfunc_end0: or # -- End function
    flags=(re.M | re.S))

ASM_FUNCTION_AARCH64_RE = re.compile(
    r'^_?(?P<func>[^:]+):[ \t]*\/\/[ \t]*@"?(?P=func)"?( (Function|Tail Call))?\n'
    r'(?:[ \t]+.cfi_startproc\n)?'  # drop optional cfi noise
    r'(?P<body>.*?)\n'
    # This list is incomplete
    r'.Lfunc_end[0-9]+:\n',
    flags=(re.M | re.S))

ASM_FUNCTION_AMDGPU_RE = re.compile(
    r'^_?(?P<func>[^:]+):[ \t]*;+[ \t]*@"?(?P=func)"?\n[^:]*?'
    r'(?P<body>.*?)\n' # (body of the function)
    # This list is incomplete
    r'^\s*(\.Lfunc_end[0-9]+:\n|\.section)',
    flags=(re.M | re.S))

ASM_FUNCTION_HEXAGON_RE = re.compile(
    r'^_?(?P<func>[^:]+):[ \t]*//[ \t]*@"?(?P=func)"?\n[^:]*?'
    r'(?P<body>.*?)\n' # (body of the function)
    # This list is incomplete
    r'.Lfunc_end[0-9]+:\n',
    flags=(re.M | re.S))

ASM_FUNCTION_MIPS_RE = re.compile(
    r'^_?(?P<func>[^:]+):[ \t]*#+[ \t]*@"?(?P=func)"?\n[^:]*?' # f: (name of func)
    r'(?:^[ \t]+\.(frame|f?mask|set).*?\n)+'  # Mips+LLVM standard asm prologue
    r'(?P<body>.*?)\n'  # (body of the function)
    # Mips+LLVM standard asm epilogue
    r'(?:(^[ \t]+\.set[^\n]*?\n)*^[ \t]+\.end.*?\n)'
    r'(\$|\.L)func_end[0-9]+:\n', # $func_end0: (mips32 - O32) or
                                  # .Lfunc_end0: (mips64 - NewABI)
    flags=(re.M | re.S))

ASM_FUNCTION_MSP430_RE = re.compile(
    r'^_?(?P<func>[^:]+):[ \t]*;+[ \t]*@"?(?P=func)"?\n[^:]*?'
    r'(?P<body>.*?)\n'
    r'(\$|\.L)func_end[0-9]+:\n', # $func_end0:
    flags=(re.M | re.S))

ASM_FUNCTION_PPC_RE = re.compile(
    r'^_?(?P<func>[^:]+):[ \t]*#+[ \t]*@"?(?P=func)"?\n'
    r'.*?'
    r'\.Lfunc_begin[0-9]+:\n'
    r'(?:[ \t]+.cfi_startproc\n)?'
    r'(?:\.Lfunc_[gl]ep[0-9]+:\n(?:[ \t]+.*?\n)*)*'
    r'(?P<body>.*?)\n'
    # This list is incomplete
    r'(?:^[ \t]*(?:\.long[ \t]+[^\n]+|\.quad[ \t]+[^\n]+)\n)*'
    r'.Lfunc_end[0-9]+:\n',
    flags=(re.M | re.S))

ASM_FUNCTION_RISCV_RE = re.compile(
    r'^_?(?P<func>[^:]+):[ \t]*#+[ \t]*@"?(?P=func)"?\n'
    r'(?:\s*\.?L(?P=func)\$local:\n)?'  # optional .L<func>$local: due to -fno-semantic-interposition
    r'(?:\s*\.?Lfunc_begin[^:\n]*:\n)?[^:]*?'
    r'(?P<body>^##?[ \t]+[^:]+:.*?)\s*'
    r'.Lfunc_end[0-9]+:\n',
    flags=(re.M | re.S))

ASM_FUNCTION_LANAI_RE = re.compile(
    r'^_?(?P<func>[^:]+):[ \t]*!+[ \t]*@"?(?P=func)"?\n'
    r'(?:[ \t]+.cfi_startproc\n)?'  # drop optional cfi noise
    r'(?P<body>.*?)\s*'
    r'.Lfunc_end[0-9]+:\n',
    flags=(re.M | re.S))

ASM_FUNCTION_SPARC_RE = re.compile(
    r'^_?(?P<func>[^:]+):[ \t]*!+[ \t]*@"?(?P=func)"?\n'
    r'(?P<body>.*?)\s*'
    r'.Lfunc_end[0-9]+:\n',
    flags=(re.M | re.S))

ASM_FUNCTION_SYSTEMZ_RE = re.compile(
    r'^_?(?P<func>[^:]+):[ \t]*#+[ \t]*@"?(?P=func)"?\n'
    r'[ \t]+.cfi_startproc\n'
    r'(?P<body>.*?)\n'
    r'.Lfunc_end[0-9]+:\n',
    flags=(re.M | re.S))

ASM_FUNCTION_AARCH64_DARWIN_RE = re.compile(
    r'^_(?P<func>[^:]+):[ \t]*;[ \t]@"?(?P=func)"?\n'
    r'([ \t]*.cfi_startproc\n[\s]*)?'
    r'(?P<body>.*?)'
    r'([ \t]*.cfi_endproc\n[\s]*)?'
    r'^[ \t]*;[ \t]--[ \t]End[ \t]function',
    flags=(re.M | re.S))

ASM_FUNCTION_ARM_DARWIN_RE = re.compile(
    r'^[ \t]*\.globl[ \t]*_(?P<func>[^ \t])[ \t]*@[ \t]--[ \t]Begin[ \t]function[ \t]"?(?P=func)"?'
    r'(?P<directives>.*?)'
    r'^_(?P=func):\n[ \t]*'
    r'(?P<body>.*?)'
    r'^[ \t]*@[ \t]--[ \t]End[ \t]function',
    flags=(re.M | re.S ))

ASM_FUNCTION_ARM_MACHO_RE = re.compile(
    r'^_(?P<func>[^:]+):[ \t]*\n'
    r'([ \t]*.cfi_startproc\n[ \t]*)?'
    r'(?P<body>.*?)\n'
    r'[ \t]*\.cfi_endproc\n',
    flags=(re.M | re.S))

ASM_FUNCTION_ARM_IOS_RE = re.compile(
    r'^_(?P<func>[^:]+):[ \t]*\n'
    r'^Lfunc_begin(?P<id>[0-9][1-9]*):\n'
    r'(?P<body>.*?)'
    r'^Lfunc_end(?P=id):\n'
    r'^[ \t]*@[ \t]--[ \t]End[ \t]function',
    flags=(re.M | re.S))

ASM_FUNCTION_WASM32_RE = re.compile(
    r'^_?(?P<func>[^:]+):[ \t]*#+[ \t]*@"?(?P=func)"?\n'
    r'(?P<body>.*?)\n'
    r'^\s*(\.Lfunc_end[0-9]+:\n|end_function)',
    flags=(re.M | re.S))

# x86-specific scrub patterns used by scrub_asm_x86 to replace
# target-variable details (shuffle operands, stack offsets, RIP-relative
# symbols, ret mnemonics) with generic FileCheck wildcards.
SCRUB_X86_SHUFFLES_RE = (
    re.compile(
        r'^(\s*\w+) [^#\n]+#+ ((?:[xyz]mm\d+|mem)( \{%k\d+\}( \{z\})?)? = .*)$',
        flags=re.M))

SCRUB_X86_SHUFFLES_NO_MEM_RE = (
    re.compile(
        r'^(\s*\w+) [^#\n]+#+ ((?:[xyz]mm\d+|mem)( \{%k\d+\}( \{z\})?)? = (?!.*(?:mem)).*)$',
        flags=re.M))

SCRUB_X86_SPILL_RELOAD_RE = (
    re.compile(
        r'-?\d+\(%([er])[sb]p\)(.*(?:Spill|Reload))$',
        flags=re.M))
SCRUB_X86_SP_RE = re.compile(r'\d+\(%(esp|rsp)\)')
SCRUB_X86_RIP_RE = re.compile(r'[.\w]+\(%rip\)')
SCRUB_X86_LCP_RE = re.compile(r'\.LCPI[0-9]+_[0-9]+')
SCRUB_X86_RET_RE = re.compile(r'ret[l|q]')
def scrub_asm_x86(asm, args):
  """Canonicalize x86 assembly for FileCheck matching.

  Substitution order matters: whitespace/tab normalization first, then the
  target-specific generalizations, then comment/whitespace cleanup.
  """
  # Collapse interior whitespace runs (leading indentation survives) and
  # expand the indentation tabs to two spaces.
  scrubbed = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
  scrubbed = scrubbed.expandtabs(2)
  # Hide shuffle operands in favor of the self-describing asm comments.
  if getattr(args, 'no_x86_scrub_mem_shuffle', True):
    scrubbed = SCRUB_X86_SHUFFLES_NO_MEM_RE.sub(r'\1 {{.*#+}} \2', scrubbed)
  else:
    scrubbed = SCRUB_X86_SHUFFLES_RE.sub(r'\1 {{.*#+}} \2', scrubbed)
  # Generalize spill/reload offsets and whether sp or bp was the base.
  scrubbed = SCRUB_X86_SPILL_RELOAD_RE.sub(r'{{[-0-9]+}}(%\1{{[sb]}}p)\2', scrubbed)
  if getattr(args, 'x86_scrub_sp', True):
    # Generically match the stack offset of a memory operand.
    scrubbed = SCRUB_X86_SP_RE.sub(r'{{[0-9]+}}(%\1)', scrubbed)
  if getattr(args, 'x86_scrub_rip', False):
    # Generically match RIP-relative memory operands and constant-pool symbols.
    scrubbed = SCRUB_X86_RIP_RE.sub(r'{{.*}}(%rip)', scrubbed)
    scrubbed = SCRUB_X86_LCP_RE.sub(r'{{\.LCPI.*}}', scrubbed)
  if getattr(args, 'extra_scrub', False):
    # Avoid generating different checks for 32- and 64-bit because of 'retl' vs 'retq'.
    scrubbed = SCRUB_X86_RET_RE.sub(r'ret{{[l|q]}}', scrubbed)
  # Strip kill operands inserted into the asm, then trailing whitespace.
  scrubbed = common.SCRUB_KILL_COMMENT_RE.sub('', scrubbed)
  return common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', scrubbed)
def scrub_asm_amdgpu(asm, args):
  """Canonicalize AMDGPU assembly: normalize whitespace and tabs."""
  scrubbed = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
  scrubbed = scrubbed.expandtabs(2)
  return common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', scrubbed)
def scrub_asm_arm_eabi(asm, args):
  """Canonicalize ARM EABI assembly: normalize whitespace/tabs and drop
  compiler-inserted kill-operand comments."""
  scrubbed = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
  scrubbed = scrubbed.expandtabs(2)
  scrubbed = common.SCRUB_KILL_COMMENT_RE.sub('', scrubbed)
  return common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', scrubbed)
def scrub_asm_hexagon(asm, args):
  """Canonicalize Hexagon assembly: normalize whitespace and tabs."""
  scrubbed = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
  scrubbed = scrubbed.expandtabs(2)
  return common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', scrubbed)
def scrub_asm_powerpc(asm, args):
  """Canonicalize PowerPC assembly: normalize whitespace/tabs and strip
  loop-annotation comments while keeping the bare '#' token."""
  scrubbed = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
  scrubbed = scrubbed.expandtabs(2)
  # Strip unimportant comments, but leave the token '#' in place.
  scrubbed = common.SCRUB_LOOP_COMMENT_RE.sub(r'#', scrubbed)
  scrubbed = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', scrubbed)
  # Strip the tailing token '#', except where a line is only '#'.
  return common.SCRUB_TAILING_COMMENT_TOKEN_RE.sub(r'', scrubbed)
def scrub_asm_mips(asm, args):
  """Canonicalize MIPS assembly: normalize whitespace and tabs."""
  scrubbed = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
  scrubbed = scrubbed.expandtabs(2)
  return common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', scrubbed)
def scrub_asm_msp430(asm, args):
  """Canonicalize MSP430 assembly: normalize whitespace and tabs."""
  scrubbed = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
  scrubbed = scrubbed.expandtabs(2)
  return common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', scrubbed)
def scrub_asm_riscv(asm, args):
  """Canonicalize RISC-V assembly: normalize whitespace and tabs."""
  scrubbed = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
  scrubbed = scrubbed.expandtabs(2)
  return common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', scrubbed)
def scrub_asm_lanai(asm, args):
  """Canonicalize Lanai assembly: normalize whitespace and tabs."""
  scrubbed = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
  scrubbed = scrubbed.expandtabs(2)
  return common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', scrubbed)
def scrub_asm_sparc(asm, args):
  """Canonicalize SPARC assembly: normalize whitespace and tabs."""
  scrubbed = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
  scrubbed = scrubbed.expandtabs(2)
  return common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', scrubbed)
def scrub_asm_systemz(asm, args):
  """Canonicalize SystemZ assembly: normalize whitespace and tabs."""
  scrubbed = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
  scrubbed = scrubbed.expandtabs(2)
  return common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', scrubbed)
def scrub_asm_wasm32(asm, args):
  """Canonicalize wasm32 assembly: normalize whitespace and tabs."""
  scrubbed = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
  scrubbed = scrubbed.expandtabs(2)
  return common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', scrubbed)
def get_triple_from_march(march):
  """Map a -march value to a default target triple, defaulting to x86."""
  march_to_triple = {
      'amdgcn': 'amdgcn',
      'r600': 'r600',
      'mips': 'mips',
      'sparc': 'sparc',
      'hexagon': 'hexagon',
  }
  for arch_prefix, triple in march_to_triple.items():
    if march.startswith(arch_prefix):
      return triple
  print("Cannot find a triple. Assume 'x86'", file=sys.stderr)
  return 'x86'
def build_function_body_dictionary_for_triple(args, raw_tool_output, triple,
                                              prefixes, func_dict, func_order):
  """Select the scrubber/function-regex pair for `triple` and populate
  func_dict/func_order from the raw tool output."""
  target_handlers = {
      'i686': (scrub_asm_x86, ASM_FUNCTION_X86_RE),
      'x86': (scrub_asm_x86, ASM_FUNCTION_X86_RE),
      'i386': (scrub_asm_x86, ASM_FUNCTION_X86_RE),
      'aarch64': (scrub_asm_arm_eabi, ASM_FUNCTION_AARCH64_RE),
      'aarch64-apple-darwin': (scrub_asm_arm_eabi, ASM_FUNCTION_AARCH64_DARWIN_RE),
      'hexagon': (scrub_asm_hexagon, ASM_FUNCTION_HEXAGON_RE),
      'r600': (scrub_asm_amdgpu, ASM_FUNCTION_AMDGPU_RE),
      'amdgcn': (scrub_asm_amdgpu, ASM_FUNCTION_AMDGPU_RE),
      'arm': (scrub_asm_arm_eabi, ASM_FUNCTION_ARM_RE),
      'arm64': (scrub_asm_arm_eabi, ASM_FUNCTION_AARCH64_RE),
      'arm64e': (scrub_asm_arm_eabi, ASM_FUNCTION_AARCH64_DARWIN_RE),
      'arm64-apple-ios': (scrub_asm_arm_eabi, ASM_FUNCTION_AARCH64_DARWIN_RE),
      'armv7-apple-ios' : (scrub_asm_arm_eabi, ASM_FUNCTION_ARM_IOS_RE),
      'armv7-apple-darwin': (scrub_asm_arm_eabi, ASM_FUNCTION_ARM_DARWIN_RE),
      'thumb': (scrub_asm_arm_eabi, ASM_FUNCTION_ARM_RE),
      'thumb-macho': (scrub_asm_arm_eabi, ASM_FUNCTION_ARM_MACHO_RE),
      'thumbv5-macho': (scrub_asm_arm_eabi, ASM_FUNCTION_ARM_MACHO_RE),
      'thumbv7-apple-ios' : (scrub_asm_arm_eabi, ASM_FUNCTION_ARM_IOS_RE),
      'mips': (scrub_asm_mips, ASM_FUNCTION_MIPS_RE),
      'msp430': (scrub_asm_msp430, ASM_FUNCTION_MSP430_RE),
      'ppc32': (scrub_asm_powerpc, ASM_FUNCTION_PPC_RE),
      'powerpc': (scrub_asm_powerpc, ASM_FUNCTION_PPC_RE),
      'riscv32': (scrub_asm_riscv, ASM_FUNCTION_RISCV_RE),
      'riscv64': (scrub_asm_riscv, ASM_FUNCTION_RISCV_RE),
      'lanai': (scrub_asm_lanai, ASM_FUNCTION_LANAI_RE),
      'sparc': (scrub_asm_sparc, ASM_FUNCTION_SPARC_RE),
      's390x': (scrub_asm_systemz, ASM_FUNCTION_SYSTEMZ_RE),
      'wasm32': (scrub_asm_wasm32, ASM_FUNCTION_WASM32_RE),
  }

  # Pick the handler whose key is the LONGEST prefix of the triple so that
  # e.g. 'aarch64-apple-darwin' wins over plain 'aarch64'.
  handler = None
  best_prefix = ''
  for prefix, candidate in target_handlers.items():
    if triple.startswith(prefix) and len(prefix) > len(best_prefix):
      handler, best_prefix = candidate, prefix
  if handler is None:
    raise KeyError('Triple %r is not supported' % (triple))

  scrubber, function_re = handler
  common.build_function_body_dictionary(
      function_re, scrubber, [args], raw_tool_output, prefixes,
      func_dict, func_order, args.verbose, False, False)
##### Generator of assembly CHECK lines
def add_asm_checks(output_lines, comment_marker, prefix_list, func_dict, func_name):
  """Append FileCheck lines for one function's scrubbed assembly."""
  # Label format is based on ASM string.
  label_format = '{} %s-LABEL: %s%s:'.format(comment_marker)
  seen_globals = {}
  common.add_checks(output_lines, comment_marker, prefix_list, func_dict,
                    func_name, label_format, True, False, seen_globals)
| 38.112861 | 140 | 0.62234 |
4b2bb42cb6b141e3f7ae7b9ad1d43045f57eb201 | 11,697 | py | Python | tensorflow/python/kernel_tests/proto/decode_proto_op_test_base.py | uve/tensorflow | e08079463bf43e5963acc41da1f57e95603f8080 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/kernel_tests/proto/decode_proto_op_test_base.py | uve/tensorflow | e08079463bf43e5963acc41da1f57e95603f8080 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/kernel_tests/proto/decode_proto_op_test_base.py | uve/tensorflow | e08079463bf43e5963acc41da1f57e95603f8080 | [
"Apache-2.0"
] | null | null | null | # =============================================================================
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for decode_proto op."""
# Python3 preparedness imports.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from google.protobuf import text_format
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.kernel_tests.proto import proto_op_test_base as test_base
from tensorflow.python.kernel_tests.proto import test_example_pb2
class DecodeProtoOpTestBase(test_base.ProtoOpTestBase, parameterized.TestCase):
  """Base class for testing proto decoding ops.

  Subclasses supply the module providing `decode_proto`; the parameterized
  test methods below exercise it against the shared cases produced by
  `ProtoOpTestBase.named_parameters()`.
  """

  def __init__(self, decode_module, methodName='runTest'):  # pylint: disable=invalid-name
    """DecodeProtoOpTestBase initializer.

    Args:
      decode_module: a module containing the `decode_proto_op` method
      methodName: the name of the test method (same as for test.TestCase)
    """
    super(DecodeProtoOpTestBase, self).__init__(methodName)
    self._decode_module = decode_module

  def _compareValues(self, fd, vs, evs):
    """Compare lists/arrays of field values.

    Args:
      fd: proto field descriptor of the field being compared.
      vs: decoded values.
      evs: expected values.
    """
    if len(vs) != len(evs):
      self.fail('Field %s decoded %d outputs, expected %d' %
                (fd.name, len(vs), len(evs)))
    for i, ev in enumerate(evs):
      # Special case fuzzy match for float32. TensorFlow seems to mess with
      # MAX_FLT slightly and the test doesn't work otherwise.
      # TODO(nix): ask on TF list about why MAX_FLT doesn't pass through.
      if fd.cpp_type == fd.CPPTYPE_FLOAT:
        # Numpy isclose() is better than assertIsClose() which uses an absolute
        # value comparison.
        self.assertTrue(
            np.isclose(vs[i], ev), 'expected %r, actual %r' % (ev, vs[i]))
      elif fd.cpp_type == fd.CPPTYPE_STRING:
        # In Python3 string tensor values will be represented as bytes, so we
        # reencode the proto values to match that.
        self.assertEqual(vs[i], ev.encode('ascii'))
      else:
        # Doubles and other types pass through unscathed.
        self.assertEqual(vs[i], ev)

  def _compareProtos(self, batch_shape, sizes, fields, field_dict):
    """Compare protos of type TestValue.

    Args:
      batch_shape: the shape of the input tensor of serialized messages.
      sizes: int matrix of repeat counts returned by decode_proto
      fields: list of test_example_pb2.FieldSpec (types and expected values)
      field_dict: map from field names to decoded numpy tensors of values
    """
    # Check that expected values match.
    for field in fields:
      values = field_dict[field.name]
      self.assertEqual(dtypes.as_dtype(values.dtype), field.dtype)

      # Extension fields are described by PrimitiveValue; regular fields by
      # their descriptor on the expected TestValue message.
      if 'ext_value' in field.name:
        fd = test_example_pb2.PrimitiveValue()
      else:
        fd = field.value.DESCRIPTOR.fields_by_name[field.name]

      # Values has the same shape as the input plus an extra
      # dimension for repeats.
      self.assertEqual(list(values.shape)[:-1], batch_shape)

      # Nested messages are represented as TF strings, requiring
      # some special handling.
      if field.name == 'message_value' or 'ext_value' in field.name:
        vs = []
        for buf in values.flat:
          msg = test_example_pb2.PrimitiveValue()
          msg.ParseFromString(buf)
          vs.append(msg)
        if 'ext_value' in field.name:
          evs = field.value.Extensions[test_example_pb2.ext_value]
        else:
          evs = getattr(field.value, field.name)
        if len(vs) != len(evs):
          self.fail('Field %s decoded %d outputs, expected %d' %
                    (fd.name, len(vs), len(evs)))
        for v, ev in zip(vs, evs):
          self.assertEqual(v, ev)
        # Nested messages handled above; skip the scalar comparison path.
        continue

      # Maps a TF dtype to the PrimitiveValue field holding expected values.
      tf_type_to_primitive_value_field = {
          dtypes.bool:
              'bool_value',
          dtypes.float32:
              'float_value',
          dtypes.float64:
              'double_value',
          dtypes.int8:
              'int8_value',
          dtypes.int32:
              'int32_value',
          dtypes.int64:
              'int64_value',
          dtypes.string:
              'string_value',
          dtypes.uint8:
              'uint8_value',
          dtypes.uint32:
              'uint32_value',
          dtypes.uint64:
              'uint64_value',
      }
      tf_field_name = tf_type_to_primitive_value_field.get(field.dtype)
      if tf_field_name is None:
        self.fail('Unhandled tensorflow type %d' % field.dtype)

      self._compareValues(fd, values.flat,
                          getattr(field.value, tf_field_name))

  def _runDecodeProtoTests(self, fields, case_sizes, batch_shape, batch,
                           message_type, message_format, sanitize,
                           force_disordered=False):
    """Run decode tests on a batch of messages.

    Args:
      fields: list of test_example_pb2.FieldSpec (types and expected values)
      case_sizes: expected sizes array
      batch_shape: the shape of the input tensor of serialized messages
      batch: list of serialized messages
      message_type: descriptor name for messages
      message_format: format of messages, 'text' or 'binary'
      sanitize: whether to sanitize binary protobuf inputs
      force_disordered: whether to force fields encoded out of order.
    """
    if force_disordered:
      # Exercise code path that handles out-of-order fields by prepending extra
      # fields with tag numbers higher than any real field. Note that this won't
      # work with sanitization because that forces reserialization using a
      # trusted decoder and encoder.
      assert not sanitize
      extra_fields = test_example_pb2.ExtraFields()
      extra_fields.string_value = 'IGNORE ME'
      extra_fields.bool_value = False
      extra_msg = extra_fields.SerializeToString()
      batch = [extra_msg + msg for msg in batch]

    # Numpy silently truncates the strings if you don't specify dtype=object.
    batch = np.array(batch, dtype=object)
    batch = np.reshape(batch, batch_shape)

    field_names = [f.name for f in fields]
    output_types = [f.dtype for f in fields]

    with self.cached_session() as sess:
      sizes, vtensor = self._decode_module.decode_proto(
          batch,
          message_type=message_type,
          field_names=field_names,
          output_types=output_types,
          message_format=message_format,
          sanitize=sanitize)

      vlist = sess.run([sizes] + vtensor)
      sizes = vlist[0]
      # Values is a list of tensors, one for each field.
      value_tensors = vlist[1:]

      # Check that the repeat sizes are correct.
      self.assertTrue(
          np.all(np.array(sizes.shape) == batch_shape + [len(field_names)]))

      # Check that the decoded sizes match the expected sizes.
      self.assertEqual(len(sizes.flat), len(case_sizes))
      self.assertTrue(
          np.all(sizes.flat == np.array(
              case_sizes, dtype=np.int32)))

      field_dict = dict(zip(field_names, value_tensors))

      self._compareProtos(batch_shape, sizes, fields, field_dict)

  @parameterized.named_parameters(*test_base.ProtoOpTestBase.named_parameters())
  def testBinary(self, case):
    """Decodes binary-serialized protos."""
    batch = [value.SerializeToString() for value in case.values]
    self._runDecodeProtoTests(
        case.fields,
        case.sizes,
        list(case.shapes),
        batch,
        'tensorflow.contrib.proto.TestValue',
        'binary',
        sanitize=False)

  @parameterized.named_parameters(*test_base.ProtoOpTestBase.named_parameters())
  def testBinaryDisordered(self, case):
    """Decodes binary protos whose fields are encoded out of order."""
    batch = [value.SerializeToString() for value in case.values]
    self._runDecodeProtoTests(
        case.fields,
        case.sizes,
        list(case.shapes),
        batch,
        'tensorflow.contrib.proto.TestValue',
        'binary',
        sanitize=False,
        force_disordered=True)

  @parameterized.named_parameters(
      *test_base.ProtoOpTestBase.named_parameters(extension=False))
  def testPacked(self, case):
    """Decodes protos that use packed repeated-field serialization."""
    # Now try with the packed serialization.
    #
    # We test the packed representations by loading the same test case using
    # PackedTestValue instead of TestValue. To do this we rely on the text
    # format being the same for packed and unpacked fields, and reparse the
    # test message using the packed version of the proto.
    packed_batch = [
        # Note: float_format='.17g' is necessary to ensure preservation of
        # doubles and floats in text format.
        text_format.Parse(
            text_format.MessageToString(value, float_format='.17g'),
            test_example_pb2.PackedTestValue()).SerializeToString()
        for value in case.values
    ]

    self._runDecodeProtoTests(
        case.fields,
        case.sizes,
        list(case.shapes),
        packed_batch,
        'tensorflow.contrib.proto.PackedTestValue',
        'binary',
        sanitize=False)

  @parameterized.named_parameters(*test_base.ProtoOpTestBase.named_parameters())
  def testText(self, case):
    """Decodes text-format protos."""
    # Note: float_format='.17g' is necessary to ensure preservation of
    # doubles and floats in text format.
    text_batch = [
        text_format.MessageToString(
            value, float_format='.17g') for value in case.values
    ]

    self._runDecodeProtoTests(
        case.fields,
        case.sizes,
        list(case.shapes),
        text_batch,
        'tensorflow.contrib.proto.TestValue',
        'text',
        sanitize=False)

  @parameterized.named_parameters(*test_base.ProtoOpTestBase.named_parameters())
  def testSanitizerGood(self, case):
    """Decodes binary protos with input sanitization enabled."""
    batch = [value.SerializeToString() for value in case.values]
    self._runDecodeProtoTests(
        case.fields,
        case.sizes,
        list(case.shapes),
        batch,
        'tensorflow.contrib.proto.TestValue',
        'binary',
        sanitize=True)

  @parameterized.parameters((False), (True))
  def testCorruptProtobuf(self, sanitize):
    """Garbage input must raise DataLossError rather than crash."""
    corrupt_proto = 'This is not a binary protobuf'

    # Numpy silently truncates the strings if you don't specify dtype=object.
    batch = np.array(corrupt_proto, dtype=object)
    msg_type = 'tensorflow.contrib.proto.TestCase'
    field_names = ['sizes']
    field_types = [dtypes.int32]

    with self.assertRaisesRegexp(
        errors.DataLossError, 'Unable to parse binary protobuf'
        '|Failed to consume entire buffer'):
      self.evaluate(
          self._decode_module.decode_proto(
              batch,
              message_type=msg_type,
              field_names=field_names,
              output_types=field_types,
              sanitize=sanitize))
| 37.854369 | 91 | 0.638112 |
1cc4f05590314ddbf622feac2b2baba9a39089d6 | 1,760 | py | Python | majority rule/majority_rule.py | hide-dog/kaggle_titanic | 010d8b5621a54e95df9162265b655b91eeee00cf | [
"MIT"
] | null | null | null | majority rule/majority_rule.py | hide-dog/kaggle_titanic | 010d8b5621a54e95df9162265b655b91eeee00cf | [
"MIT"
] | null | null | null | majority rule/majority_rule.py | hide-dog/kaggle_titanic | 010d8b5621a54e95df9162265b655b91eeee00cf | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import glob
import csv
# ------------------------------------------------
# main
# ------------------------------------------------
def main():
    """Majority-vote ensemble of Titanic predictions.

    Reads every ``solution*`` CSV in the working directory, tallies the
    per-passenger votes, writes the majority prediction to
    ``majority_rule.csv`` and prints the accuracy against ``correct.csv``.
    """
    out_file = "majority_rule.csv"
    solution_files = glob.glob("solution*")

    # The first file is only used to size the vote tally.
    first_preds = np.loadtxt(solution_files[0], delimiter=',', skiprows=1,
                             usecols=(1), unpack=True)
    votes = np.zeros(len(first_preds), dtype=np.int16)

    # Accumulate the 0/1 predictions of every solution file, row by row.
    for fname in solution_files:
        _, preds = np.loadtxt(fname, delimiter=',', skiprows=1,
                              usecols=(0, 1), unpack=True)
        for row in range(len(preds)):
            votes[row] += preds[row]

    # A passenger is predicted to survive when at least half of the files
    # voted 1 (ties count as survival, same threshold as before).
    votes = (len(solution_files) / 2 <= votes).astype(np.int16)

    # Attach the PassengerId column from the test set and write the result.
    test = pd.read_csv("test.csv")
    passenger_ids = np.array(test["PassengerId"]).astype(int)
    solution = pd.DataFrame(votes, passenger_ids, columns=["Survived"])
    solution.to_csv(out_file, index_label=["PassengerId"])

    # Score the ensemble against the known answers.
    answers = pd.read_csv("correct.csv")["Survived"].values
    hits = 0.0
    for row in range(len(votes)):
        if answers[row] - votes[row] == 0.0:
            hits += 1.0
    print(hits / len(votes))


# ------------------------------------------------
# execution
# ------------------------------------------------
if __name__ == "__main__":
    main()
fc891624af957e64cc2b4894fff16eb343846c1f | 317 | py | Python | app.py | leirbag95/instaget | c0403742d065ba7797039382f43323d265bde8bc | [
"Apache-2.0"
] | null | null | null | app.py | leirbag95/instaget | c0403742d065ba7797039382f43323d265bde8bc | [
"Apache-2.0"
] | null | null | null | app.py | leirbag95/instaget | c0403742d065ba7797039382f43323d265bde8bc | [
"Apache-2.0"
] | null | null | null | from src import InstaGet
from os import environ
import subprocess
import sys
if __name__ == "__main__":
try:
username = str(input("username: "))
except Exception as e:
raise Exception(e)
instaget = InstaGet(username=username)
profile = instaget.get_profile()
print(profile)
| 19.8125 | 43 | 0.671924 |
e63b81676f36c455ff1f220721726184bb8994b1 | 5,322 | py | Python | Gem/PythonTests/Automated/test_suites/periodic/PhysicalSkyComponent_test_case.py | incisor/o3de-atomtest | 026fef06827bf0dd559510882df5cb426ab00a99 | [
"Apache-2.0",
"MIT"
] | 2 | 2021-07-18T11:20:41.000Z | 2022-02-01T20:17:50.000Z | Gem/PythonTests/Automated/test_suites/periodic/PhysicalSkyComponent_test_case.py | incisor/o3de-atomtest | 026fef06827bf0dd559510882df5cb426ab00a99 | [
"Apache-2.0",
"MIT"
] | 5 | 2021-07-14T02:24:07.000Z | 2021-10-04T21:24:35.000Z | Gem/PythonTests/Automated/test_suites/periodic/PhysicalSkyComponent_test_case.py | incisor/o3de-atomtest | 026fef06827bf0dd559510882df5cb426ab00a99 | [
"Apache-2.0",
"MIT"
] | 7 | 2021-07-06T18:21:14.000Z | 2021-12-06T09:12:40.000Z | """
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
Hydra script that is used to test the Physical Sky component functionality inside the Editor.
Opens the MeshTest level and creates a "Skybox" entity and attaches a Physical Sky component.
It then modifies the Intensity Mode, Sky Intensity, Sun Intensity, Turbidity, and Sun Radius Factor propertie.s
A screenshot is taken once all options are set on the Physical Sky component.
Results are verified using log messages & screenshot comparisons diffed against golden images.
See the run() function for more in-depth test info.
"""
import os
import sys
import math
import azlmbr.editor
import azlmbr.legacy.general as general
import azlmbr.paths
from azlmbr.entity import EntityId
sys.path.append(os.path.join(azlmbr.paths.devassets, "Gem", "PythonTests"))
from Automated.atom_utils.automated_test_utils import TestHelper as helper
from Automated.atom_utils.screenshot_utils import ScreenshotHelper
COMPONENT_PROPERTIES = [
'Controller',
'Controller|Configuration',
'Controller|Configuration|Intensity Mode',
'Controller|Configuration|Sky Intensity',
'Controller|Configuration|Sun Intensity',
'Controller|Configuration|Turbidity',
'Controller|Configuration|Sun Radius Factor'
]
def run():
    """
    Test Case - Physical Sky:
    1. Opens the "EmptyLevel" level and creates a new "Skybox" entity with Physical Sky component attached.
    2. Sets the Sky Intensity property to 3 (EV100).
    3. Sets the Sun Intensity property to 4.
    4. Sets the Turbidity property to 2.
    5. Sets the Sun Radius Factor to 2.
    6. Verifies all of the above properties were set correctly on the Physical Sky component.
    7. Sets the transform value of the Physical Sky to azlmbr.math.Vector3(math.pi/8.0, 0.0, math.pi)
    8. Enters game mode and takes a screenshot for comparison.
    9. Closes the Editor and the test ends.

    Tests will fail immediately if any of these log lines are found:
    1. Trace::Assert
    2. Trace::Error
    3. Traceback (most recent call last):

    :return: None
    """
    # Open MeshTest level.
    helper.init_idle()
    helper.open_level("MeshTest")

    # Create "Skybox" entity and attach Physical Sky component to it, verify
    # properties are valid for the component.
    myEntityId = azlmbr.editor.ToolsApplicationRequestBus(azlmbr.bus.Broadcast, 'CreateNewEntity', EntityId())
    azlmbr.editor.EditorEntityAPIBus(azlmbr.bus.Event, 'SetName', myEntityId, "Skybox")
    if myEntityId.IsValid():
        general.log("Entity successfully created.")
    component = helper.attach_component_to_entity(myEntityId, 'Physical Sky')
    helper.compare_property_list(component, COMPONENT_PROPERTIES)

    # (property path, value to set, label used in the verification log).
    # The two intensity values are in EV100. Table-driven instead of the
    # previous copy-pasted set/verify blocks, so paths, values and labels
    # live in one place.
    property_specs = [
        (COMPONENT_PROPERTIES[3], 3, "Sky Intensity"),  # in EV100
        (COMPONENT_PROPERTIES[4], 4, "Sun Intensity"),  # in EV100
        (COMPONENT_PROPERTIES[5], 2, "Turbidity"),
        (COMPONENT_PROPERTIES[6], 2, "Sun Radius"),
    ]

    # Set every property first (same bus-call order as before) ...
    for property_path, value, _ in property_specs:
        azlmbr.editor.EditorComponentAPIBus(
            azlmbr.bus.Broadcast, 'SetComponentProperty', component, property_path, value)

    # ... then verify each value round-trips through the component.
    for property_path, value, label in property_specs:
        componentProperty = azlmbr.editor.EditorComponentAPIBus(
            azlmbr.bus.Broadcast, 'GetComponentProperty', component, property_path)
        if componentProperty.GetValue() == value:
            general.log("{} is correctly set".format(label))

    # Set transform of physical sky
    eulerAngle = azlmbr.math.Vector3(math.pi / 8.0, 0.0, math.pi)
    azlmbr.components.TransformBus(azlmbr.bus.Event, 'SetLocalRotation', myEntityId, eulerAngle)

    # generate screenshot and compare with golden
    ScreenshotHelper(general.idle_wait_frames).capture_screenshot_blocking_in_game_mode(
        'screenshot_atom_PhysicalSkyComponent.ppm')
    helper.close_editor()


if __name__ == "__main__":
    run()
| 42.919355 | 112 | 0.756295 |
b61d7e8dabafb604bdca63d8e4b563d89d442d3e | 1,538 | py | Python | sdk/python/pulumi_alicloud/gpdb/__init__.py | pulumi/pulumi-alicloud | 9c34d84b4588a7c885c6bec1f03b5016e5a41683 | [
"ECL-2.0",
"Apache-2.0"
] | 42 | 2019-03-18T06:34:37.000Z | 2022-03-24T07:08:57.000Z | sdk/python/pulumi_alicloud/gpdb/__init__.py | pulumi/pulumi-alicloud | 9c34d84b4588a7c885c6bec1f03b5016e5a41683 | [
"ECL-2.0",
"Apache-2.0"
] | 152 | 2019-04-15T21:03:44.000Z | 2022-03-29T18:00:57.000Z | sdk/python/pulumi_alicloud/gpdb/__init__.py | pulumi/pulumi-alicloud | 9c34d84b4588a7c885c6bec1f03b5016e5a41683 | [
"ECL-2.0",
"Apache-2.0"
] | 3 | 2020-08-26T17:30:07.000Z | 2021-07-05T01:37:45.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from .connection import *
from .elastic_instance import *
from .get_instances import *
from .get_zones import *
from .instance import *
from . import outputs
def _register_module():
    """Register this package's resource types with the Pulumi runtime.

    Auto-generated by tfgen (see the file header): the registration lets the
    Pulumi engine rehydrate resources of these types from their URNs.
    """
    # Imports are kept local to the function — presumably to avoid import
    # cycles during package initialization (typical tfgen output).
    import pulumi
    from .. import _utilities

    class Module(pulumi.runtime.ResourceModule):
        # Package version, resolved once when this module object is created.
        _version = _utilities.get_semver_version()

        def version(self):
            return Module._version

        def construct(self, name: str, typ: str, urn: str) -> pulumi.Resource:
            # Map each fully-qualified Pulumi type token to its resource class.
            if typ == "alicloud:gpdb/connection:Connection":
                return Connection(name, pulumi.ResourceOptions(urn=urn))
            elif typ == "alicloud:gpdb/elasticInstance:ElasticInstance":
                return ElasticInstance(name, pulumi.ResourceOptions(urn=urn))
            elif typ == "alicloud:gpdb/instance:Instance":
                return Instance(name, pulumi.ResourceOptions(urn=urn))
            else:
                raise Exception(f"unknown resource type {typ}")

    # One shared module instance serves all three type registrations.
    _module_instance = Module()
    pulumi.runtime.register_resource_module("alicloud", "gpdb/connection", _module_instance)
    pulumi.runtime.register_resource_module("alicloud", "gpdb/elasticInstance", _module_instance)
    pulumi.runtime.register_resource_module("alicloud", "gpdb/instance", _module_instance)


_register_module()
| 37.512195 | 97 | 0.691808 |
9456883c37da11e7315132d2928421e27defaad7 | 2,719 | py | Python | RSA/rsa.py | conejo11/PAA | a97debddd63925a5f54cd50443e49ad313e9b966 | [
"MIT"
] | null | null | null | RSA/rsa.py | conejo11/PAA | a97debddd63925a5f54cd50443e49ad313e9b966 | [
"MIT"
] | null | null | null | RSA/rsa.py | conejo11/PAA | a97debddd63925a5f54cd50443e49ad313e9b966 | [
"MIT"
] | 1 | 2019-10-16T14:38:47.000Z | 2019-10-16T14:38:47.000Z | import math
import random
def extendedEuclidean(a, b):
    """Extended Euclidean algorithm.

    Returns a tuple ``(g, x, y)`` with ``g == gcd(a, b)`` and the Bezout
    coefficients satisfying ``a*x + b*y == g``.
    """
    # Base case: gcd(0, b) = b = 0*0 + b*1.
    if a == 0:
        return (b, 0, 1)
    # Recurse on (b mod a, a) and rewind the coefficients.
    g, x_prev, y_prev = extendedEuclidean(b % a, a)
    return (g, y_prev - (b // a) * x_prev, x_prev)
################################################################################
################################################################################
################################################################################
def fermatPrimalityTest(n):
    """Probabilistic Fermat primality test with 50 random witnesses.

    Returns True when ``n`` is probably prime, False when it is certainly
    composite (or n <= 1). Carmichael numbers can still fool the test, as
    with any plain Fermat check.
    """
    if n <= 1:
        return False
    # 2 and 3 are prime; for them randint(2, n - 1) would be degenerate.
    if n <= 3:
        return True
    for _ in range(50):
        # Witness drawn from [2, n-1]. The original drew randint(2, n) - 1,
        # i.e. from [1, n-1]: the base 1 satisfies 1**(n-1) % n == 1 for
        # every n, wasting that round of the test.
        witness = random.randint(2, n - 1)
        if pow(witness, n - 1, n) != 1:
            return False
    return True
def geraPrimo(bits):
    """Return a probable prime of at most ``bits`` + 1 bits.

    Picks a random odd starting point and walks upward in steps of two
    until the Fermat test accepts a candidate.
    """
    candidate = random.randint(0, 2 ** bits)
    candidate |= 1  # force the candidate odd (same as +1 on even values)
    while not fermatPrimalityTest(candidate):
        candidate += 2
    return candidate
def chavePublica(bits):
    """Generate RSA public-key material.

    Returns ``(e, p, q)`` where ``p`` and ``q`` are distinct probable
    primes of roughly ``bits`` bits and ``e`` is coprime with
    ``(p - 1) * (q - 1)``.
    """
    p = geraPrimo(bits)
    q = geraPrimo(bits)
    # Redraw q until the two primes differ.
    while q == p:
        q = geraPrimo(bits)
    phi = (p - 1) * (q - 1)
    # Pick a public exponent coprime with phi.
    while True:
        e = random.randint(0, 2 ** 16)
        if math.gcd(e, phi) == 1:
            break
    return (e, p, q)
def chavePrivada(e, p, q):
    """Derive the RSA private key ``(d, n)`` from ``e`` and the primes.

    ``d`` is the modular inverse of ``e`` modulo ``(p-1)*(q-1)``, shifted
    into the positive range when the extended Euclidean algorithm hands
    back a coefficient below 1.
    """
    phi = (p - 1) * (q - 1)
    _, d, _ = extendedEuclidean(e, phi)
    if d < 1:
        d += phi
    return (int(d), p * q)
def criptografa(mensagem, e, n):
    """RSA-encrypt a text message character by character.

    Returns a list of integers: ``ord(ch) ** e mod n`` for each character.
    """
    return [pow(ord(ch), e, n) for ch in mensagem]
def descriptografa(mensagem, d, n):
    """RSA-decrypt a list of cipher blocks back into characters.

    Each item may be an int or a numeric string (the file round trip in
    this module produces strings). Returns a list of one-char strings.
    """
    return [chr(pow(int(block), d, n)) for block in mensagem]
def quebra_forcabruta(n):
    """Factor an odd modulus ``n`` by brute-force trial division.

    Walks odd candidates downward from just above sqrt(n) and returns
    ``(p, n // p)`` for the first divisor found, so ``p >= sqrt(n)``.
    """
    # First odd integer at or above floor(sqrt(n)) + 1.
    divisor = (int(math.sqrt(n)) + 1) | 1
    while n % divisor:
        divisor -= 2
    return (divisor, n // divisor)
def G(x, c):
    """Polynomial step ``x**2 + c`` used by Pollard's rho iteration."""
    return x * x + c
def pollard_rho(n):
    """Pollard's rho factorization with Floyd cycle detection.

    Returns ``(d, n // d)`` where ``d > 1`` divides ``n``; on an unlucky
    cycle the trivial split ``(n, 1)`` can come back.
    """
    # Random start point and random offset for the map x -> G(x, c) mod n.
    tortoise = random.randint(1, n)
    offset = random.randint(1, n)
    hare = tortoise
    divisor = 1
    # The hare advances two steps for every tortoise step; a repeated
    # difference exposes a factor through the gcd.
    while divisor == 1:
        tortoise = G(tortoise, offset) % n
        hare = G(G(hare, offset), offset) % n
        divisor = math.gcd(abs(tortoise - hare), n)
    return (divisor, n // divisor)
def main():
    """Demo driver for the toy RSA implementation.

    Encrypts the text in ``mensagem.csv`` into ``cript.csv``, reads the
    cipher text back, decrypts it into ``uncript.csv``, then tries to
    break the modulus with Pollard's rho.
    """
    bits = 64
    msg = open("mensagem.csv", "r")
    encripted = open("cript.csv", "w")
    uncripted = open("uncript.csv", "w")
    # Key generation: public (e, n) and private (d, n).
    e, p, q = chavePublica(bits)
    d, n = chavePrivada(e, p, q)
    # Encrypt the whole message; ``a`` is a list of integer cipher blocks.
    a = criptografa(msg.read(), e, n)
    # msg.truncate(0)
    # print(msg.read())
    # Persist the cipher blocks separated by single spaces.
    for i in a:
        encripted.write(str(i))
        encripted.write(' ')
    encripted.close()
    # Round trip: read the cipher text back; split() yields strings.
    encripted = open("cript.csv", "r")
    c = encripted.read().split()
    # NOTE(review): ``c`` holds strings while ``a`` holds ints, so this
    # debug comparison always prints False even when the round trip worked.
    print(c==a)
    print(a)
    print('\n')
    print('\n')
    print(c)
    b = descriptografa(c, d, n)
    print(b)
    for j in b:
        uncripted.write(str(j))
    msg.close()
    encripted.close()
    uncripted.close()
    print(d)
    print((p,q))
    # Attempt to factor the modulus, i.e. break the key.
    print(pollard_rho(n))
    #print(quebra_forcabruta(n))
    # print(descriptografa(a, d, n))
    # print((p,q))
    # print(quebra_forcabruta(n, bits))


if __name__ == '__main__':
    main()
| 18.623288 | 80 | 0.500552 |
a4104bd0c94ca62ea7fc1647ea97c5112f244ce2 | 7,892 | py | Python | src/schnetpack/nn/base.py | lizhifeng1998/schnetpack | 7a665f19b785addcc8f3e0abf9b1fd8fa6fe2c3d | [
"MIT"
] | 450 | 2018-09-04T08:37:47.000Z | 2022-03-30T08:05:37.000Z | src/schnetpack/nn/base.py | lizhifeng1998/schnetpack | 7a665f19b785addcc8f3e0abf9b1fd8fa6fe2c3d | [
"MIT"
] | 239 | 2018-09-11T21:09:08.000Z | 2022-03-18T09:25:11.000Z | src/schnetpack/nn/base.py | lizhifeng1998/schnetpack | 7a665f19b785addcc8f3e0abf9b1fd8fa6fe2c3d | [
"MIT"
] | 166 | 2018-09-13T13:01:06.000Z | 2022-03-31T12:59:12.000Z | import torch
from torch import nn
from torch.nn.init import xavier_uniform_
from schnetpack.nn.initializers import zeros_initializer
__all__ = ["Dense", "GetItem", "ScaleShift", "Standardize", "Aggregate"]
class Dense(nn.Linear):
    r"""Linear transformation with an optional nonlinearity on top.

    .. math::
       y = activation(xW^T + b)

    Args:
        in_features (int): number of input feature :math:`x`.
        out_features (int): number of output features :math:`y`.
        bias (bool, optional): if False, the layer will not adapt bias :math:`b`.
        activation (callable, optional): if None, no activation function is used.
        weight_init (callable, optional): in-place initializer applied to the
            weight tensor.
        bias_init (callable, optional): in-place initializer applied to the
            bias tensor.
    """

    def __init__(
        self,
        in_features,
        out_features,
        bias=True,
        activation=None,
        weight_init=xavier_uniform_,
        bias_init=zeros_initializer,
    ):
        # Initializers must be stored before nn.Linear.__init__, because
        # that constructor invokes reset_parameters() below.
        self.weight_init = weight_init
        self.bias_init = bias_init
        self.activation = activation
        super(Dense, self).__init__(in_features, out_features, bias)

    def reset_parameters(self):
        """Re-apply the configured initializers to weight and bias."""
        self.weight_init(self.weight)
        if self.bias is not None:
            self.bias_init(self.bias)

    def forward(self, inputs):
        """Compute the (optionally activated) affine transformation.

        Args:
            inputs (dict of torch.Tensor): batch of input values.

        Returns:
            torch.Tensor: layer output.
        """
        transformed = super(Dense, self).forward(inputs)
        return self.activation(transformed) if self.activation else transformed
class GetItem(nn.Module):
    """Layer that extracts one entry from a SchNetPack input dictionary.

    Args:
        key (str): dictionary key looked up in ``forward``.
    """

    def __init__(self, key):
        super(GetItem, self).__init__()
        self.key = key

    def forward(self, inputs):
        """Look up the configured key.

        Args:
            inputs (dict of torch.Tensor): SchNetPack dictionary of input
                tensors.

        Returns:
            torch.Tensor: the value stored under ``self.key``.
        """
        return inputs[self.key]
class ScaleShift(nn.Module):
    r"""De-standardization layer: rescale by stddev, then shift by mean.

    .. math::
       y = x \times \sigma + \mu

    Args:
        mean (torch.Tensor): shift value :math:`\mu`.
        stddev (torch.Tensor): scale value :math:`\sigma`.
    """

    def __init__(self, mean, stddev):
        super(ScaleShift, self).__init__()
        # Buffers follow the module across devices but are never trained.
        self.register_buffer("mean", mean)
        self.register_buffer("stddev", stddev)

    def forward(self, input):
        """Scale and shift the input tensor.

        Args:
            input (torch.Tensor): input data.

        Returns:
            torch.Tensor: rescaled and shifted tensor.
        """
        return input * self.stddev + self.mean
class Standardize(nn.Module):
    r"""Standardization layer: subtract mean, divide by stddev.

    .. math::
       y = \frac{x - \mu}{\sigma}

    Args:
        mean (torch.Tensor): mean value :math:`\mu`.
        stddev (torch.Tensor): standard deviation :math:`\sigma`.
        eps (float, optional): offset added to stddev so the division can
            never hit exactly zero.
    """

    def __init__(self, mean, stddev, eps=1e-9):
        super(Standardize, self).__init__()
        self.register_buffer("mean", mean)
        self.register_buffer("stddev", stddev)
        # Broadcast eps to stddev's shape and keep it as a buffer as well.
        self.register_buffer("eps", torch.ones_like(stddev) * eps)

    def forward(self, input):
        """Standardize the input tensor.

        Args:
            input (torch.Tensor): input data.

        Returns:
            torch.Tensor: standardized tensor.
        """
        centered = input - self.mean
        return centered / (self.stddev + self.eps)
class Aggregate(nn.Module):
    """Sum or mean pooling along one axis, with optional masking.

    Args:
        axis (int): dimension to pool over.
        mean (bool, optional): if True, divide the sum by the number of
            contributing entries (masked average) instead of plain summing.
        keepdim (bool, optional): whether the mask count keeps the pooled
            dimension.
    """

    def __init__(self, axis, mean=False, keepdim=True):
        super(Aggregate, self).__init__()
        self.average = mean
        self.axis = axis
        self.keepdim = keepdim

    def forward(self, input, mask=None):
        """Pool ``input`` along the configured axis.

        Args:
            input (torch.Tensor): data to pool.
            mask (torch.Tensor, optional): 0/1 mask; expected one dimension
                smaller than ``input`` (it is unsqueezed on the last axis),
                e.g. a neighbors mask.

        Returns:
            torch.Tensor: pooled tensor.
        """
        # Zero out masked entries before summing.
        if mask is not None:
            input = input * mask[..., None]
        pooled = torch.sum(input, self.axis)
        if not self.average:
            return pooled
        # For the mean, divide by the number of valid entries, clamped to
        # at least one to avoid division by zero on fully masked rows.
        if mask is None:
            count = input.size(self.axis)
        else:
            count = torch.sum(mask, self.axis, keepdim=self.keepdim)
            count = torch.max(count, other=torch.ones_like(count))
        return pooled / count
class MaxAggregate(nn.Module):
    """Max pooling over all atoms along one axis, with optional masking.

    Args:
        axis (int): dimension to pool over.
    """

    def __init__(self, axis):
        super().__init__()
        self.axis = axis

    def forward(self, input, mask=None):
        """Return the per-feature maximum along the configured axis.

        Args:
            input (torch.Tensor): input data.
            mask (torch.Tensor, optional): 0/1 mask; e.g. neighbors mask.

        Returns:
            torch.Tensor: layer output.
        """
        if mask is not None:
            # Align the mask's rank with the input, then replace masked
            # entries by the global minimum so they can never win the max.
            if mask.dim() < input.dim():
                mask = torch.unsqueeze(mask, -1)
            input = torch.where(mask > 0, input, torch.min(input))
        values, _ = torch.max(input, self.axis)
        return values
class SoftmaxAggregate(nn.Module):
    """Smooth maximum pooling: a softmax-weighted sum along one axis.

    Each entry contributes with its softmax weight along ``axis``, so the
    result approximates the maximum while remaining differentiable.

    Args:
        axis (int): dimension to pool over.
    """

    def __init__(self, axis):
        super().__init__()
        self.axis = axis

    def forward(self, input, mask=None):
        """Return the softmax-weighted sum of ``input`` along the axis.

        Args:
            input (torch.Tensor): input data.
            mask (torch.Tensor, optional): 0/1 mask; e.g. neighbors mask.

        Returns:
            torch.Tensor: layer output.
        """
        weights = torch.exp(input)
        if mask is not None:
            # Align the mask rank, then zero the weights of masked entries
            # so they drop out of numerator and normalizer alike.
            if mask.dim() < input.dim():
                mask = torch.unsqueeze(mask, -1)
            weights = torch.where(mask > 0, weights, torch.zeros_like(weights))
        # Normalize into a probability distribution along the pooled axis.
        weights = weights / torch.sum(weights, self.axis, keepdim=True)
        return torch.sum(input * weights, self.axis)
| 28.490975 | 85 | 0.592499 |
b293ddfa7ba20f5b009ec834da13ed062057e693 | 420 | py | Python | src/junit_report/__init__.py | eliorerz/junit-report | e0ed2c5b5b38500b020a2dc09a7ff172d01609ed | [
"MIT"
] | 4 | 2021-03-30T16:24:40.000Z | 2021-04-04T12:43:03.000Z | src/junit_report/__init__.py | eliorerz/junit-report | e0ed2c5b5b38500b020a2dc09a7ff172d01609ed | [
"MIT"
] | 2 | 2021-04-05T08:49:22.000Z | 2021-04-11T10:19:53.000Z | src/junit_report/__init__.py | eliorerz/junit-report | e0ed2c5b5b38500b020a2dc09a7ff172d01609ed | [
"MIT"
] | null | null | null | from .decorators import JunitFixtureTestCase, DuplicateSuiteError, JunitTestCase, JunitTestSuite, TestCaseCategories
from .json_junit_exporter import JsonJunitExporter, CaseFormatKeys
from .utils import CaseFailure
__all__ = [
"JunitTestCase",
"CaseFailure",
"TestCaseCategories",
"JunitFixtureTestCase",
"JunitTestSuite",
"JsonJunitExporter",
"CaseFormatKeys",
"DuplicateSuiteError",
]
| 28 | 116 | 0.769048 |
05f9ae21b174d31f909f15c00df2952244050ffa | 215,624 | py | Python | tensorflow/python/framework/ops.py | tianyapiaozi/tensorflow | fb3ce0467766a8e91f1da0ad7ada7c24fde7a73a | [
"Apache-2.0"
] | 71 | 2017-05-25T16:02:15.000Z | 2021-06-09T16:08:08.000Z | tensorflow/python/framework/ops.py | tianyapiaozi/tensorflow | fb3ce0467766a8e91f1da0ad7ada7c24fde7a73a | [
"Apache-2.0"
] | 133 | 2017-04-26T16:49:49.000Z | 2019-10-15T11:39:26.000Z | tensorflow/python/framework/ops.py | tianyapiaozi/tensorflow | fb3ce0467766a8e91f1da0ad7ada7c24fde7a73a | [
"Apache-2.0"
] | 26 | 2017-04-12T16:25:44.000Z | 2018-10-30T10:10:15.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes and functions used to construct graphs."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import linecache
import os
import re
import sys
import threading
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import function_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.core.framework import op_def_pb2
from tensorflow.core.framework import versions_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import pywrap_tensorflow as c_api
from tensorflow.python.eager import context
from tensorflow.python.eager import core
from tensorflow.python.eager import tape
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import cpp_shape_inference_pb2
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import registry
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import versions
from tensorflow.python.ops import control_flow_util
from tensorflow.python.platform import app
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import decorator_utils
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.deprecation import deprecated_args
from tensorflow.python.util.tf_export import tf_export
# Temporary global switches determining if we should enable the work-in-progress
# calls to the C API. These will be removed once all functionality is supported.
_USE_C_API = True
# Shape inference through the C API is on by default; set the
# TF_C_API_GRAPH_CONSTRUCTION_SHAPES environment variable to "0" to fall back
# to the Python shape-inference path (see Tensor.shape / Tensor.set_shape).
_USE_C_SHAPES = os.getenv("TF_C_API_GRAPH_CONSTRUCTION_SHAPES", "1") != "0"
def tensor_id(tensor):
  """Returns a unique identifier for this Tensor."""
  # Every Tensor is stamped with a program-unique integer id at construction
  # time; surface it here so callers avoid touching the private attribute.
  unique_id = tensor._id  # pylint: disable=protected-access
  return unique_id
class _NullContextmanager(object):
def __enter__(self):
pass
def __exit__(self, type_arg, value_arg, traceback_arg):
return False # False values do not suppress exceptions
def _override_helper(clazz_object, operator, func):
"""Overrides (string) operator on Tensors to call func.
Args:
clazz_object: the class to override for; either Tensor or SparseTensor.
operator: the string name of the operator to override.
func: the function that replaces the overridden operator.
Raises:
ValueError: If operator has already been overwritten,
or if operator is not allowed to be overwritten.
"""
existing = getattr(clazz_object, operator, None)
if existing is not None:
# Check to see if this is a default method-wrapper or slot wrapper which
# will be true for the comparison operators.
if not isinstance(existing, type(object.__lt__)):
raise ValueError("operator %s cannot be overwritten again on class %s." %
(operator, clazz_object))
if operator not in Tensor.OVERLOADABLE_OPERATORS:
raise ValueError("Overriding %s is disallowed" % operator)
setattr(clazz_object, operator, func)
def _as_graph_element(obj):
"""Convert `obj` to a graph element if possible, otherwise return `None`.
Args:
obj: Object to convert.
Returns:
The result of `obj._as_graph_element()` if that method is available;
otherwise `None`.
"""
conv_fn = getattr(obj, "_as_graph_element", None)
if conv_fn and callable(conv_fn):
return conv_fn()
return None
# Registry of "tensor-like" types, populated via
# `register_dense_tensor_like_type`. Kept as a tuple so it can be handed
# directly to `isinstance` in `is_dense_tensor_like`.
_TENSOR_LIKE_TYPES = tuple()
def is_dense_tensor_like(t):
  """EXPERIMENTAL: Returns true if `t` implements the tensor interface.

  See `register_dense_tensor_like_type()` for the current definition of a
  "tensor-like type".

  Args:
    t: An object.

  Returns:
    True iff `t` is an instance of one of the registered "tensor-like" types.
  """
  registered_types = _TENSOR_LIKE_TYPES
  return isinstance(t, registered_types)
def register_dense_tensor_like_type(tensor_type):
  """EXPERIMENTAL: Registers `tensor_type` as implementing the tensor interface.

  A "tensor-like type" can represent a single dense tensor, and implements
  the `name` and `dtype` properties.

  Args:
    tensor_type: A type implementing the tensor interface.

  Raises:
    TypeError: If `tensor_type` does not implement the tensor interface.
  """
  # The type must expose `name` and `dtype` as real properties; a missing
  # attribute and a non-property attribute are both rejected.
  try:
    name_attr = tensor_type.name
  except AttributeError:
    raise TypeError("Type %s does not define a `name` property" %
                    tensor_type.__name__)
  if not isinstance(name_attr, property):
    raise TypeError("Type %s does not define a `name` property" %
                    tensor_type.__name__)
  try:
    dtype_attr = tensor_type.dtype
  except AttributeError:
    raise TypeError("Type %s does not define a `dtype` property" %
                    tensor_type.__name__)
  if not isinstance(dtype_attr, property):
    raise TypeError("Type %s does not define a `dtype` property" %
                    tensor_type.__name__)
  # We expect this list to be small, so choose quadratic complexity
  # for registration, so that we have a tuple that can be used for
  # more efficient `isinstance` checks later.
  global _TENSOR_LIKE_TYPES
  _TENSOR_LIKE_TYPES = _TENSOR_LIKE_TYPES + (tensor_type,)
def uid():
  """A unique (within this program execution) integer."""
  # Delegates to the C extension; ids issued here are also used to stamp
  # Tensor objects at construction time (see Tensor.__init__).
  return c_api.TFE_Py_UID()
def numpy_text(tensor, is_repr=False):
  """Human readable representation of a tensor's numpy value."""
  if tensor.dtype.is_numpy_compatible:
    formatter = repr if is_repr else str
    rendered = formatter(tensor.numpy())
  else:
    rendered = "<unprintable>"
  # Multi-line values start on a fresh line so they align when embedded in a
  # larger message (e.g. __str__/__repr__ of an eager tensor).
  if "\n" in rendered:
    rendered = "\n" + rendered
  return rendered
# NOTE(ebrevdo): Do not subclass this. If you do, I will break you on purpose.
class _TensorLike(object):
  """Internal cls for grouping Tensor, SparseTensor, ..., for is_instance.

  This is a pure marker base class: it carries no behavior and exists only so
  `isinstance(x, _TensorLike)` can match the tensor family of classes.
  """
  pass
@tf_export("Tensor")
class Tensor(_TensorLike):
  """Represents one of the outputs of an `Operation`.

  A `Tensor` is a symbolic handle to one of the outputs of an
  `Operation`. It does not hold the values of that operation's output,
  but instead provides a means of computing those values in a
  TensorFlow @{tf.Session}.

  This class has two primary purposes:

  1. A `Tensor` can be passed as an input to another `Operation`.
     This builds a dataflow connection between operations, which
     enables TensorFlow to execute an entire `Graph` that represents a
     large, multi-step computation.

  2. After the graph has been launched in a session, the value of the
     `Tensor` can be computed by passing it to
     @{tf.Session.run}.
     `t.eval()` is a shortcut for calling
     `tf.get_default_session().run(t)`.

  In the following example, `c`, `d`, and `e` are symbolic `Tensor`
  objects, whereas `result` is a numpy array that stores a concrete
  value:

  ```python
  # Build a dataflow graph.
  c = tf.constant([[1.0, 2.0], [3.0, 4.0]])
  d = tf.constant([[1.0, 1.0], [0.0, 1.0]])
  e = tf.matmul(c, d)

  # Construct a `Session` to execute the graph.
  sess = tf.Session()

  # Execute the graph and store the value that `e` represents in `result`.
  result = sess.run(e)
  ```
  """

  # List of Python operators that we allow to override.
  OVERLOADABLE_OPERATORS = {
      # Binary.
      "__add__",
      "__radd__",
      "__sub__",
      "__rsub__",
      "__mul__",
      "__rmul__",
      "__div__",
      "__rdiv__",
      "__truediv__",
      "__rtruediv__",
      "__floordiv__",
      "__rfloordiv__",
      "__mod__",
      "__rmod__",
      "__lt__",
      "__le__",
      "__gt__",
      "__ge__",
      "__and__",
      "__rand__",
      "__or__",
      "__ror__",
      "__xor__",
      "__rxor__",
      "__getitem__",
      "__pow__",
      "__rpow__",
      # Unary.
      "__invert__",
      "__neg__",
      "__abs__",
      "__matmul__",
      "__rmatmul__"
  }

  def __init__(self, op, value_index, dtype):
    """Creates a new `Tensor`.

    Args:
      op: An `Operation`. `Operation` that computes this tensor.
      value_index: An `int`. Index of the operation's endpoint that produces
        this tensor.
      dtype: A `DType`. Type of elements stored in this tensor.

    Raises:
      TypeError: If the op is not an `Operation`.
    """
    if not isinstance(op, Operation):
      raise TypeError("op needs to be an Operation: %s" % op)
    self._op = op
    self._value_index = value_index
    self._dtype = dtypes.as_dtype(dtype)
    # This will be set by self.shape().
    self._shape_val = None
    # List of operations that use this Tensor as input. We maintain this list
    # to easily navigate a computation graph.
    self._consumers = []
    if not _USE_C_SHAPES:
      # Attributes used for C++ shape inference. Not inspected, only forwarded.
      # If set, will be a HandleData object from cpp_shape_inference.proto.
      self._handle_data = None
    # Program-unique integer id; read by the module-level tensor_id() helper.
    self._id = uid()

  @property
  def op(self):
    """The `Operation` that produces this tensor as an output."""
    return self._op

  @property
  def dtype(self):
    """The `DType` of elements in this tensor."""
    return self._dtype

  @property
  def graph(self):
    """The `Graph` that contains this tensor."""
    return self._op.graph

  @property
  def name(self):
    """The string name of this tensor."""
    if not self._op.name:
      raise ValueError("Operation was not named: %s" % self._op)
    return "%s:%d" % (self._op.name, self._value_index)

  @property
  def device(self):
    """The name of the device on which this tensor will be produced, or None."""
    return self._op.device

  @property
  def shape(self):
    """Returns the `TensorShape` that represents the shape of this tensor.

    The shape is computed using shape inference functions that are
    registered in the Op for each `Operation`. See
    @{tf.TensorShape}
    for more details of what a shape represents.

    The inferred shape of a tensor is used to provide shape
    information without having to launch the graph in a session. This
    can be used for debugging, and providing early error messages. For
    example:

    ```python
    c = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])

    print(c.shape)
    ==> TensorShape([Dimension(2), Dimension(3)])

    d = tf.constant([[1.0, 0.0], [0.0, 1.0], [1.0, 0.0], [0.0, 1.0]])

    print(d.shape)
    ==> TensorShape([Dimension(4), Dimension(2)])

    # Raises a ValueError, because `c` and `d` do not have compatible
    # inner dimensions.
    e = tf.matmul(c, d)

    f = tf.matmul(c, d, transpose_a=True, transpose_b=True)

    print(f.shape)
    ==> TensorShape([Dimension(3), Dimension(4)])
    ```

    In some cases, the inferred shape may have unknown dimensions. If
    the caller has additional information about the values of these
    dimensions, `Tensor.set_shape()` can be used to augment the
    inferred shape.

    Returns:
      A `TensorShape` representing the shape of this tensor.
    """
    # The shape is computed lazily and cached in _shape_val; set_shape()
    # invalidates the cache when C shapes are in use.
    if self._shape_val is None:
      if _USE_C_SHAPES:
        self._shape_val = self._c_api_shape()
      else:
        # Call set_shape_and_handle_data_for_outputs in topological order on all
        # ops that are needed to compute self.op's shape. We do this instead of
        # having set_shape_and_handle_data_for_outputs recursively call
        # Operation.shape on self.op.inputs to overflowing the call stack.
        need_shapes = self._get_input_ops_without_shapes(self.op)
        need_shapes.sort(key=lambda op: op._id)
        for op in need_shapes:
          set_shape_and_handle_data_for_outputs(op)
    return self._shape_val

  def _get_input_ops_without_shapes(self, target_op):
    """Returns ops needing shape inference to compute target_op's shape."""
    result = []
    stack = [self._op]
    visited = set()
    while stack:
      op = stack.pop()
      if op in visited: continue
      result.append(op)
      stack.extend(t.op for t in op.inputs if t._shape_val is None)
      visited.add(op)
    return result

  def _c_api_shape(self):
    """Returns the TensorShape of this tensor according to the C API."""
    c_graph = self._op._graph._c_graph  # pylint: disable=protected-access
    shape_vector, unknown_shape = c_api.TF_GraphGetTensorShapeHelper(
        c_graph, self._as_tf_output())
    if unknown_shape:
      return tensor_shape.unknown_shape()
    else:
      # The C API encodes unknown dimensions as -1; translate to None.
      shape_vector = [None if d == -1 else d for d in shape_vector]
      return tensor_shape.TensorShape(shape_vector)

  @property
  def _shape(self):
    # Deprecated private accessor kept for backwards compatibility.
    logging.warning("Tensor._shape is private, use Tensor.shape "
                    "instead. Tensor._shape will eventually be removed.")
    return self.shape

  @_shape.setter
  def _shape(self, value):
    raise ValueError(
        "Tensor._shape cannot be assigned, use Tensor.set_shape instead.")

  def __iter__(self):
    if not context.executing_eagerly():
      raise TypeError(
          "Tensor objects are not iterable when eager execution is not "
          "enabled. To iterate over this tensor use tf.map_fn.")
    shape = self._shape_tuple()
    if shape is None:
      raise TypeError("Cannot iterate over a tensor with unknown shape.")
    if not shape:
      raise TypeError("Cannot iterate over a scalar tensor.")
    if shape[0] is None:
      raise TypeError(
          "Cannot iterate over a tensor with unknown first dimension.")
    for i in xrange(shape[0]):
      yield self[i]

  def _shape_as_list(self):
    if self.shape.ndims is not None:
      return [dim.value for dim in self.shape.dims]
    else:
      return None

  def _shape_tuple(self):
    shape = self._shape_as_list()
    if shape is None:
      return None
    return tuple(shape)

  def _rank(self):
    """Integer rank of this Tensor, if known, else None.

    Returns:
      Integer rank or None
    """
    return self.shape.ndims

  def get_shape(self):
    """Alias of Tensor.shape."""
    return self.shape

  def set_shape(self, shape):
    """Updates the shape of this tensor.

    This method can be called multiple times, and will merge the given
    `shape` with the current shape of this tensor. It can be used to
    provide additional information about the shape of this tensor that
    cannot be inferred from the graph alone. For example, this can be used
    to provide additional information about the shapes of images:

    ```python
    _, image_data = tf.TFRecordReader(...).read(...)
    image = tf.image.decode_png(image_data, channels=3)

    # The height and width dimensions of `image` are data dependent, and
    # cannot be computed without executing the op.
    print(image.shape)
    ==> TensorShape([Dimension(None), Dimension(None), Dimension(3)])

    # We know that each image in this dataset is 28 x 28 pixels.
    image.set_shape([28, 28, 3])
    print(image.shape)
    ==> TensorShape([Dimension(28), Dimension(28), Dimension(3)])
    ```

    Args:
      shape: A `TensorShape` representing the shape of this tensor, a
        `TensorShapeProto`, a list, a tuple, or None.

    Raises:
      ValueError: If `shape` is not compatible with the current shape of
        this tensor.
    """
    if _USE_C_SHAPES:  # pylint: disable=protected-access
      # Reset cached shape.
      self._shape_val = None
    else:
      self._shape_val = self.shape.merge_with(shape)
    # Update C shape even if _USE_C_SHAPES = False, since we still want
    # set_shape to be reflected in the C API graph for when we run it.
    if not isinstance(shape, tensor_shape.TensorShape):
      shape = tensor_shape.TensorShape(shape)
    dim_list = []
    if shape.dims is None:
      unknown_shape = True
    else:
      unknown_shape = False
      for dim in shape.dims:
        if dim.value is None:
          # -1 encodes "unknown dimension" for the C API.
          dim_list.append(-1)
        else:
          dim_list.append(dim.value)
    try:
      c_api.TF_GraphSetTensorShape_wrapper(
          self._op._graph._c_graph,  # pylint: disable=protected-access
          self._as_tf_output(),
          dim_list,
          unknown_shape)
    except errors.InvalidArgumentError as e:
      # Convert to ValueError for backwards compatibility.
      raise ValueError(str(e))

  @property
  def value_index(self):
    """The index of this tensor in the outputs of its `Operation`."""
    return self._value_index

  def consumers(self):
    """Returns a list of `Operation`s that consume this tensor.

    Returns:
      A list of `Operation`s.
    """
    consumer_names = c_api.TF_OperationOutputConsumers_wrapper(
        self._as_tf_output())
    # pylint: disable=protected-access
    return [
        self.graph._get_operation_by_name_unsafe(name)
        for name in consumer_names
    ]
    # pylint: enable=protected-access

  def _as_node_def_input(self):
    """Return a value to use for the NodeDef "input" attribute.

    The returned string can be used in a NodeDef "input" attribute
    to indicate that the NodeDef uses this Tensor as input.

    Raises:
      ValueError: if this Tensor's Operation does not have a name.

    Returns:
      a string.
    """
    if not self._op.name:
      raise ValueError("Operation was not named: %s" % self._op)
    # Output 0 is spelled as just the op name; other outputs get ":index".
    if self._value_index == 0:
      return self._op.name
    else:
      return "%s:%d" % (self._op.name, self._value_index)

  def _as_tf_output(self):
    # pylint: disable=protected-access
    return c_api_util.tf_output(self.op._c_op, self.value_index)
    # pylint: enable=protected-access

  def __str__(self):
    return "Tensor(\"%s\"%s%s%s)" % (
        self.name, (", shape=%s" % self.get_shape())
        if self.get_shape().ndims is not None else "",
        (", dtype=%s" % self._dtype.name)
        if self._dtype else "", (", device=%s" % self.device)
        if self.device else "")

  def __repr__(self):
    return "<tf.Tensor '%s' shape=%s dtype=%s>" % (self.name, self.get_shape(),
                                                   self._dtype.name)

  def __hash__(self):
    # Necessary to support Python's collection membership operators
    return id(self)

  def __eq__(self, other):
    # Necessary to support Python's collection membership operators
    # NOTE: equality is object identity, so two distinct tensors holding
    # equal values still compare unequal (and hash differently).
    return id(self) == id(other)

  def __copy__(self):
    # Make sure _shape_val is computed before we copy.
    # TODO(b/77597810): get rid of Tensor copies.
    if self._shape_val is None:
      set_shape_and_handle_data_for_outputs(self.op)
    cls = self.__class__
    result = cls.__new__(cls)
    result.__dict__.update(self.__dict__)
    return result

  # NOTE(mrry): This enables the Tensor's overloaded "right" binary
  # operators to run when the left operand is an ndarray, because it
  # accords the Tensor class higher priority than an ndarray, or a
  # numpy matrix.
  # TODO(mrry): Convert this to using numpy's __numpy_ufunc__
  # mechanism, which allows more control over how Tensors interact
  # with ndarrays.
  __array_priority__ = 100

  @staticmethod
  def _override_operator(operator, func):
    _override_helper(Tensor, operator, func)

  def __bool__(self):
    """Dummy method to prevent a tensor from being used as a Python `bool`.

    This overload raises a `TypeError` when the user inadvertently
    treats a `Tensor` as a boolean (e.g. in an `if` statement). For
    example:

    ```python
    if tf.constant(True):  # Will raise.
      # ...

    if tf.constant(5) < tf.constant(7):  # Will raise.
      # ...
    ```

    This disallows ambiguities between testing the Python value vs testing the
    dynamic condition of the `Tensor`.

    Raises:
      `TypeError`.
    """
    raise TypeError("Using a `tf.Tensor` as a Python `bool` is not allowed. "
                    "Use `if t is not None:` instead of `if t:` to test if a "
                    "tensor is defined, and use TensorFlow ops such as "
                    "tf.cond to execute subgraphs conditioned on the value of "
                    "a tensor.")

  def __nonzero__(self):
    """Dummy method to prevent a tensor from being used as a Python `bool`.

    This is the Python 2.x counterpart to `__bool__()` above.

    Raises:
      `TypeError`.
    """
    raise TypeError("Using a `tf.Tensor` as a Python `bool` is not allowed. "
                    "Use `if t is not None:` instead of `if t:` to test if a "
                    "tensor is defined, and use TensorFlow ops such as "
                    "tf.cond to execute subgraphs conditioned on the value of "
                    "a tensor.")

  def eval(self, feed_dict=None, session=None):
    """Evaluates this tensor in a `Session`.

    Calling this method will execute all preceding operations that
    produce the inputs needed for the operation that produces this
    tensor.

    *N.B.* Before invoking `Tensor.eval()`, its graph must have been
    launched in a session, and either a default session must be
    available, or `session` must be specified explicitly.

    Args:
      feed_dict: A dictionary that maps `Tensor` objects to feed values.
        See @{tf.Session.run} for a
        description of the valid feed values.
      session: (Optional.) The `Session` to be used to evaluate this tensor. If
        none, the default session will be used.

    Returns:
      A numpy array corresponding to the value of this tensor.
    """
    return _eval_using_default_session(self, feed_dict, self.graph, session)
# TODO(agarwal): consider getting rid of this.
class _EagerTensorBase(Tensor):
  """Base class for EagerTensor.

  The concrete `EagerTensor` class is created by the C extension (see
  `TFE_Py_InitEagerTensor` below); this base class supplies the Python-level
  behavior shared by all eager tensors.
  """

  @property
  def dtype(self):
    # Note: using the intern table directly here as this is
    # performance-sensitive in some models.
    return dtypes._INTERN_TABLE[self._datatype_enum()]  # pylint: disable=protected-access

  def numpy(self):
    """Returns a numpy array or a scalar with the same contents as the Tensor.

    TODO(ashankar,agarwal): Perhaps this should NOT reference the underlying
    buffer but instead always explicitly copy? Note that currently it may or may
    not copy based on whether the numpy data is properly aligned or not.

    Returns:
      A numpy array or a scalar. Numpy array may share memory with the
      Tensor object. Any changes to one may be reflected in the other. A scalar
      value is returned when self has rank 0.

    Raises:
      ValueError: if the type of this Tensor is not representable in numpy.
    """
    if self.dtype == dtypes.resource:
      raise ValueError("Resource handles are not convertible to numpy.")
    return self.cpu()._numpy()  # pylint: disable=protected-access

  # __int__ and __float__ may copy the tensor to CPU and
  # only work for scalars; values are cast as per numpy.
  def __int__(self):
    return int(self.numpy())

  def __float__(self):
    return float(self.numpy())

  def __array__(self, dtype=None):
    return np.array(self.numpy(), dtype=dtype)

  def __format__(self, format_spec):
    return self.numpy().__format__(format_spec)

  def _numpy(self):
    # Implemented by the C-defined EagerTensor subclass.
    raise NotImplementedError()

  def __copy__(self):
    # Eager Tensors are immutable so it's safe to return themselves as a copy.
    return self

  def __deepcopy__(self, memo):
    # Eager Tensors are immutable so it's safe to return themselves as a copy.
    del memo
    return self

  def _datatype_enum(self):
    # Implemented by the C-defined EagerTensor subclass.
    raise NotImplementedError()

  def _shape_tuple(self):
    """The shape of this Tensor, as a tuple.

    This is more performant than tuple(shape().as_list()) as it avoids
    two list and one object creation. Marked private for now as from an API
    perspective, it would be better to have a single performant way of
    getting a shape rather than exposing shape() and shape_tuple()
    (and heaven forbid, shape_list() etc. as well!). Punting on that for now,
    but ideally one would work things out and remove the need for this method.

    Returns:
      tuple with the shape.
    """
    raise NotImplementedError()

  def _rank(self):
    """Integer rank of this Tensor.

    Unlike regular Tensors, the rank is always known for EagerTensors.

    This is more performant than len(self._shape_tuple())

    Returns:
      Integer rank
    """
    raise NotImplementedError()

  def _copy_to_device(self, context, device):  # pylint: disable=redefined-outer-name
    # Implemented by the C-defined EagerTensor subclass.
    raise NotImplementedError()

  def __str__(self):
    return "tf.Tensor(%s, shape=%s, dtype=%s)" % (numpy_text(self),
                                                  self.shape,
                                                  self.dtype.name)

  def __repr__(self):
    return "<tf.Tensor: id=%s, shape=%s, dtype=%s, numpy=%s>" % (
        self._id, self.shape, self.dtype.name, numpy_text(self, is_repr=True))

  @staticmethod
  def _override_operator(name, func):
    setattr(_EagerTensorBase, name, func)

  def _copy(self, ctx=None, device_name=None):
    """Copies tensor to dest device."""
    # pylint: disable=protected-access
    # Creates a new tensor on the dest device.
    if ctx is None:
      ctx = context.context()
    if device_name is None:
      device_name = ctx.device_name
    # pylint: disable=protected-access
    try:
      new_tensor = self._copy_to_device(context=ctx._handle, device=device_name)
    except core._NotOkStatusException as e:
      six.raise_from(core._status_to_exception(e.code, e.message), None)
    # Record the copy on tape and define backprop copy as well.
    if context.executing_eagerly():
      self_device = self.device
      def grad_fun(dresult):
        # The gradient of a copy is a copy back to the source device.
        return [dresult._copy(device_name=self_device)]
      tape.record_operation("_copy", [new_tensor], [self], grad_fun)
    return new_tensor
    # pylint: enable=protected-access

  @property
  def shape(self):
    if self._tensor_shape is None:  # pylint: disable=access-member-before-definition
      # `_tensor_shape` is declared and defined in the definition of
      # `EagerTensor`, in C.
      self._tensor_shape = tensor_shape.TensorShape(self._shape_tuple())
    return self._tensor_shape

  def get_shape(self):
    """Alias of Tensor.shape."""
    return self.shape

  def _shape_as_list(self):
    """The shape of the tensor as a list."""
    return list(self._shape_tuple())

  @property
  def ndim(self):
    """Returns the number of Tensor dimensions."""
    return self.shape.ndims

  def cpu(self):
    """A copy of this Tensor with contents backed by host memory."""
    return self._copy(context.context(), "CPU:0")

  def gpu(self, gpu_index=0):
    """A copy of this Tensor with contents backed by memory on the GPU.

    Arguments:
      gpu_index: Identifies which GPU to place the contents on the returned
        Tensor in.

    Returns:
      A GPU-memory backed Tensor object initialized with the same contents
      as this Tensor.
    """
    return self._copy(context.context(), "GPU:" + str(gpu_index))

  def __bool__(self):
    # Unlike graph Tensors, eager scalars of dtype bool can be used directly
    # as Python booleans; anything else is rejected.
    if self._shape_tuple() != ():  # pylint: disable=g-explicit-bool-comparison
      raise ValueError(
          "Non-scalar tensor %s cannot be converted to boolean." % repr(self))
    if self.dtype != dtypes.bool:
      raise ValueError(
          "Non-boolean tensor %s cannot be converted to boolean." % repr(self))
    return bool(self.cpu().numpy())

  def __nonzero__(self):
    return self.__bool__()

  def set_shape(self, shape):
    # Eager tensors already have a fully-determined shape, so this is only a
    # compatibility check; nothing is mutated.
    if not self.shape.is_compatible_with(shape):
      raise ValueError(
          "Tensor's shape %s is not compatible with supplied shape %s" %
          (self.shape, shape))

  # Methods not supported / implemented for Eager Tensors.
  @property
  def op(self):
    raise AttributeError(
        "Tensor.op is meaningless when eager execution is enabled.")

  @property
  def graph(self):
    raise AttributeError(
        "Tensor.graph is meaningless when eager execution is enabled.")

  @property
  def name(self):
    raise AttributeError(
        "Tensor.name is meaningless when eager execution is enabled.")

  @property
  def value_index(self):
    raise AttributeError(
        "Tensor.value_index is meaningless when eager execution is enabled.")

  def consumers(self):
    raise NotImplementedError(
        "Tensor.consumers is meaningless when eager execution is enabled.")

  def _add_consumer(self, consumer):
    raise NotImplementedError(
        "_add_consumer not supported when eager execution is enabled.")

  def _as_node_def_input(self):
    raise NotImplementedError(
        "_as_node_def_input not supported when eager execution is enabled.")

  def _as_tf_output(self):
    raise NotImplementedError(
        "_as_tf_output not supported when eager execution is enabled.")

  def eval(self, feed_dict=None, session=None):
    raise NotImplementedError(
        "eval is not supported when eager execution is enabled, "
        "is .numpy() what you're looking for?"
    )
# This call creates an EagerTensor class, as a subclass of _EagerTensorBase, and
# registers it with the current module.
# The concrete class is constructed by the C extension; _EagerTensorBase above
# supplies the Python-level behavior.
EagerTensor = c_api.TFE_Py_InitEagerTensor(_EagerTensorBase)
def _TensorTensorConversionFunction(t, dtype=None, name=None, as_ref=False):
_ = name, as_ref
if dtype and not dtype.is_compatible_with(t.dtype):
raise ValueError(
"Tensor conversion requested dtype %s for Tensor with dtype %s: %r" %
(dtype.name, t.dtype.name, str(t)))
return t
# Registry of tensor conversion functions keyed by priority; lower priorities
# are tried first (internal_convert_to_tensor iterates sorted(items())). Each
# value is a list of (base_type, conversion_func) pairs.
_tensor_conversion_func_registry = {
    0: [(Tensor, _TensorTensorConversionFunction)]
}
# Lazily-built per-type cache of applicable conversion functions, populated by
# internal_convert_to_tensor under the lock below.
_tensor_conversion_func_cache = {}
_tensor_conversion_func_lock = threading.Lock()
# Tensor itself satisfies the dense-tensor-like interface (`name` and `dtype`
# properties), so register it.
register_dense_tensor_like_type(Tensor)
@tf_export("convert_to_tensor")
def convert_to_tensor(value, dtype=None, name=None, preferred_dtype=None):
  """Converts the given `value` to a `Tensor`.

  Python objects of several kinds — `Tensor` objects, numpy arrays, Python
  lists, and Python scalars — are accepted and converted to `Tensor`
  objects. For example:

  ```python
  import numpy as np

  def my_func(arg):
    arg = tf.convert_to_tensor(arg, dtype=tf.float32)
    return tf.matmul(arg, arg) + arg

  # The following calls are equivalent.
  value_1 = my_func(tf.constant([[1.0, 2.0], [3.0, 4.0]]))
  value_2 = my_func([[1.0, 2.0], [3.0, 4.0]])
  value_3 = my_func(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32))
  ```

  This is useful when composing a new operation in Python (such as `my_func`
  above). All standard Python op constructors apply this function to each of
  their Tensor-valued inputs, which allows those ops to accept numpy arrays,
  Python lists, and scalars in addition to `Tensor` objects.

  Note: This function diverges from default Numpy behavior for `float` and
  `string` types when `None` is present in a Python list or scalar. Rather
  than silently converting `None` values, an error will be thrown.

  Args:
    value: An object whose type has a registered `Tensor` conversion function.
    dtype: Optional element type for the returned tensor. If missing, the
      type is inferred from the type of `value`.
    name: Optional name to use if a new `Tensor` is created.
    preferred_dtype: Optional element type for the returned tensor,
      used when dtype is None. In some cases, a caller may not have a
      dtype in mind when converting to a tensor, so preferred_dtype
      can be used as a soft preference. If the conversion to
      `preferred_dtype` is not possible, this argument has no effect.

  Returns:
    An `Output` based on `value`.

  Raises:
    TypeError: If no conversion function is registered for `value`.
    RuntimeError: If a registered conversion function returns an invalid value.
  """
  # Thin public wrapper: the registry lookup, dtype checks and preferred-dtype
  # fallback all live in internal_convert_to_tensor. Callers of the public API
  # never receive a mutable `ref` view, hence as_ref=False.
  return internal_convert_to_tensor(
      value=value, dtype=dtype, name=name, preferred_dtype=preferred_dtype,
      as_ref=False)
def _error_prefix(name):
return "" if name is None else "%s: " % name
def internal_convert_to_tensor(value,
                               dtype=None,
                               name=None,
                               as_ref=False,
                               preferred_dtype=None,
                               ctx=None):
  """Converts the given `value` to an `Tensor`.

  This function converts Python objects of various types to `Tensor`
  objects. It accepts `Tensor` objects, numpy arrays, Python lists,
  and Python scalars. For example:

  This function can be useful when composing a new operation in Python
  All standard Python op constructors apply this function to each of their
  Tensor-valued inputs, which allows those ops to accept numpy arrays, Python
  lists, and scalars in addition to `Tensor` objects.

  Args:
    value: An object whose type has a registered `Tensor` conversion function.
    dtype: Optional element type for the returned tensor. If missing, the
      type is inferred from the type of `value`.
    name: Optional name to use if a new `Tensor` is created.
    as_ref: True if we want the mutable view of Variables, if applicable.
    preferred_dtype: Optional element type for the returned tensor,
      used when dtype is None. In some cases, a caller may not have a
      dtype in mind when converting to a tensor, so preferred_dtype
      can be used as a soft preference. If the conversion to
      `preferred_dtype` is not possible, this argument has no effect.
    ctx: Optional: The value of context.context().

  Returns:
    A `Tensor` based on `value`.

  Raises:
    TypeError: If no conversion function is registered for `value`.
    RuntimeError: If a registered conversion function returns an invalid value.
  """
  if ctx is None: ctx = context.context()
  if isinstance(value, EagerTensor):
    if ctx.executing_eagerly():
      # Fast path for EagerTensors that don't need any conversion.
      # Note that we don't check that value's dtype matches the dtype
      # argument. We expect that the C runtime will do that checking
      # when we execute the kernel.
      return value
    else:
      # An EagerTensor appearing in graph mode can only be captured into a
      # function being built.
      graph = get_default_graph()
      if not graph.building_function:
        raise RuntimeError("Attempting to capture an EagerTensor without "
                           "building a function.")
      return graph.capture(value, name=name)
  if dtype is not None:
    dtype = dtypes.as_dtype(dtype)
  unwrapped_type = type(value)
  # Fast path: reuse the per-type list of applicable conversion functions when
  # this exact type has been converted before.
  # NOTE(review): the cache read happens without holding the lock below; this
  # appears to rely on the GIL making dict get/set safe — confirm.
  conversion_func_list = _tensor_conversion_func_cache.get(unwrapped_type, None)
  if conversion_func_list is None:
    with _tensor_conversion_func_lock:
      conversion_func_list = []
      # Lower registry priorities are tried first (sorted() is ascending).
      for _, funcs_at_priority in sorted(
          _tensor_conversion_func_registry.items()):
        for base_type, conversion_func in funcs_at_priority:
          if isinstance(value, base_type):
            conversion_func_list.append((base_type, conversion_func))
      _tensor_conversion_func_cache[unwrapped_type] = conversion_func_list
  for base_type, conversion_func in conversion_func_list:
    # If dtype is None but preferred_dtype is not None, we try to
    # cast to preferred_dtype first.
    ret = None
    if dtype is None and preferred_dtype is not None:
      try:
        ret = conversion_func(
            value, dtype=preferred_dtype, name=name, as_ref=as_ref)
      except (TypeError, ValueError, errors.UnimplementedError,
              errors.InvalidArgumentError):
        # Could not coerce the conversion to use the preferred dtype.
        ret = None
      if ret is not None and ret is not NotImplemented:
        if (ret.dtype.base_dtype !=
            dtypes.as_dtype(preferred_dtype).base_dtype):
          raise TypeError("convert_to_tensor did not convert to "
                          "the preferred dtype: %s vs %s " %
                          (ret.dtype.base_dtype,
                           dtypes.as_dtype(preferred_dtype).base_dtype))
    if ret is None:
      ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
    if ret is NotImplemented:
      # A conversion function may decline; fall through to the next candidate.
      continue
    if not isinstance(ret, Tensor):
      raise RuntimeError(
          "%sConversion function %r for type %s returned non-Tensor: %r" %
          (_error_prefix(name), conversion_func, base_type, ret))
    if dtype and not dtype.is_compatible_with(ret.dtype):
      raise RuntimeError(
          "%sConversion function %r for type %s returned incompatible "
          "dtype: requested = %s, actual = %s" %
          (_error_prefix(name), conversion_func, base_type, dtype.name,
           ret.dtype.name))
    return ret
  raise TypeError("%sCannot convert %r with type %s to Tensor: "
                  "no conversion function registered." %
                  (_error_prefix(name), value, unwrapped_type))
def internal_convert_n_to_tensor(values,
                                 dtype=None,
                                 name=None,
                                 as_ref=False,
                                 preferred_dtype=None,
                                 ctx=None):
  """Converts `values` to a list of `Tensor` objects.

  Args:
    values: A list of objects that can be consumed by `tf.convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor` objects.
    name: (Optional.) A name prefix to used when a new `Tensor` is
      created, in which case element `i` will be given the name `name
      + '_' + i`.
    as_ref: True if the caller wants the results as ref tensors.
    preferred_dtype: Optional element type for the returned tensors,
      used when dtype is None. In some cases, a caller may not have a
      dtype in mind when converting to a tensor, so preferred_dtype
      can be used as a soft preference. If the conversion to
      `preferred_dtype` is not possible, this argument has no effect.
    ctx: The value of context.context().

  Returns:
    A list of `Tensor` and/or `IndexedSlices` objects.

  Raises:
    TypeError: If no conversion function is registered for an element in
      `values`.
    RuntimeError: If a registered conversion function returns an invalid
      value.
  """
  # Use collections.abc.Sequence: the bare `collections.Sequence` alias was
  # deprecated in Python 3.3 and removed in Python 3.10.
  if not isinstance(values, collections.abc.Sequence):
    raise TypeError("values must be a list.")
  ret = []
  # Resolve the eager context once and reuse it for every element.
  if ctx is None:
    ctx = context.context()
  for i, value in enumerate(values):
    n = None if name is None else "%s_%d" % (name, i)
    ret.append(
        internal_convert_to_tensor(
            value,
            dtype=dtype,
            name=n,
            as_ref=as_ref,
            preferred_dtype=preferred_dtype,
            ctx=ctx))
  return ret
def convert_n_to_tensor(values, dtype=None, name=None, preferred_dtype=None):
  """Converts `values` to a list of `Tensor` objects.

  Public wrapper around `internal_convert_n_to_tensor` that always requests
  plain (non-ref) tensors.

  Args:
    values: A list of objects that can be consumed by `tf.convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor` objects.
    name: (Optional.) A name prefix used when a new `Tensor` is created;
      element `i` is then named `name + '_' + i`.
    preferred_dtype: Optional element type for the returned tensors, used
      when `dtype` is None as a soft preference. If conversion to
      `preferred_dtype` is not possible, this argument has no effect.

  Returns:
    A list of `Tensor` and/or `IndexedSlices` objects.

  Raises:
    TypeError: If no conversion function is registered for an element in
      `values`.
    RuntimeError: If a registered conversion function returns an invalid
      value.
  """
  return internal_convert_n_to_tensor(
      values=values,
      dtype=dtype,
      preferred_dtype=preferred_dtype,
      name=name,
      as_ref=False)
@tf_export("convert_to_tensor_or_indexed_slices")
def convert_to_tensor_or_indexed_slices(value, dtype=None, name=None):
  """Converts the given object to a `Tensor` or an `IndexedSlices`.

  `IndexedSlices` and `SparseTensor` inputs pass through unmodified; any
  other input is converted with `convert_to_tensor()`.

  Args:
    value: An `IndexedSlices`, `SparseTensor`, or an object that can be
      consumed by `convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor` or
      `IndexedSlices`.
    name: (Optional.) A name to use if a new `Tensor` is created.

  Returns:
    An `Tensor`, `IndexedSlices`, or `SparseTensor` based on `value`.

  Raises:
    ValueError: If `dtype` does not match the element type of `value`.
  """
  return internal_convert_to_tensor_or_indexed_slices(
      value=value, dtype=dtype, name=name, as_ref=False)
def internal_convert_to_tensor_or_indexed_slices(value,
                                                 dtype=None,
                                                 name=None,
                                                 as_ref=False):
  """Converts the given object to an `Tensor` or an `IndexedSlices`.

  `IndexedSlices` and `SparseTensor` inputs are returned unmodified (after a
  dtype-compatibility check); any other input goes through
  `convert_to_tensor()`.

  Args:
    value: An `IndexedSlices`, `SparseTensor`, or an object that can be
      consumed by `convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor` or
      `IndexedSlices`.
    name: (Optional.) A name to use if a new `Tensor` is created.
    as_ref: True if the caller wants the results as ref tensors.

  Returns:
    An `Tensor`, `IndexedSlices`, or `SparseTensor` based on `value`.

  Raises:
    ValueError: If `dtype` does not match the element type of `value`.
  """
  # A graph-mode EagerTensor (e.g. captured constant) must be converted even
  # though it is a _TensorLike, so test for it first.
  if isinstance(value, EagerTensor) and not context.executing_eagerly():
    return internal_convert_to_tensor(
        value, dtype=dtype, name=name, as_ref=as_ref)
  if isinstance(value, _TensorLike):
    if dtype and not dtypes.as_dtype(dtype).is_compatible_with(value.dtype):
      raise ValueError(
          "Tensor conversion requested dtype %s for Tensor with dtype %s: %r" %
          (dtypes.as_dtype(dtype).name, value.dtype.name, str(value)))
    return value
  return internal_convert_to_tensor(
      value, dtype=dtype, name=name, as_ref=as_ref)
def internal_convert_n_to_tensor_or_indexed_slices(values,
                                                   dtype=None,
                                                   name=None,
                                                   as_ref=False):
  """Converts `values` to a list of `Tensor` or `IndexedSlices` objects.

  Any `IndexedSlices` or `SparseTensor` objects in `values` are returned
  unmodified. `None` entries also pass through unchanged.

  Args:
    values: A list of `None`, `IndexedSlices`, `SparseTensor`, or objects that
      can be consumed by `convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor`
      `IndexedSlices`.
    name: (Optional.) A name prefix to used when a new `Tensor` is
      created, in which case element `i` will be given the name `name
      + '_' + i`.
    as_ref: True if the caller wants the results as ref tensors.

  Returns:
    A list of `Tensor`, `IndexedSlices`, and/or `SparseTensor` objects.

  Raises:
    TypeError: If no conversion function is registered for an element in
      `values`.
    RuntimeError: If a registered conversion function returns an invalid
      value.
  """
  # Use collections.abc.Sequence: the bare `collections.Sequence` alias was
  # deprecated in Python 3.3 and removed in Python 3.10.
  if not isinstance(values, collections.abc.Sequence):
    raise TypeError("values must be a list.")
  ret = []
  for i, value in enumerate(values):
    if value is None:
      # Preserve placeholder entries (e.g. missing gradients).
      ret.append(value)
    else:
      n = None if name is None else "%s_%d" % (name, i)
      ret.append(
          internal_convert_to_tensor_or_indexed_slices(
              value, dtype=dtype, name=n, as_ref=as_ref))
  return ret
def convert_n_to_tensor_or_indexed_slices(values, dtype=None, name=None):
  """Converts `values` to a list of `Output` or `IndexedSlices` objects.

  Public wrapper around `internal_convert_n_to_tensor_or_indexed_slices`
  that always requests plain (non-ref) tensors. Any `IndexedSlices` or
  `SparseTensor` objects in `values` are returned unmodified.

  Args:
    values: A list of `None`, `IndexedSlices`, `SparseTensor`, or objects that
      can be consumed by `convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor`
      `IndexedSlices`.
    name: (Optional.) A name prefix used when a new `Tensor` is created;
      element `i` is then named `name + '_' + i`.

  Returns:
    A list of `Tensor`, `IndexedSlices`, and/or `SparseTensor` objects.

  Raises:
    TypeError: If no conversion function is registered for an element in
      `values`.
    RuntimeError: If a registered conversion function returns an invalid
      value.
  """
  return internal_convert_n_to_tensor_or_indexed_slices(
      values=values, dtype=dtype, name=name, as_ref=False)
# TODO(josh11b): Add ctx argument to conversion_func() signature.
@tf_export("register_tensor_conversion_function")
def register_tensor_conversion_function(base_type,
                                        conversion_func,
                                        priority=100):
  """Registers a function for converting objects of `base_type` to `Tensor`.

  The conversion function must have the following signature:

  ```python
      def conversion_func(value, dtype=None, name=None, as_ref=False):
        # ...
  ```

  It must return a `Tensor` with the given `dtype` if specified. If the
  conversion function creates a new `Tensor`, it should use the given
  `name` if specified. All exceptions will be propagated to the caller.

  The conversion function may return `NotImplemented` for some
  inputs. In this case, the conversion process will continue to try
  subsequent conversion functions.

  If `as_ref` is true, the function must return a `Tensor` reference,
  such as a `Variable`.

  NOTE: The conversion functions will execute in order of priority,
  followed by order of registration. To ensure that a conversion function
  `F` runs before another conversion function `G`, ensure that `F` is
  registered with a smaller priority than `G`.

  Args:
    base_type: The base type or tuple of base types for all objects that
      `conversion_func` accepts.
    conversion_func: A function that converts instances of `base_type` to
      `Tensor`.
    priority: Optional integer that indicates the priority for applying this
      conversion function. Conversion functions with smaller priority values
      run earlier than conversion functions with larger priority values.
      Defaults to 100.

  Raises:
    TypeError: If the arguments do not have the appropriate type.
  """
  global _tensor_conversion_func_cache
  with _tensor_conversion_func_lock:
    valid_single = isinstance(base_type, type)
    valid_tuple = (isinstance(base_type, tuple) and
                   all(isinstance(t, type) for t in base_type))
    if not (valid_single or valid_tuple):
      raise TypeError("base_type must be a type or a tuple of types.")
    if not callable(conversion_func):
      raise TypeError("conversion_func must be callable.")
    # context._context is checked so that we don't inadvertently create it.
    # This is because enable_eager_execution will fail when called from the main
    # function if the context._context is already created, and the
    # register_tensor_conversion_function calls happen when the module is
    # imported.
    numeric_types = six.integer_types + (float, np.ndarray)
    if (context._context is not None and context.executing_eagerly() and
        isinstance(base_type, numeric_types)):
      # TODO(nareshmodi): consider setting a context variable which disables the
      # fastpath instead.
      raise TypeError(
          "Cannot register conversions for numpy arrays, python number types "
          "when executing eagerly.")

    # Group registrations by priority; within one priority they run in
    # registration order.
    funcs_at_priority = _tensor_conversion_func_registry.setdefault(
        priority, [])
    funcs_at_priority.append((base_type, conversion_func))

    # Invalidate the memoized per-type lookup table.
    _tensor_conversion_func_cache = {}
@tf_export("IndexedSlices")
class IndexedSlices(_TensorLike):
  """A sparse representation of a set of tensor slices at given indices.

  This class wraps a pair of `Tensor` objects:

  * `values`: A `Tensor` of any dtype with shape `[D0, D1, ..., Dn]`.
  * `indices`: A 1-D integer `Tensor` with shape `[D0]`.

  An `IndexedSlices` typically represents a subset of a larger dense tensor
  `dense` of shape `[LARGE0, D1, .. , DN]` where `LARGE0 >> D0`. Each value
  in `indices` is the first-dimension position of the corresponding slice of
  `values` inside the larger tensor:

  ```python
  dense[slices.indices[i], :, :, :, ...] = slices.values[i, :, :, :, ...]
  ```

  `IndexedSlices` is used principally in the definition of gradients for
  operations that have sparse gradients (e.g. @{tf.gather}). Contrast this
  representation with @{tf.SparseTensor}, which uses multi-dimensional
  indices and scalar values.
  """

  def __init__(self, values, indices, dense_shape=None):
    """Creates an `IndexedSlices`."""
    # Ensure all provided components come from a single graph.
    _get_graph_from_inputs([values, indices, dense_shape])
    self._values = values
    self._indices = indices
    self._dense_shape = dense_shape

  @property
  def values(self):
    """A `Tensor` containing the values of the slices."""
    return self._values

  @property
  def indices(self):
    """A 1-D `Tensor` containing the indices of the slices."""
    return self._indices

  @property
  def dense_shape(self):
    """A 1-D `Tensor` containing the shape of the corresponding dense tensor."""
    return self._dense_shape

  @property
  def name(self):
    """The name of this `IndexedSlices`."""
    return self.values.name

  @property
  def device(self):
    """The name of the device on which `values` will be produced, or `None`."""
    return self.values.device

  @property
  def op(self):
    """The `Operation` that produces `values` as an output."""
    return self.values.op

  @property
  def dtype(self):
    """The `DType` of elements in this tensor."""
    return self.values.dtype

  @property
  def graph(self):
    """The `Graph` that contains the values, indices, and shape tensors."""
    return self._values.graph

  def __str__(self):
    shape_part = ("" if self._dense_shape is None
                  else ", dense_shape=%s" % self._dense_shape)
    return "IndexedSlices(indices=%s, values=%s%s)" % (
        self._indices, self._values, shape_part)

  def __neg__(self):
    return IndexedSlices(-self.values, self.indices, self.dense_shape)
# Plain-data counterpart of `IndexedSlices`: carries the same three fields
# but holds concrete values rather than `Tensor`s.
IndexedSlicesValue = collections.namedtuple(
    "IndexedSlicesValue", ["values", "indices", "dense_shape"])
def _device_string(dev_spec):
  """Returns `dev_spec` rendered as a device string."""
  # DeviceSpec objects know how to render themselves; anything else is
  # passed through unchanged.
  is_spec = isinstance(dev_spec, pydev.DeviceSpec)
  return dev_spec.to_string() if is_spec else dev_spec
def _NodeDef(op_type, name, device=None, attrs=None):  # pylint: disable=redefined-outer-name
  """Create a NodeDef proto.

  Args:
    op_type: Value for the "op" attribute of the NodeDef proto.
    name: Value for the "name" attribute of the NodeDef proto.
    device: string, device, or function from NodeDef to string.
      Value for the "device" attribute of the NodeDef proto.
    attrs: Optional dictionary where the key is the attribute name (a string)
      and the value is the respective "attr" attribute of the NodeDef proto (an
      AttrValue).

  Returns:
    A node_def_pb2.NodeDef protocol buffer.
  """
  node_def = node_def_pb2.NodeDef()
  node_def.op = compat.as_bytes(op_type)
  node_def.name = compat.as_bytes(name)
  if attrs is not None:
    for attr_name, attr_value in six.iteritems(attrs):
      node_def.attr[attr_name].CopyFrom(attr_value)
  if device is not None:
    # A callable device is resolved against the NodeDef being built.
    node_def.device = (
        device(node_def) if callable(device) else _device_string(device))
  return node_def
# Copied from core/framework/node_def_util.cc
# TODO(mrry,josh11b): Consolidate this validation in C++ code.
# Op names must begin with an alphanumeric or '.'; scope names use the same
# character class but may be empty and have no first-character restriction.
_VALID_OP_NAME_REGEX = re.compile("^[A-Za-z0-9.][A-Za-z0-9_.\\-/]*$")
_VALID_SCOPE_NAME_REGEX = re.compile("^[A-Za-z0-9_.\\-/]*$")
def _create_c_op(graph, node_def, inputs, control_inputs):
  """Creates a TF_Operation.

  Args:
    graph: a `Graph`.
    node_def: `node_def_pb2.NodeDef` for the operation to create.
    inputs: A list of `Tensor`s (corresponding to scalar inputs) and lists of
      `Tensor`s (corresponding to sequence inputs, e.g. "int64 * N",
      "list(int64)"). The length of the list should be equal to the number of
      inputs specified by this operation's op def.
    control_inputs: A list of `Operation`s to set as control dependencies.

  Returns:
    A wrapped TF_Operation*.
  """
  # pylint: disable=protected-access
  op_desc = c_api.TF_NewOperation(graph._c_graph,
                                  compat.as_str(node_def.op),
                                  compat.as_str(node_def.name))
  # Add inputs
  for op_input in inputs:
    if isinstance(op_input, (list, tuple)):
      # A sequence input is registered as a single list-typed input.
      c_api.TF_AddInputList(op_desc, [t._as_tf_output() for t in op_input])
    else:
      c_api.TF_AddInput(op_desc, op_input._as_tf_output())
  # Add control inputs
  for control_input in control_inputs:
    c_api.TF_AddControlInput(op_desc, control_input._c_op)
  # pylint: enable=protected-access
  # Add attrs
  for name, attr_value in node_def.attr.items():
    serialized = attr_value.SerializeToString()
    # TODO(skyewm): this creates and deletes a new TF_Status for every attr.
    # It might be worth creating a convenient way to re-use the same status.
    c_api.TF_SetAttrValueProto(op_desc, compat.as_str(name), serialized)
  try:
    # Finishing performs the C-side validation (shapes, attr checks, ...).
    c_op = c_api.TF_FinishOperation(op_desc)
  except errors.InvalidArgumentError as e:
    # Convert to ValueError for backwards compatibility.
    raise ValueError(str(e))
  return c_op
@tf_export("Operation")
class Operation(object):
"""Represents a graph node that performs computation on tensors.
An `Operation` is a node in a TensorFlow `Graph` that takes zero or
more `Tensor` objects as input, and produces zero or more `Tensor`
objects as output. Objects of type `Operation` are created by
calling a Python op constructor (such as
@{tf.matmul})
or @{tf.Graph.create_op}.
For example `c = tf.matmul(a, b)` creates an `Operation` of type
"MatMul" that takes tensors `a` and `b` as input, and produces `c`
as output.
After the graph has been launched in a session, an `Operation` can
be executed by passing it to
@{tf.Session.run}.
`op.run()` is a shortcut for calling `tf.get_default_session().run(op)`.
"""
  def __init__(self,
               node_def,
               g,
               inputs=None,
               output_types=None,
               control_inputs=None,
               input_types=None,
               original_op=None,
               op_def=None):
    r"""Creates an `Operation`.

    NOTE: This constructor validates the name of the `Operation` (passed
    as `node_def.name`). Valid `Operation` names match the following
    regular expression:

        [A-Za-z0-9.][A-Za-z0-9_.\\-/]*

    Args:
      node_def: `node_def_pb2.NodeDef`. `NodeDef` for the `Operation`.
        Used for attributes of `node_def_pb2.NodeDef`, typically `name`,
        `op`, and `device`. The `input` attribute is irrelevant here
        as it will be computed when generating the model.
      g: `Graph`. The parent graph.
      inputs: list of `Tensor` objects. The inputs to this `Operation`.
      output_types: list of `DType` objects. List of the types of the
        `Tensors` computed by this operation. The length of this list indicates
        the number of output endpoints of the `Operation`.
      control_inputs: list of operations or tensors from which to have a
        control dependency.
      input_types: List of `DType` objects representing the
        types of the tensors accepted by the `Operation`. By default
        uses `[x.dtype.base_dtype for x in inputs]`. Operations that expect
        reference-typed inputs must specify these explicitly.
      original_op: Optional. Used to associate the new `Operation` with an
        existing `Operation` (for example, a replica with the op that was
        replicated).
      op_def: Optional. The `op_def_pb2.OpDef` proto that describes the
        op type that this `Operation` represents.

    Raises:
      TypeError: if control inputs are not Operations or Tensors,
        or if `node_def` is not a `NodeDef`,
        or if `g` is not a `Graph`,
        or if `inputs` are not tensors,
        or if `inputs` and `input_types` are incompatible.
      ValueError: if the `node_def` name is not valid.
    """
    # For internal use only: `node_def` can be set to a TF_Operation to create
    # an Operation for that op. This is useful for creating Operations for ops
    # indirectly created by C API methods, e.g. the ops created by
    # TF_ImportGraphDef. When `node_def` is a TF_Operation, all optional fields
    # should be None.
    if isinstance(node_def, node_def_pb2.NodeDef):
      # Protos cannot exceed 2GB when serialized; ByteSize() < 0 guards
      # against overflow in the size computation.
      if node_def.ByteSize() >= (1 << 31) or node_def.ByteSize() < 0:
        raise ValueError(
            "Cannot create a tensor proto whose content is larger than 2GB.")
      if not _VALID_OP_NAME_REGEX.match(node_def.name):
        raise ValueError("'%s' is not a valid node name" % node_def.name)
      c_op = None
    elif type(node_def).__name__ == "SwigPyObject":
      # The internal TF_Operation path: all other arguments must be unset.
      assert inputs is None
      assert output_types is None
      assert control_inputs is None
      assert input_types is None
      assert original_op is None
      assert op_def is None
      c_op = node_def
    else:
      raise TypeError("node_def needs to be a NodeDef: %s" % node_def)
    if not isinstance(g, Graph):
      raise TypeError("g needs to be a Graph: %s" % g)
    self._graph = g
    if inputs is None:
      inputs = []
    elif not isinstance(inputs, list):
      raise TypeError("inputs needs to be a list of Tensors: %s" % inputs)
    for a in inputs:
      if not isinstance(a, Tensor):
        raise TypeError("input needs to be a Tensor: %s" % a)
    if input_types is None:
      input_types = [i.dtype.base_dtype for i in inputs]
    else:
      # Explicit input_types must be compatible with the actual input dtypes
      # (needed for reference-typed inputs).
      if not all(
          x.is_compatible_with(i.dtype)
          for i, x in zip(inputs, input_types)):
        raise TypeError("In op '%s', input types (%s) are not compatible "
                        "with expected types (%s)" %
                        (node_def.name, [i.dtype for i in inputs],
                         input_types))
    # Build the list of control inputs.
    control_input_ops = []
    if control_inputs:
      for c in control_inputs:
        control_op = None
        if isinstance(c, Operation):
          control_op = c
        elif isinstance(c, (Tensor, IndexedSlices)):
          # A tensor control input means "after the op producing it".
          control_op = c.op
        else:
          raise TypeError("Control input must be an Operation, "
                          "a Tensor, or IndexedSlices: %s" % c)
        control_input_ops.append(control_op)
    # This will be set by self.inputs.
    self._inputs_val = None
    self._id_value = self._graph._next_id()  # pylint: disable=protected-access
    self._original_op = original_op
    self._traceback = self._graph._extract_stack()  # pylint: disable=protected-access
    self._control_flow_context = self.graph._get_control_flow_context()  # pylint: disable=protected-access
    # Initialize self._c_op.
    if c_op:
      self._c_op = c_op
    else:
      if op_def is None:
        op_def = self._graph._get_op_def(node_def.op)
      # TODO(skyewm): op_def_library.apply_op() flattens the incoming inputs.
      # Refactor so we don't have to do this here.
      grouped_inputs = self._reconstruct_sequence_inputs(
          op_def, inputs, node_def.attr)
      self._c_op = _create_c_op(self._graph, node_def, grouped_inputs,
                                control_input_ops)
    # Initialize self._outputs.
    num_outputs = c_api.TF_OperationNumOutputs(self._c_op)
    output_types = [
        c_api.TF_OperationOutputType(c_api_util.tf_output(self._c_op, i))
        for i in range(num_outputs)]
    self._outputs = [
        Tensor(self, i, output_type)
        for i, output_type in enumerate(output_types)
    ]
    self._graph._add_op(self)  # pylint: disable=protected-access
    # Ops wrapped from an existing TF_Operation skip control-flow processing;
    # it only applies to ops newly created through the Python path.
    if not c_op:
      self._control_flow_post_processing()
  def _control_flow_post_processing(self):
    """Add this op to its control flow context.

    This may add new ops and change this op's inputs. self.inputs must be
    available before calling this method.
    """
    # Validate that each data input is readable from this op's context.
    for input_tensor in self.inputs:
      control_flow_util.CheckInputFromValidContext(self, input_tensor.op)
    if self._control_flow_context is not None:
      self._control_flow_context.AddOp(self)
def _reconstruct_sequence_inputs(self, op_def, inputs, attrs):
"""Regroups a flat list of input tensors into scalar and sequence inputs.
Args:
op_def: The `op_def_pb2.OpDef` (for knowing the input types)
inputs: a list of input `Tensor`s to the op.
attrs: mapping from attr name to `attr_value_pb2.AttrValue` (these define
how long each sequence is)
Returns:
A list of `Tensor`s (corresponding to scalar inputs) and lists of
`Tensor`s (corresponding to sequence inputs).
"""
grouped_inputs = []
i = 0
for input_arg in op_def.input_arg:
if input_arg.number_attr:
input_len = attrs[input_arg.number_attr].i
is_sequence = True
elif input_arg.type_list_attr:
input_len = len(attrs[input_arg.type_list_attr].list.type)
is_sequence = True
else:
input_len = 1
is_sequence = False
if is_sequence:
grouped_inputs.append(inputs[i:i + input_len])
else:
grouped_inputs.append(inputs[i])
i += input_len
assert i == len(inputs)
return grouped_inputs
def colocation_groups(self):
"""Returns the list of colocation groups of the op."""
default_colocation_group = [
compat.as_bytes("loc:@%s" % self.name)
]
try:
class_attr = self.get_attr("_class")
except ValueError:
# This op has no explicit colocation group, so it is itself its
# own root of a colocation group.
return default_colocation_group
attr_groups = [
class_name for class_name in class_attr
if class_name.startswith(b"loc:@")
]
# If there are no colocation groups in the explicit _class field,
# return the default colocation group.
return attr_groups if attr_groups else default_colocation_group
  def values(self):
    """DEPRECATED: Use outputs."""
    return tuple(self.outputs)

  def _get_control_flow_context(self):
    """Returns the control flow context of this op.

    Returns:
      A context object.
    """
    return self._control_flow_context

  def _set_control_flow_context(self, ctx):
    """Sets the current control flow context of this op.

    Args:
      ctx: a context object.
    """
    self._control_flow_context = ctx

  @property
  def name(self):
    """The full name of this operation."""
    # The C API owns the canonical name; query it rather than caching.
    return c_api.TF_OperationName(self._c_op)

  @property
  def _id(self):
    """The unique integer id of this operation."""
    return self._id_value

  @property
  def device(self):
    """The name of the device to which this op has been assigned, if any.

    Returns:
      The string name of the device to which this op has been
      assigned, or an empty string if it has not been assigned to a
      device.
    """
    return c_api.TF_OperationDevice(self._c_op)
  @property
  def _output_types(self):
    """List this operation's output types.

    Returns:
      List of the types of the Tensors computed by this operation.
      Each element in the list is an integer whose value is one of
      the TF_DataType enums defined in c_api.h
      The length of this list indicates the number of output endpoints
      of the operation.
    """
    num_outputs = c_api.TF_OperationNumOutputs(self._c_op)
    output_types = [
        c_api.TF_OperationOutputType(self._tf_output(i))
        for i in xrange(num_outputs)
    ]
    # In all the tests we have output_types that are passed into
    # Operation.__init__ are a list of ints (which is illegal according
    # to the docstring), but input_types are instances of DType.
    # This extra assert is to catch if we ever use DType for output_types.
    if output_types:
      assert isinstance(output_types[0], int)
    return output_types

  def _tf_output(self, output_idx):
    """Create and return a new TF_Output for output_idx'th output of this op."""
    tf_output = c_api.TF_Output()
    tf_output.oper = self._c_op
    tf_output.index = output_idx
    return tf_output

  def _tf_input(self, input_idx):
    """Create and return a new TF_Input for input_idx'th input of this op."""
    tf_input = c_api.TF_Input()
    tf_input.oper = self._c_op
    tf_input.index = input_idx
    return tf_input
  def _set_device(self, device):  # pylint: disable=redefined-outer-name
    """Set the device of this operation.

    Args:
      device: string or device. The device to set.
    """
    c_api.SetRequestedDevice(
        self._graph._c_graph,  # pylint: disable=protected-access
        self._c_op,  # pylint: disable=protected-access
        compat.as_str(_device_string(device)))

  def _update_input(self, index, tensor):
    """Update the input to this operation at the given index.

    NOTE: This is for TF internal use only. Please don't use it.

    Args:
      index: the index of the input to update.
      tensor: the Tensor to be used as the input at the given index.

    Raises:
      TypeError: if tensor is not a Tensor,
        or if input tensor type is not convertible to dtype.
      ValueError: if the Tensor is from a different graph.
    """
    if not isinstance(tensor, Tensor):
      raise TypeError("tensor must be a Tensor: %s" % tensor)
    _assert_same_graph(self, tensor)
    # Make sure output shapes are already computed for this op in case we create
    # a cycle (we cannot compute shapes for cycles). Usually shapes are computed
    # lazily upon request.
    if not _USE_C_SHAPES:
      set_shape_and_handle_data_for_outputs(self)
    # Reset cached inputs; they are re-read from the C graph on next access.
    self._inputs_val = None
    c_api.UpdateEdge(
        self._graph._c_graph,  # pylint: disable=protected-access
        tensor._as_tf_output(),  # pylint: disable=protected-access
        self._tf_input(index))
  def _add_control_inputs(self, ops):
    """Add a list of new control inputs to this operation.

    Args:
      ops: the list of Operations to add as control input.

    Raises:
      TypeError: if ops is not a list of Operations.
      ValueError: if any op in ops is from a different graph.
    """
    for op in ops:
      if not isinstance(op, Operation):
        raise TypeError("op must be an Operation: %s" % op)
      c_api.AddControlInput(self._graph._c_graph, self._c_op, op._c_op)  # pylint: disable=protected-access

  def _add_control_input(self, op):
    """Add a new control input to this operation.

    Args:
      op: the Operation to add as control input.

    Raises:
      TypeError: if op is not an Operation.
      ValueError: if op is from a different graph.
    """
    if not isinstance(op, Operation):
      raise TypeError("op must be an Operation: %s" % op)
    c_api.AddControlInput(self._graph._c_graph, self._c_op, op._c_op)  # pylint: disable=protected-access

  def _remove_all_control_inputs(self):
    """Removes any control inputs to this operation."""
    # Mutates the C graph directly; no Python-side state to reset.
    c_api.RemoveAllControlInputs(self._graph._c_graph, self._c_op)  # pylint: disable=protected-access
  def __str__(self):
    # Render the full NodeDef proto (name, op type, inputs, attrs, device).
    return str(self.node_def)

  def __repr__(self):
    return "<tf.Operation '%s' type=%s>" % (self.name, self.type)

  @property
  def outputs(self):
    """The list of `Tensor` objects representing the outputs of this op."""
    return self._outputs

  # pylint: disable=protected-access

  class _InputList(object):
    """Immutable input list wrapper."""

    def __init__(self, inputs):
      self._inputs = inputs

    def __iter__(self):
      return iter(self._inputs)

    def __len__(self):
      return len(self._inputs)

    def __bool__(self):
      return bool(self._inputs)

    # Python 3 wants __bool__, Python 2.7 wants __nonzero__
    __nonzero__ = __bool__

    def __getitem__(self, i):
      return self._inputs[i]

  # pylint: enable=protected-access
  @property
  def inputs(self):
    """The list of `Tensor` objects representing the data inputs of this op."""
    if self._inputs_val is None:
      # Lazily resolve the C-side TF_Outputs into Tensor objects, then cache
      # the immutable wrapper so repeated accesses are cheap.
      tf_outputs = c_api.GetOperationInputs(self._c_op)
      # pylint: disable=protected-access
      retval = [
          self.graph._get_tensor_by_tf_output(tf_output)
          for tf_output in tf_outputs
      ]
      # pylint: enable=protected-access
      self._inputs_val = Operation._InputList(retval)
    return self._inputs_val

  @property
  def _inputs(self):
    logging.warning("Operation._inputs is private, use Operation.inputs "
                    "instead. Operation._inputs will eventually be removed.")
    return self.inputs

  @_inputs.setter
  def _inputs(self, value):
    raise ValueError("Cannot assign _inputs")

  @property
  def _input_types(self):
    num_inputs = c_api.TF_OperationNumInputs(self._c_op)
    input_types = [
        dtypes.as_dtype(c_api.TF_OperationInputType(self._tf_input(i)))
        for i in xrange(num_inputs)
    ]
    return input_types

  @_input_types.setter
  def _input_types(self, value):
    raise ValueError("Cannot assign _input_types")
  @property
  def control_inputs(self):
    """The `Operation` objects on which this op has a control dependency.

    Before this op is executed, TensorFlow will ensure that the
    operations in `self.control_inputs` have finished executing. This
    mechanism can be used to run ops sequentially for performance
    reasons, or to ensure that the side effects of an op are observed
    in the correct order.

    Returns:
      A list of `Operation` objects.
    """
    control_c_ops = c_api.TF_OperationGetControlInputs_wrapper(self._c_op)
    # pylint: disable=protected-access
    return [
        self.graph._get_operation_by_name_unsafe(
            c_api.TF_OperationName(c_op)) for c_op in control_c_ops
    ]
    # pylint: enable=protected-access

  @property
  def _control_outputs(self):
    """The `Operation` objects which have a control dependency on this op.

    Before any of the ops in self._control_outputs can execute tensorflow will
    ensure self has finished executing.

    Returns:
      A list of `Operation` objects.
    """
    control_c_ops = c_api.TF_OperationGetControlOutputs_wrapper(self._c_op)
    # pylint: disable=protected-access
    return [
        self.graph._get_operation_by_name_unsafe(
            c_api.TF_OperationName(c_op)) for c_op in control_c_ops
    ]
    # pylint: enable=protected-access

  @property
  def _control_inputs(self):
    logging.warning("Operation._control_inputs is private, use "
                    "Operation.control_inputs instead. "
                    "Operation._control_inputs will eventually be removed.")
    return self.control_inputs

  @_control_inputs.setter
  def _control_inputs(self, value):
    logging.warning("Operation._control_inputs is private, use "
                    "Operation.control_inputs instead. "
                    "Operation._control_inputs will eventually be removed.")
    # Copy value because it may be self._control_inputs_val (in particular if
    # this is called from self._control_inputs += ...), and we don't want to
    # clear value below.
    value = copy.copy(value)
    # Assignment replaces the full set: drop everything, then re-add.
    self._remove_all_control_inputs()
    self._add_control_inputs(value)
  @property
  def type(self):
    """The type of the op (e.g. `"MatMul"`)."""
    # Queried from the C API rather than cached on the Python object.
    return c_api.TF_OperationOpType(self._c_op)

  @property
  def graph(self):
    """The `Graph` that contains this operation."""
    return self._graph
  @property
  def node_def(self):
    # pylint: disable=line-too-long
    """Returns the `NodeDef` representation of this operation.

    Returns:
      A
      [`NodeDef`](https://www.tensorflow.org/code/tensorflow/core/framework/node_def.proto)
      protocol buffer.
    """
    # pylint: enable=line-too-long
    # Serialize from the C API and re-parse into a fresh Python proto; the C
    # graph is the source of truth.
    with c_api_util.tf_buffer() as buf:
      c_api.TF_OperationToNodeDef(self._c_op, buf)
      data = c_api.TF_GetBuffer(buf)
    node_def = node_def_pb2.NodeDef()
    node_def.ParseFromString(compat.as_bytes(data))
    return node_def

  @property
  def _node_def(self):
    logging.warning("Operation._node_def is private, use Operation.node_def "
                    "instead. Operation._node_def will eventually be removed.")
    return self.node_def

  @property
  def op_def(self):
    # pylint: disable=line-too-long
    """Returns the `OpDef` proto that represents the type of this op.

    Returns:
      An
      [`OpDef`](https://www.tensorflow.org/code/tensorflow/core/framework/op_def.proto)
      protocol buffer.
    """
    # pylint: enable=line-too-long
    return self._graph._get_op_def(self.type)

  @property
  def _op_def(self):
    logging.warning("Operation._op_def is private, use Operation.op_def "
                    "instead. Operation._op_def will eventually be removed.")
    return self.op_def
@property
def traceback(self):
"""Returns the call stack from when this operation was constructed."""
return self._graph._convert_stack(self._traceback) # pylint: disable=protected-access
  @property
  def traceback_with_start_lines(self):
    """Same as traceback but includes start line of function definition.

    Returns:
      A list of 5-tuples (filename, lineno, name, code, func_start_lineno).
    """
    return self._graph._convert_stack(  # pylint: disable=protected-access
        self._traceback,
        include_func_start_lineno=True)
def _set_attr(self, attr_name, attr_value):
"""Private method used to set an attribute in the node_def."""
buf = c_api.TF_NewBufferFromString(
compat.as_bytes(attr_value.SerializeToString()))
try:
# pylint: disable=protected-access
c_api.SetAttr(self._graph._c_graph, self._c_op, attr_name, buf)
# pylint: enable=protected-access
finally:
c_api.TF_DeleteBuffer(buf)
def get_attr(self, name):
"""Returns the value of the attr of this op with the given `name`.
Args:
name: The name of the attr to fetch.
Returns:
The value of the attr, as a Python object.
Raises:
ValueError: If this op does not have an attr with the given `name`.
"""
fields = ["s", "i", "f", "b", "type", "shape", "tensor", "func"]
try:
with c_api_util.tf_buffer() as buf:
c_api.TF_OperationGetAttrValueProto(self._c_op, name, buf)
data = c_api.TF_GetBuffer(buf)
except errors.InvalidArgumentError as e:
# Convert to ValueError for backwards compatibility.
raise ValueError(str(e))
x = attr_value_pb2.AttrValue()
x.ParseFromString(data)
# Treat an empty oneof value as an empty list.
if not x.WhichOneof("value"):
return []
if x.HasField("list"):
for f in fields:
if getattr(x.list, f):
if f == "type":
return [dtypes.as_dtype(x) for x in list(getattr(x.list, f))]
else:
return list(getattr(x.list, f))
return []
else:
for f in fields:
if x.HasField(f):
if f == "type":
return dtypes.as_dtype(getattr(x, f))
else:
return getattr(x, f)
assert False, "Unsupported field type in " + str(x)
  def run(self, feed_dict=None, session=None):
    """Runs this operation in a `Session`.

    Calling this method will execute all preceding operations that
    produce the inputs needed for this operation.

    *N.B.* Before invoking `Operation.run()`, its graph must have been
    launched in a session, and either a default session must be
    available, or `session` must be specified explicitly.

    Args:
      feed_dict: A dictionary that maps `Tensor` objects to feed values.
        See @{tf.Session.run}
        for a description of the valid feed values.
      session: (Optional.) The `Session` to be used to run to this operation. If
        none, the default session will be used.
    """
    # Delegates to the shared module helper that resolves the session.
    _run_using_default_session(self, feed_dict, self.graph, session)
# Registry mapping op type name -> gradient function, or None for ops
# explicitly registered as non-differentiable (see NotDifferentiable).
_gradient_registry = registry.Registry("gradient")
@tf_export("RegisterGradient")
class RegisterGradient(object):
  """A decorator for registering the gradient function for an op type.

  This decorator is only used when defining a new op type. For an op
  with `m` inputs and `n` outputs, the gradient function is a function
  that takes the original `Operation` and `n` `Tensor` objects
  (representing the gradients with respect to each output of the op),
  and returns `m` `Tensor` objects (representing the partial gradients
  with respect to each input of the op).

  For example, assuming that operations of type `"Sub"` take two
  inputs `x` and `y`, and return a single output `x - y`, the
  following gradient function would be registered:

  ```python
  @tf.RegisterGradient("Sub")
  def _sub_grad(unused_op, grad):
    return grad, tf.negative(grad)
  ```

  The decorator argument `op_type` is the string type of an
  operation. This corresponds to the `OpDef.name` field for the proto
  that defines the operation.
  """

  def __init__(self, op_type):
    """Creates a new decorator with `op_type` as the Operation type.

    Args:
      op_type: The string type of an operation. This corresponds to the
        `OpDef.name` field for the proto that defines the operation.

    Raises:
      TypeError: if `op_type` is not a string.
    """
    if not isinstance(op_type, six.string_types):
      raise TypeError("op_type must be a string")
    self._op_type = op_type

  def __call__(self, f):
    """Registers the function `f` as gradient function for `op_type`."""
    # Register under the saved op type and hand the function back unchanged
    # so it can still be called (or stacked with other decorators).
    _gradient_registry.register(f, self._op_type)
    return f
@tf_export("NoGradient", "NotDifferentiable")
def NotDifferentiable(op_type):
  """Specifies that ops of type `op_type` is not differentiable.

  This function should *not* be used for operations that have a
  well-defined gradient that is not yet implemented.

  This function is only used when defining a new op type. It may be
  used for ops such as `tf.size()` that are not differentiable. For
  example:

  ```python
  tf.NotDifferentiable("Size")
  ```

  The gradient computed for 'op_type' will then propagate zeros.

  For ops that have a well-defined gradient but are not yet implemented,
  no declaration should be made, and an error *must* be thrown if
  an attempt to request its gradient is made.

  Args:
    op_type: The string type of an operation. This corresponds to the
      `OpDef.name` field for the proto that defines the operation.

  Raises:
    TypeError: If `op_type` is not a string.
  """
  if isinstance(op_type, six.string_types):
    # A None entry marks the op type as deliberately non-differentiable.
    _gradient_registry.register(None, op_type)
    return
  raise TypeError("op_type must be a string")
# Backwards-compatible alias for the old name; will eventually be removed.
NoGradient = NotDifferentiable
def get_gradient_function(op):
  """Returns the function that computes gradients for "op".

  Ops with no inputs have no gradient. An op may override its registry
  lookup key via the "_gradient_op_type" attr; otherwise its own type is
  used.
  """
  if not op.inputs:
    return None
  try:
    lookup_key = op.get_attr("_gradient_op_type")
  except ValueError:
    # No override attr present; fall back to the op's own type.
    lookup_key = op.type
  return _gradient_registry.lookup(lookup_key)
# Explicitly registered Python shape functions, keyed by op type.
_shape_registry = registry.Registry("shape functions")
# Weak default registrations (overridable by _shape_registry entries).
_default_shape_function_registry = registry.Registry("default shape functions")
# These are set to common_shapes.call_cpp_shape_fn by op generated code
# (generated by python_op_gen.cc).
# It is set outside ops.py to avoid a circular dependency.
_call_cpp_shape_fn = None
_call_cpp_shape_fn_and_require_op = None
def _set_call_cpp_shape_fn(call_cpp_shape_fn):
  """Sets default shape fns from passed common_shapes.call_cpp_shape_fn."""
  global _call_cpp_shape_fn, _call_cpp_shape_fn_and_require_op
  if _call_cpp_shape_fn:
    # A shape function has already been installed; keep the first one.
    return

  def _without_requiring(op):
    return call_cpp_shape_fn(op, require_shape_fn=False)

  def _with_requiring(op):
    return call_cpp_shape_fn(op, require_shape_fn=True)

  _call_cpp_shape_fn = _without_requiring
  _call_cpp_shape_fn_and_require_op = _with_requiring
class RegisterShape(object):
  """No longer used. Was: A decorator for registering a shape function.

  Shape functions must now be registered via the SetShapeFn on the
  original Op specification in C++.
  """

  def __init__(self, op_type):
    """Saves the `op_type` as the `Operation` type.

    Raises:
      TypeError: if `op_type` is not a string.
    """
    if not isinstance(op_type, six.string_types):
      raise TypeError("op_type must be a string")
    self._op_type = op_type

  def __call__(self, f):
    """Registers "f" as the shape function for "op_type"."""
    if f is not None:
      _shape_registry.register(f, self._op_type)
      return f
    # None is a special "weak" value that provides a default shape function,
    # and can be overridden by a non-None registration.
    assert _call_cpp_shape_fn
    try:
      _default_shape_function_registry.register(_call_cpp_shape_fn,
                                                self._op_type)
    except KeyError:
      # Ignore duplicate registrations of the weak value. This can
      # occur if the op library input to wrapper generation
      # inadvertently links in one or more of the standard op
      # libraries.
      pass
    return f
# TODO(b/74620627): remove when _USE_C_SHAPES is removed
def _set_shape_and_handle_data_for_outputs_c_api(op):
  """Set shapes and resource handle data using info from the C API."""
  assert not _USE_C_SHAPES
  # pylint: disable=protected-access
  for out in op.outputs:
    out._shape_val = out._c_api_shape()
    # Mirror the C API's resource handle data so the Python shape
    # inference code can see it.
    serialized = c_api.GetResourceHandleShapeAndType(op._graph._c_graph,
                                                     out._as_tf_output())
    handle_data = None
    if serialized:
      handle_data = (
          cpp_shape_inference_pb2.CppShapeInferenceResult.HandleData
          .FromString(compat.as_bytes(serialized)))
    out._handle_data = handle_data
  # pylint: enable=protected-access
# TODO(b/74620627): remove when _USE_C_SHAPES is removed
def set_shape_and_handle_data_for_outputs(op):
  """Set the shapes and resource handle data for op's outputs.

  When _USE_C_SHAPES = False, this is lazily called when a tensor's shape is
  first requested. Usually this should work automatically, but some edge cases
  may require manually calling this first to make sure Tensor._shape_val and
  Tensor._handle_data are set (e.g. manually overriding _handle_data, copying a
  Tensor).
  """
  if _USE_C_SHAPES:
    return
  # pylint: disable=protected-access
  if op.graph._is_function(op.type):
    # Outputs of function-call ops get unknown shapes.
    for out in op.outputs:
      out._shape_val = tensor_shape.unknown_shape()
    return
  # Resolution order: explicit registration, then the weak default, then the
  # strict C++ shape function.
  try:
    shape_func = _shape_registry.lookup(op.type)
  except LookupError:
    try:
      shape_func = _default_shape_function_registry.lookup(op.type)
    except LookupError:
      shape_func = _call_cpp_shape_fn_and_require_op
  shapes = shape_func(op)
  if shapes is None:
    raise RuntimeError(
        "Shape function for op %s did not return any shapes" % op)
  if isinstance(shapes, dict):
    # Returned by call_cpp_shape_fn: unpack shapes and handle data.
    handle_datas = shapes["handle_data"]
    shapes = shapes["shapes"]
    for out, handle_data in zip(op.outputs, handle_datas):
      # Don't override any existing handle data that may have been manually
      # set.
      if out._handle_data is None:
        out._handle_data = handle_data
  if len(op.outputs) != len(shapes):
    raise RuntimeError(
        "Shape function for op %s returned %d shapes but expected %d %s %s" %
        (op, len(shapes), len(op.outputs), shape_func.__name__, str(shapes)))
  for out, s in zip(op.outputs, shapes):
    out._shape_val = tensor_shape.unknown_shape()
    out._shape_val = out._shape_val.merge_with(s)
  # pylint: enable=protected-access
class OpStats(object):
  """A holder for statistics about an operator.

  This class holds information about the resource requirements for an op,
  including the size of its weight parameters on-disk and how many FLOPS it
  requires to execute forward inference.

  If you define a new operation, you can create a function that will return a
  set of information about its usage of the CPU and disk space when serialized.
  The function itself takes a Graph object that's been set up so you can call
  methods like get_tensor_by_name to help calculate the results, and a NodeDef
  argument.
  """

  def __init__(self, statistic_type, value=None):
    """Sets up the initial placeholders for the statistics."""
    self.statistic_type = statistic_type
    self.value = value

  @property
  def statistic_type(self):
    """The kind of statistic held (e.g. "flops")."""
    return self._statistic_type

  @statistic_type.setter
  def statistic_type(self, statistic_type):
    self._statistic_type = statistic_type

  @property
  def value(self):
    """The accumulated amount, or None if no value has been recorded."""
    return self._value

  @value.setter
  def value(self, value):
    self._value = value

  def __iadd__(self, other):
    """Accumulates another `OpStats` of the same statistic_type into this one."""
    if other.statistic_type != self.statistic_type:
      raise ValueError("Can't add an OpStat of type %s to one of %s." %
                       (self.statistic_type, other.statistic_type))
    if other.value is not None:
      # Treat a None accumulator as zero-so-far; otherwise add.
      self._value = other.value if self._value is None else (
          self._value + other.value)
    return self
# Registry of statistics functions, keyed by "op_type,statistic_type".
_stats_registry = registry.Registry("statistical functions")
class RegisterStatistics(object):
  """A decorator for registering the statistics function for an op type.

  This decorator can be defined for an op type so that it gives a
  report on the resources used by an instance of an operator, in the
  form of an OpStats object.

  Well-known types of statistics include these so far:

  - flops: When running a graph, the bulk of the computation happens doing
    numerical calculations like matrix multiplications. This type allows a node
    to return how many floating-point operations it takes to complete. The
    total number of FLOPs for a graph is a good guide to its expected latency.

  You can add your own statistics just by picking a new type string, registering
  functions for the ops you care about, and then calling get_stats_for_node_def.

  If a statistic for an op is registered multiple times, a KeyError will be
  raised.

  Since the statistics is counted on a per-op basis. It is not suitable for
  model parameters (capacity), which is expected to be counted only once, even
  if it is shared by multiple ops. (e.g. RNN)

  For example, you can define a new metric called doohickey for a Foo operation
  by placing this in your code:

  ```python
  @ops.RegisterStatistics("Foo", "doohickey")
  def _calc_foo_bojangles(unused_graph, unused_node_def):
    return ops.OpStats("doohickey", 20)
  ```

  Then in client code you can retrieve the value by making this call:

  ```python
  doohickey = ops.get_stats_for_node_def(graph, node_def, "doohickey")
  ```

  If the NodeDef is for an op with a registered doohickey function, you'll get
  back the calculated amount in doohickey.value, or None if it's not defined.
  """

  def __init__(self, op_type, statistic_type):
    """Saves the `op_type` as the `Operation` type.

    Raises:
      TypeError: if either argument is not a comma-free string.
    """
    # Commas are forbidden because the registry key is "op_type,statistic_type".
    if not isinstance(op_type, six.string_types):
      raise TypeError("op_type must be a string.")
    if "," in op_type:
      raise TypeError("op_type must not contain a comma.")
    if not isinstance(statistic_type, six.string_types):
      raise TypeError("statistic_type must be a string.")
    if "," in statistic_type:
      raise TypeError("statistic_type must not contain a comma.")
    self._op_type = op_type
    self._statistic_type = statistic_type

  def __call__(self, f):
    """Registers "f" as the statistics function for "op_type"."""
    registry_key = self._op_type + "," + self._statistic_type
    _stats_registry.register(f, registry_key)
    return f
def get_stats_for_node_def(graph, node, statistic_type):
  """Looks up the node's statistics function in the registry and calls it.

  This function takes a Graph object and a NodeDef from a GraphDef, and if
  there's an associated statistics method, calls it and returns a result. If no
  function has been registered for the particular node type, it returns an empty
  statistics object.

  Args:
    graph: A Graph object that's been set up with the node's graph.
    node: A NodeDef describing the operator.
    statistic_type: A string identifying the statistic we're interested in.

  Returns:
    An OpStats object containing information about resource usage.
  """
  lookup_key = node.op + "," + statistic_type
  try:
    # NOTE: a LookupError raised by the statistics function itself is also
    # treated as "no statistics available", matching historical behavior.
    stats_func = _stats_registry.lookup(lookup_key)
    return stats_func(graph, node)
  except LookupError:
    return OpStats(statistic_type)
def _name_from_scope_name(name):
"""Returns the name of an op given the name of its scope.
Args:
name: the name of the scope.
Returns:
the name of the op (equal to scope name minus any trailing slash).
"""
return name[:-1] if (name and name[-1] == "/") else name
@tf_export("Graph")
class Graph(object):
"""A TensorFlow computation, represented as a dataflow graph.
A `Graph` contains a set of
@{tf.Operation} objects,
which represent units of computation; and
@{tf.Tensor} objects, which represent
the units of data that flow between operations.
A default `Graph` is always registered, and accessible by calling
@{tf.get_default_graph}.
To add an operation to the default graph, simply call one of the functions
that defines a new `Operation`:
```python
c = tf.constant(4.0)
assert c.graph is tf.get_default_graph()
```
Another typical usage involves the
@{tf.Graph.as_default}
context manager, which overrides the current default graph for the
lifetime of the context:
```python
g = tf.Graph()
with g.as_default():
# Define operations and tensors in `g`.
c = tf.constant(30.0)
assert c.graph is g
```
Important note: This class *is not* thread-safe for graph construction. All
operations should be created from a single thread, or external
synchronization must be provided. Unless otherwise specified, all methods
are not thread-safe.
A `Graph` instance supports an arbitrary number of "collections"
that are identified by name. For convenience when building a large
graph, collections can store groups of related objects: for
example, the `tf.Variable` uses a collection (named
@{tf.GraphKeys.GLOBAL_VARIABLES}) for
all variables that are created during the construction of a graph. The caller
may define additional collections by specifying a new name.
"""
  def __init__(self):
    """Creates a new, empty Graph."""
    # Protects core state that can be returned via public accessors, as well as
    # synchronizes Session.run calls with methods that create and mutate ops
    # (e.g. Graph.create_op()). This synchronization is necessary because it's
    # illegal to modify an operation after it's been run. Thread-safety is
    # provided on a best-effort basis to support buggy programs, and is not
    # guaranteed by the public `tf.Graph` API.
    #
    # The lock must be reentrant because create_op can be called recursively due
    # to control flow. Without a reentrant lock, many methods would also need a
    # "locked" version or parameter (including generated code).
    #
    # NOTE(mrry): This does not protect the various stacks. A warning will
    # be reported if these are used from multiple threads
    self._lock = threading.RLock()
    self._nodes_by_id = dict()  # GUARDED_BY(self._lock)
    self._next_id_counter = 0  # GUARDED_BY(self._lock)
    self._nodes_by_name = dict()  # GUARDED_BY(self._lock)
    self._version = 0  # GUARDED_BY(self._lock)
    # Maps a name used in the graph to the next id to use for that name.
    self._names_in_use = {}
    self._stack_state_is_thread_local = False
    self._thread_local = threading.local()
    # Functions that will be applied to choose a device if none is specified.
    # After switch_to_thread_local(), self._thread_local._device_function_stack
    # is used instead.
    self._graph_device_function_stack = []
    # Default original_op applied to new ops.
    self._default_original_op = None
    # Current control flow context. It could be either CondContext or
    # WhileContext defined in ops/control_flow_ops.py
    self._control_flow_context = None
    # A new node will depend of the union of all of the nodes in the stack.
    # After switch_to_thread_local(),
    # self._thread_local._control_dependencies_stack is used instead.
    self._graph_control_dependencies_stack = []
    # Arbitrary collections of objects.
    self._collections = {}
    # The graph-level random seed
    self._seed = None
    # A dictionary of attributes that should be applied to all ops.
    self._attr_scope_map = {}
    # A map from op type to the kernel label that should be used.
    self._op_to_kernel_label_map = {}
    # A map from op type to an alternative op type that should be used when
    # computing gradients.
    self._gradient_override_map = {}
    # True if the graph is considered "finalized". In that case no
    # new operations can be added.
    self._finalized = False
    # Functions defined in the graph
    self._functions = collections.OrderedDict()
    # Default GraphDef versions
    self._graph_def_versions = versions_pb2.VersionDef(
        producer=versions.GRAPH_DEF_VERSION,
        min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER)
    self._building_function = False
    # Stack of colocate_with ops. After switch_to_thread_local(),
    # self._thread_local._colocation_stack is used instead.
    self._graph_colocation_stack = []
    # Set of tensors that are dangerous to feed!
    self._unfeedable_tensors = set()
    # Set of operations that are dangerous to fetch!
    self._unfetchable_ops = set()
    # A map of tensor handle placeholder to tensor dtype.
    self._handle_feeders = {}
    # A map from tensor handle to its read op.
    self._handle_readers = {}
    # A map from tensor handle to its move op.
    self._handle_movers = {}
    # A map from tensor handle to its delete op.
    self._handle_deleters = {}
    # Allow optimizers and other objects to pseudo-uniquely key graphs (this key
    # will be shared when defining function graphs, for example, so optimizers
    # being called inside function definitions behave as if they were seeing the
    # actual outside graph).
    # NOTE(review): "grap-key" looks like a typo for "graph-key", but the
    # string is part of the key format — confirm nothing depends on the exact
    # spelling before changing it.
    self._graph_key = "grap-key-%d/" % (uid(),)
    # A string with the last reduction method passed to
    # losses.compute_weighted_loss(), or None.
    self._last_loss_reduction = None
    self._container = ""
    self._registered_ops = op_def_registry.get_registered_ops()
    # TODO(skyewm): fold as much of the above as possible into the C
    # implementation
    if self._use_c_api_hack():
      self._scoped_c_graph = c_api_util.ScopedTFGraph()
      # The C API requires all ops to have shape functions. Disable this
      # requirement (many custom ops do not have shape functions, and we don't
      # want to break these existing cases).
      c_api.SetRequireShapeInferenceFns(self._c_graph, False)
    else:
      self._scoped_c_graph = None
  # TODO(apassos) remove once the C API is used by default.
  def _use_c_api_hack(self):
    """Temporary hack; can be overridden to force C API usage."""
    return _USE_C_API
def _convert_stack(self, stack, include_func_start_lineno=False):
"""Converts a stack extracted using _extract_stack() to a traceback stack.
Args:
stack: A list of n 5-tuples,
(filename, lineno, name, frame_globals, func_start_lineno).
include_func_start_lineno: True if function start line number should be
included as the 5th entry in return tuples.
Returns:
A list of n 4-tuples or 5-tuples
(filename, lineno, name, code, [optional: func_start_lineno]), where the
code tuple element is calculated from the corresponding elements of the
input tuple.
"""
ret = []
for (filename, lineno, name, frame_globals, func_start_lineno,
unused_frame_info) in stack:
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, frame_globals)
if line:
line = line.strip()
else:
line = None
if include_func_start_lineno:
ret.append((filename, lineno, name, line, func_start_lineno))
else:
ret.append((filename, lineno, name, line))
return ret
  # Note: this method is private because the API of tf.Graph() is public and
  # frozen, and this functionality is still not ready for public visibility.
  @tf_contextlib.contextmanager
  def _variable_creator_scope(self, creator):
    """Context manager that pushes `creator` onto the variable-creator stack.

    The stack contents from before entry are restored on exit, even if the
    body raises.
    """
    # This step makes a copy of the existing stack, and it also initializes
    # self._thread_local._variable_creator_stack if it doesn't exist yet.
    old = list(self._variable_creator_stack)
    self._thread_local._variable_creator_stack.append(creator)
    try:
      yield
    finally:
      self._thread_local._variable_creator_stack = old
  # Note: this method is private because the API of tf.Graph() is public and
  # frozen, and this functionality is still not ready for public visibility.
  @property
  def _variable_creator_stack(self):
    """Snapshot of the thread-local variable-creator stack (lazily created)."""
    if not hasattr(self._thread_local, "_variable_creator_stack"):
      self._thread_local._variable_creator_stack = []
    # Returns a new list, so mutations by the caller do not affect the
    # stored stack.
    return list(self._thread_local._variable_creator_stack)
  @_variable_creator_stack.setter
  def _variable_creator_stack(self, variable_creator_stack):
    self._thread_local._variable_creator_stack = variable_creator_stack
def _extract_stack(self):
"""A lightweight, extensible re-implementation of traceback.extract_stack.
NOTE(mrry): traceback.extract_stack eagerly retrieves the line of code for
each stack frame using linecache, which results in an abundance of stat()
calls. This implementation does not retrieve the code, and any consumer
should apply _convert_stack to the result to obtain a traceback that can
be formatted etc. using traceback methods.
Derived classes can implement _extract_frame_info() to add extra information
to the traceback.
Returns:
A list of 6-tuples
(filename, lineno, name, frame_globals, func_start_lineno, custom_info)
corresponding to the call stack of the current thread.
"""
try:
raise ZeroDivisionError
except ZeroDivisionError:
f = sys.exc_info()[2].tb_frame.f_back
ret = []
while f is not None:
lineno = f.f_lineno
co = f.f_code
filename = co.co_filename
name = co.co_name
frame_globals = f.f_globals
func_start_lineno = co.co_firstlineno
frame_info = self._extract_frame_info(f)
ret.append((filename, lineno, name, frame_globals, func_start_lineno,
frame_info))
f = f.f_back
ret.reverse()
return ret
  def _extract_frame_info(self, frame):  # pylint: disable=unused-argument
    """Extracts custom information from a frame in an op traceback.

    Subclasses may override to attach extra per-frame data; the base
    implementation attaches nothing.
    """
    return None
  def _check_not_finalized(self):
    """Check if the graph is finalized.

    Raises:
      RuntimeError: If the graph finalized.
    """
    if self._finalized:
      raise RuntimeError("Graph is finalized and cannot be modified.")
  def _add_op(self, op):
    """Adds 'op' to the graph.

    Args:
      op: the Operator or Tensor to add.

    Raises:
      TypeError: if op is not an Operation or Tensor.
      ValueError: if the op.name or op._id are already used.
    """
    self._check_not_finalized()
    if not isinstance(op, (Tensor, Operation)):
      raise TypeError("op must be a Tensor or Operation: %s" % op)
    # Both lookup tables and the version counter must change atomically,
    # so all mutations happen under the graph lock.
    with self._lock:
      # pylint: disable=protected-access
      if op._id in self._nodes_by_id:
        raise ValueError("cannot add an op with id %d as it already "
                         "exists in the graph" % op._id)
      if op.name in self._nodes_by_name:
        raise ValueError("cannot add op with name %s as that name "
                         "is already used" % op.name)
      self._nodes_by_id[op._id] = op
      self._nodes_by_name[op.name] = op
      # version tracks the highest id ever added to the graph.
      self._version = max(self._version, op._id)
      # pylint: enable=protected-access
  @property
  def _c_graph(self):
    """The underlying TF_Graph handle, or None when the C API is not in use."""
    if self._scoped_c_graph:
      return self._scoped_c_graph.graph
    return None
  @property
  def version(self):
    """Returns a version number that increases as ops are added to the graph.

    Note that this is unrelated to the
    @{tf.Graph.graph_def_versions}.

    Returns:
      An integer version that increases as ops are added to the graph.
    """
    # A finalized graph can no longer change, so the lock can be skipped.
    if self._finalized:
      return self._version
    with self._lock:
      return self._version
@property
def graph_def_versions(self):
# pylint: disable=line-too-long
"""The GraphDef version information of this graph.
For details on the meaning of each version, see
[`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto).
Returns:
A `VersionDef`.
"""
# pylint: enable=line-too-long
with c_api_util.tf_buffer() as buf:
c_api.TF_GraphVersions(self._c_graph, buf)
data = c_api.TF_GetBuffer(buf)
version_def = versions_pb2.VersionDef()
version_def.ParseFromString(compat.as_bytes(data))
return version_def
  @property
  def seed(self):
    """The graph-level random seed of this graph."""
    return self._seed
  @seed.setter
  def seed(self, seed):
    """Sets the graph-level random seed."""
    self._seed = seed
  @property
  def finalized(self):
    """True if this graph has been finalized."""
    return self._finalized
  def finalize(self):
    """Finalizes this graph, making it read-only.

    After calling `g.finalize()`, no new operations can be added to
    `g`. This method is used to ensure that no operations are added
    to a graph when it is shared between multiple threads, for example
    when using a @{tf.train.QueueRunner}.
    """
    self._finalized = True
  def _unsafe_unfinalize(self):
    """Opposite of `finalize`. Internal interface.

    NOTE: Unfinalizing a graph could have negative impact on performance,
    especially in a multi-threaded environment. Unfinalizing a graph
    when it is in use by a Session may lead to undefined behavior. Ensure
    that all sessions using a graph are closed before calling this method.
    """
    self._finalized = False
  def _get_control_flow_context(self):
    """Returns the current control flow context.

    Returns:
      A context object.
    """
    return self._control_flow_context
  def _set_control_flow_context(self, ctx):
    """Sets the current control flow context.

    Args:
      ctx: a context object.
    """
    self._control_flow_context = ctx
def _copy_functions_to_graph_def(self, graph_def, starting_bytesize):
"""If this graph contains functions, copy them to `graph_def`."""
bytesize = starting_bytesize
for f in self._functions.values():
bytesize += f.definition.ByteSize()
if bytesize >= (1 << 31) or bytesize < 0:
raise ValueError("GraphDef cannot be larger than 2GB.")
graph_def.library.function.extend([f.definition])
if f.grad_func_name:
grad_def = function_pb2.GradientDef()
grad_def.function_name = f.name
grad_def.gradient_func = f.grad_func_name
graph_def.library.gradient.extend([grad_def])
  def _as_graph_def(self, from_version=None, add_shapes=False):
    # pylint: disable=line-too-long
    """Returns a serialized `GraphDef` representation of this graph.

    The serialized `GraphDef` can be imported into another `Graph`
    (using @{tf.import_graph_def}) or used with the
    [C++ Session API](../../../../api_docs/cc/index.md).

    This method is thread-safe.

    Args:
      from_version: Optional. If this is set, returns a `GraphDef`
        containing only the nodes that were added to this graph since
        its `version` property had the given value.
      add_shapes: If true, adds an "_output_shapes" list attr to each
        node with the inferred shapes of each of its outputs.

    Returns:
      A tuple containing a
      [`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)
      protocol buffer, and the version of the graph to which that
      `GraphDef` corresponds.

    Raises:
      ValueError: If the `graph_def` would be too large.
    """
    # pylint: enable=line-too-long
    # NOTE(review): `from_version` is not consulted in this C-API path — the
    # full graph is always serialized; confirm whether callers rely on the
    # incremental behavior described above.
    with self._lock:
      with c_api_util.tf_buffer() as buf:
        c_api.TF_GraphToGraphDef(self._c_graph, buf)
        data = c_api.TF_GetBuffer(buf)
      graph = graph_pb2.GraphDef()
      graph.ParseFromString(compat.as_bytes(data))
      # Strip the experimental library field iff it's empty.
      if not graph.library.function:
        graph.ClearField("library")
      if add_shapes:
        for node in graph.node:
          op = self._nodes_by_name[node.name]
          if op.outputs:
            node.attr["_output_shapes"].list.shape.extend(
                [output.get_shape().as_proto() for output in op.outputs])
      return graph, self._version
  def as_graph_def(self, from_version=None, add_shapes=False):
    # pylint: disable=line-too-long
    """Returns a serialized `GraphDef` representation of this graph.

    The serialized `GraphDef` can be imported into another `Graph`
    (using @{tf.import_graph_def}) or used with the
    [C++ Session API](../../api_docs/cc/index.md).

    This method is thread-safe.

    Args:
      from_version: Optional. If this is set, returns a `GraphDef`
        containing only the nodes that were added to this graph since
        its `version` property had the given value.
      add_shapes: If true, adds an "_output_shapes" list attr to each
        node with the inferred shapes of each of its outputs.

    Returns:
      A
      [`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)
      protocol buffer.

    Raises:
      ValueError: If the `graph_def` would be too large.
    """
    # pylint: enable=line-too-long
    # Public wrapper over _as_graph_def that drops the version element.
    result, _ = self._as_graph_def(from_version, add_shapes)
    return result
  def _is_function(self, name):
    """Tests whether 'name' is registered in this graph's function library.

    Args:
      name: string op name.

    Returns:
      bool indicating whether or not 'name' is registered in function library.
    """
    return name in self._functions
  def _get_function(self, name):
    """Returns the function definition for 'name'.

    Args:
      name: string function name.

    Returns:
      The function def proto.
    """
    # NOTE(review): _add_function stores the `_DefinedFunction` object itself,
    # not a proto — the docstring's "proto" wording may be stale; confirm.
    return self._functions.get(name, None)
  def _add_function(self, function):
    """Adds a function to the graph.

    After the function has been added, you can call to the function by
    passing the function name in place of an op name to
    `Graph.create_op()`.

    Args:
      function: A `_DefinedFunction` object.

    Raises:
      ValueError: if another function is defined with the same name.
    """
    name = function.name
    # Sanity checks on gradient definition.
    if (function.grad_func_name is not None) and (function.python_grad_func is
                                                  not None):
      raise ValueError("Gradient defined twice for function %s" % name)
    # Add function to graph
    # pylint: disable=protected-access
    # Handle functions created without using the C API. TODO(apassos,skyewm)
    # remove this when all functions are generated using the C API by default
    # as this will be unnecessary.
    if not function._c_func:
      serialized = function.definition.SerializeToString()
      c_func = c_api.TF_FunctionImportFunctionDef(serialized)
      function._c_func = c_api_util.ScopedTFFunction(c_func)
    gradient = (function._grad_func._c_func.func if function._grad_func
                else None)
    c_api.TF_GraphCopyFunction(self._c_graph, function._c_func.func, gradient)
    # pylint: enable=protected-access
    self._functions[name] = function
    # Need a new-enough consumer to support the functions we add to the graph.
    if self._graph_def_versions.min_consumer < 12:
      self._graph_def_versions.min_consumer = 12
  @property
  def building_function(self):
    """Returns True iff this graph represents a function."""
    return self._building_function
# Helper functions to create operations.
@deprecated_args(None,
                 "Shapes are always computed; don't use the compute_shapes "
                 "as it has no effect.", "compute_shapes")
def create_op(
    self,
    op_type,
    inputs,
    dtypes,  # pylint: disable=redefined-outer-name
    input_types=None,
    name=None,
    attrs=None,
    op_def=None,
    compute_shapes=True,
    compute_device=True):
  """Creates an `Operation` in this graph.

  This is a low-level interface for creating an `Operation`. Most
  programs will not call this method directly, and instead use the
  Python op constructors, such as `tf.constant()`, which add ops to
  the default graph.

  Args:
    op_type: The `Operation` type to create. This corresponds to the
      `OpDef.name` field for the proto that defines the operation.
    inputs: A list of `Tensor` objects that will be inputs to the `Operation`.
    dtypes: A list of `DType` objects that will be the types of the tensors
      that the operation produces.
    input_types: (Optional.) A list of `DType`s that will be the types of
      the tensors that the operation consumes. By default, uses the base
      `DType` of each input in `inputs`. Operations that expect
      reference-typed inputs must specify `input_types` explicitly.
    name: (Optional.) A string name for the operation. If not specified, a
      name is generated based on `op_type`.
    attrs: (Optional.) A dictionary where the key is the attribute name (a
      string) and the value is the respective `attr` attribute of the
      `NodeDef` proto that will represent the operation (an `AttrValue`
      proto).
    op_def: (Optional.) The `OpDef` proto that describes the `op_type` that
      the operation will have.
    compute_shapes: (Optional.) Deprecated. Has no effect (shapes are always
      computed).
    compute_device: (Optional.) If True, device functions will be executed
      to compute the device property of the Operation.

  Raises:
    TypeError: if any of the inputs is not a `Tensor`.
    ValueError: if colocation conflicts with existing device assignment.

  Returns:
    An `Operation` object.
  """
  # Deprecated argument: accepted for backward compatibility, then ignored.
  del compute_shapes

  self._check_not_finalized()
  for idx, a in enumerate(inputs):
    if not isinstance(a, Tensor):
      raise TypeError("Input #%d is not a tensor: %s" % (idx, a))
  if name is None:
    name = op_type
  # If a name ends with a '/' it is a "name scope" and we use it as-is,
  # after removing the trailing '/'.
  if name and name[-1] == "/":
    name = _name_from_scope_name(name)
  else:
    name = self.unique_name(name)

  node_def = _NodeDef(op_type, name, device=None, attrs=attrs)

  # Control dependencies are derived from the producing ops of the inputs,
  # not the input tensors themselves.
  input_ops = set([t.op for t in inputs])
  control_inputs = self._control_dependencies_for_inputs(input_ops)
  # _create_op_helper mutates the new Operation. _lock ensures a Session.run
  # call cannot occur between creating and mutating the op.
  with self._lock:
    ret = Operation(
        node_def,
        self,
        inputs=inputs,
        output_types=dtypes,
        control_inputs=control_inputs,
        input_types=input_types,
        original_op=self._default_original_op,
        op_def=op_def)
    self._create_op_helper(ret, compute_device=compute_device)
  return ret
def _create_op_from_tf_operation(self, c_op, compute_device=True):
  """Creates an `Operation` in this graph from the supplied TF_Operation.

  This method is like create_op() except the new Operation is constructed
  using `c_op`. The returned Operation will have `c_op` as its _c_op
  field. This is used to create Operation objects around TF_Operations created
  indirectly by the C API (e.g. by TF_ImportGraphDef, TF_FinishWhile).

  This function does not call Operation._control_flow_post_processing or
  Graph._control_dependencies_for_inputs (since the inputs may not be
  available yet). The caller is responsible for calling these methods.

  Args:
    c_op: a wrapped TF_Operation
    compute_device: (Optional.) If True, device functions will be executed
      to compute the device property of the Operation.

  Returns:
    An `Operation` object.
  """
  self._check_not_finalized()
  ret = Operation(c_op, self)
  # If a name_scope was created with ret.name but no nodes were created in it,
  # the name will still appear in _names_in_use even though the name hasn't
  # been used. This is ok, just leave _names_in_use as-is in this case.
  # TODO(skyewm): make the C API guarantee no name conflicts.
  name_key = ret.name.lower()
  if name_key not in self._names_in_use:
    self._names_in_use[name_key] = 1
  self._create_op_helper(ret, compute_device=compute_device)
  return ret
def _create_op_helper(self, op, compute_device=True):
  """Common logic for creating an op in this graph.

  Applies, in order: scoped attributes, kernel labels, gradient overrides,
  control-dependency bookkeeping, device functions, colocation constraints,
  and the resource-container attribute. Mutates `op` in place.

  Args:
    op: The newly constructed `Operation` to finish configuring.
    compute_device: If True, run the graph's device functions on `op`.
  """
  # Apply any additional attributes requested. Do not overwrite any existing
  # attributes.
  for key, value in self._attr_scope_map.items():
    try:
      # get_attr raising ValueError means the attr is not yet set, so the
      # scoped value may be applied without clobbering anything.
      op.get_attr(key)
    except ValueError:
      if callable(value):
        value = value(op.node_def)
        if not isinstance(value, (type(None), attr_value_pb2.AttrValue)):
          raise TypeError(
              "Callable for scope map key '%s' must return either None or "
              "an AttrValue protocol buffer; but it returned: %s" % (key,
                                                                     value))
      if value:
        op._set_attr(key, value)  # pylint: disable=protected-access

  # Apply a kernel label if one has been specified for this op type.
  try:
    kernel_label = self._op_to_kernel_label_map[op.type]
    op._set_attr("_kernel",  # pylint: disable=protected-access
                 attr_value_pb2.AttrValue(s=compat.as_bytes(kernel_label)))
  except KeyError:
    # No label registered for this op type; nothing to do.
    pass

  # Apply the overriding op type for gradients if one has been specified for
  # this op type.
  try:
    mapped_op_type = self._gradient_override_map[op.type]
    op._set_attr("_gradient_op_type",  # pylint: disable=protected-access
                 attr_value_pb2.AttrValue(s=compat.as_bytes(mapped_op_type)))
  except KeyError:
    pass

  self._record_op_seen_by_control_dependencies(op)

  if compute_device:
    self._apply_device_functions(op)

  if self._colocation_stack:
    all_colocation_groups = []
    for colocation_op in self._colocation_stack:
      all_colocation_groups.extend(colocation_op.colocation_groups())
      if colocation_op.device:
        # Make this device match the device of the colocated op, to provide
        # consistency between the device and the colocation property.
        if (op.device and pydev.canonical_name(op.device) !=
            pydev.canonical_name(colocation_op.device)):
          # A conflicting device was already assigned; warn now, let later
          # placement logic surface the actual error.
          logging.warning("Tried to colocate %s with an op %s that had "
                          "a different device: %s vs %s. Postponing "
                          "error-checking until all devices are assigned.",
                          op.name, colocation_op.name, op.device,
                          colocation_op.device)
        else:
          op._set_device(colocation_op.device)  # pylint: disable=protected-access

    # Deduplicate and order the groups deterministically before recording.
    all_colocation_groups = sorted(set(all_colocation_groups))
    # pylint: disable=protected-access
    op._set_attr("_class", attr_value_pb2.AttrValue(
        list=attr_value_pb2.AttrValue.ListValue(s=all_colocation_groups)))
    # pylint: enable=protected-access

  # Sets "container" attribute if
  # (1) self._container is not None
  # (2) "is_stateful" is set in OpDef
  # (3) "container" attribute is in OpDef
  # (4) "container" attribute is None
  if self._container and op.op_def.is_stateful:
    try:
      container_attr = op.get_attr("container")
    except ValueError:
      # "container" attribute is not in OpDef
      pass
    else:
      if not container_attr:
        op._set_attr("container", attr_value_pb2.AttrValue(  # pylint: disable=protected-access
            s=compat.as_bytes(self._container)))
def _add_new_tf_operations(self, compute_devices=True):
  """Creates `Operations` in this graph for any new TF_Operations.

  This is useful for when TF_Operations are indirectly created by the C API
  outside of the Operation constructor (e.g. by TF_ImportGraphDef,
  TF_FinishWhile). This ensures there are corresponding Operations for all
  TF_Operations in the underlying TF_Graph.

  Args:
    compute_devices: (Optional.) If True, device functions will be executed
      to compute the device properties of each new Operation.

  Returns:
    A list of the new `Operation` objects.
  """
  # Create all Operation objects before accessing their inputs since an op may
  # be created before its inputs.
  new_ops = [
      self._create_op_from_tf_operation(c_op, compute_device=compute_devices)
      for c_op in c_api_util.new_tf_operations(self)
  ]

  # Second pass: now every producer exists, so inputs can be resolved.
  # pylint: disable=protected-access
  for op in new_ops:
    # Operations created by the C API always retrieve shapes from the C API so
    # we preserve the shapes of ops created in import_graph_def (from the
    # "_output_shapes" attr of the imported NodeDef).
    if not _USE_C_SHAPES:
      _set_shape_and_handle_data_for_outputs_c_api(op)
    new_control_inputs = self._control_dependencies_for_inputs(op.inputs)
    op._add_control_inputs(new_control_inputs)
    op._control_flow_post_processing()
  # pylint: enable=protected-access

  return new_ops
def as_graph_element(self, obj, allow_tensor=True, allow_operation=True):
  """Resolves `obj` to an `Operation` or `Tensor` belonging to this graph.

  This is the canonical way to get/validate an object of one of the
  allowed types from an external argument reference in the Session API,
  and it produces an informative error when `obj` is not an element of
  this graph. Safe to call concurrently from multiple threads.

  Args:
    obj: A `Tensor`, an `Operation`, or the name of a tensor or operation.
      Can also be any object with an `_as_graph_element()` method that
      returns a value of one of these types.
    allow_tensor: If true, `obj` may refer to a `Tensor`.
    allow_operation: If true, `obj` may refer to an `Operation`.

  Returns:
    The `Tensor` or `Operation` in the Graph corresponding to `obj`.

  Raises:
    TypeError: If `obj` is not a type we support attempting to convert.
    ValueError: If `obj` is of an appropriate type but invalid (for
      example, an invalid string).
    KeyError: If `obj` is not an object in the graph.
  """
  if not self._finalized:
    # The graph may still be mutated, so serialize against writers.
    with self._lock:
      return self._as_graph_element_locked(obj, allow_tensor, allow_operation)
  # A finalized graph is immutable; the lock-free path is safe.
  return self._as_graph_element_locked(obj, allow_tensor, allow_operation)
def _as_graph_element_locked(self, obj, allow_tensor, allow_operation):
  """See `Graph.as_graph_element()` for details.

  Assumes the caller holds `self._lock`, or that the graph is finalized
  and therefore immutable.

  Args:
    obj: A `Tensor`, an `Operation`, a string naming one, or any object
      with an `_as_graph_element()` method.
    allow_tensor: If true, `obj` may refer to a `Tensor`.
    allow_operation: If true, `obj` may refer to an `Operation`.

  Returns:
    The `Tensor` or `Operation` in this graph corresponding to `obj`.

  Raises:
    TypeError: If `obj` is not a type we support attempting to convert.
    ValueError: If `obj` is of an appropriate type but invalid, or if
      both `allow_tensor` and `allow_operation` are False.
    KeyError: If `obj` is not an object in the graph.
  """
  # The vast majority of this function is figuring
  # out what an API user might be doing wrong, so
  # that we can give helpful error messages.
  #
  # Ideally, it would be nice to split it up, but we
  # need context to generate nice error messages.

  if allow_tensor and allow_operation:
    types_str = "Tensor or Operation"
  elif allow_tensor:
    types_str = "Tensor"
  elif allow_operation:
    types_str = "Operation"
  else:
    raise ValueError("allow_tensor and allow_operation can't both be False.")

  temp_obj = _as_graph_element(obj)
  if temp_obj is not None:
    obj = temp_obj

  # If obj appears to be a name...
  if isinstance(obj, compat.bytes_or_text_types):
    name = compat.as_str(obj)

    if ":" in name and allow_tensor:
      # Looks like a Tensor name and can be a Tensor.
      try:
        op_name, out_n = name.split(":")
        out_n = int(out_n)
      except ValueError:
        # FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. Both a malformed split (wrong field count)
        # and a non-integer index raise ValueError, so this is the only
        # exception type expected here.
        raise ValueError("The name %s looks a like a Tensor name, but is "
                         "not a valid one. Tensor names must be of the "
                         "form \"<op_name>:<output_index>\"." % repr(name))
      if op_name in self._nodes_by_name:
        op = self._nodes_by_name[op_name]
      else:
        raise KeyError("The name %s refers to a Tensor which does not "
                       "exist. The operation, %s, does not exist in the "
                       "graph." % (repr(name), repr(op_name)))
      try:
        return op.outputs[out_n]
      except IndexError:
        # FIX: was a bare `except:`. `out_n` is a non-negative-or-negative
        # int at this point, so only an out-of-range index is expected.
        raise KeyError("The name %s refers to a Tensor which does not "
                       "exist. The operation, %s, exists but only has "
                       "%s outputs." % (repr(name), repr(op_name),
                                        len(op.outputs)))

    elif ":" in name and not allow_tensor:
      # Looks like a Tensor name but can't be a Tensor.
      raise ValueError("Name %s appears to refer to a Tensor, not a %s." %
                       (repr(name), types_str))

    elif ":" not in name and allow_operation:
      # Looks like an Operation name and can be an Operation.
      if name not in self._nodes_by_name:
        raise KeyError("The name %s refers to an Operation not in the "
                       "graph." % repr(name))
      return self._nodes_by_name[name]

    elif ":" not in name and not allow_operation:
      # Looks like an Operation name but can't be an Operation.
      if name in self._nodes_by_name:
        # Yep, it's an Operation name
        err_msg = ("The name %s refers to an Operation, not a %s." %
                   (repr(name), types_str))
      else:
        err_msg = ("The name %s looks like an (invalid) Operation name, "
                   "not a %s." % (repr(name), types_str))
      err_msg += (" Tensor names must be of the form "
                  "\"<op_name>:<output_index>\".")
      raise ValueError(err_msg)

  elif isinstance(obj, Tensor) and allow_tensor:
    # Actually obj is just the object it's referring to.
    if obj.graph is not self:
      raise ValueError("Tensor %s is not an element of this graph." % obj)
    return obj
  elif isinstance(obj, Operation) and allow_operation:
    # Actually obj is just the object it's referring to.
    if obj.graph is not self:
      raise ValueError("Operation %s is not an element of this graph." % obj)
    return obj
  else:
    # We give up!
    raise TypeError("Can not convert a %s into a %s." % (type(obj).__name__,
                                                         types_str))
def get_operations(self):
  """Return the list of operations in the graph.

  The returned list is a snapshot: inserting into or deleting from it has
  no effect on the graph, although the contained `Operation` objects are
  the live ones. Safe to call concurrently from multiple threads.

  Returns:
    A list of Operations.
  """
  if not self._finalized:
    # Still mutable; take the lock while copying.
    with self._lock:
      return list(self._nodes_by_id.values())
  # Finalized graphs are immutable, so a lock-free copy is safe.
  return list(self._nodes_by_id.values())
def get_operation_by_name(self, name):
  """Look up the `Operation` with the given `name` in this graph.

  Safe to call concurrently from multiple threads.

  Args:
    name: The name of the `Operation` to return.

  Returns:
    The `Operation` with the given `name`.

  Raises:
    TypeError: If `name` is not a string.
    KeyError: If `name` does not correspond to an operation in this graph.
  """
  if isinstance(name, six.string_types):
    return self.as_graph_element(
        name, allow_tensor=False, allow_operation=True)
  raise TypeError("Operation names are strings (or similar), not %s." %
                  type(name).__name__)
def _get_operation_by_name_unsafe(self, name):
"""Returns the `Operation` with the given `name`.
This is a internal unsafe version of get_operation_by_name. It skips many
checks and does not have user friedly error messages but runs considerably
faster. This method may be called concurrently from multiple threads.
Args:
name: The name of the `Operation` to return.
Returns:
The `Operation` with the given `name`.
Raises:
KeyError: If `name` does not correspond to an operation in this graph.
"""
if self._finalized:
return self._nodes_by_name[name]
with self._lock:
return self._nodes_by_name[name]
def _get_operation_by_tf_operation(self, tf_oper):
  """Returns the `Operation` object wrapping the given C `TF_Operation`.

  Args:
    tf_oper: a wrapped TF_Operation.

  Returns:
    The `Operation` in this graph with the name the C API reports.

  Raises:
    KeyError: If no operation with that name exists in this graph.
  """
  # Resolve via the C-side name; the unsafe lookup skips friendliness checks.
  op_name = c_api.TF_OperationName(tf_oper)
  return self._get_operation_by_name_unsafe(op_name)
def get_tensor_by_name(self, name):
  """Look up the `Tensor` with the given `name` in this graph.

  Safe to call concurrently from multiple threads.

  Args:
    name: The name of the `Tensor` to return.

  Returns:
    The `Tensor` with the given `name`.

  Raises:
    TypeError: If `name` is not a string.
    KeyError: If `name` does not correspond to a tensor in this graph.
  """
  # Names should be strings.
  if isinstance(name, six.string_types):
    return self.as_graph_element(
        name, allow_tensor=True, allow_operation=False)
  raise TypeError("Tensor names are strings (or similar), not %s." %
                  type(name).__name__)
def _get_tensor_by_tf_output(self, tf_output):
"""Returns the `Tensor` representing `tf_output`.
Note that there is only one such `Tensor`, i.e. multiple calls to this
function with the same TF_Output value will always return the same `Tensor`
object.
Args:
tf_output: A wrapped `TF_Output` (the C API equivalent of `Tensor`).
Returns:
The `Tensor` that represents `tf_output`.
"""
op = self._get_operation_by_tf_operation(tf_output.oper)
return op.outputs[tf_output.index]
def _next_id(self):
"""Id for next Operation instance. Also increments the internal id."""
self._check_not_finalized()
with self._lock:
self._next_id_counter += 1
return self._next_id_counter
@property
def _last_id(self):
  """The id most recently handed out by `_next_id` (0 if none yet)."""
  return self._next_id_counter
def _get_op_def(self, type):  # pylint: disable=redefined-builtin
  """Returns the `OpDef` proto for `type`. `type` is a string."""
  # Ask the C graph for the serialized OpDef, then parse it back into a
  # Python proto object.
  with c_api_util.tf_buffer() as buf:
    # pylint: disable=protected-access
    c_api.TF_GraphGetOpDef(self._c_graph, compat.as_bytes(type), buf)
    # pylint: enable=protected-access
    data = c_api.TF_GetBuffer(buf)
  op_def = op_def_pb2.OpDef()
  op_def.ParseFromString(compat.as_bytes(data))
  return op_def
def as_default(self):
  """Returns a context manager that makes this `Graph` the default graph.

  This method should be used if you want to create multiple graphs
  in the same process. For convenience, a global default graph is
  provided, and all ops will be added to this graph if you do not
  create a new graph explicitly. Use this method with the `with` keyword
  to specify that ops created within the scope of a block should be
  added to this graph.

  The default graph is a property of the current thread. If you
  create a new thread, and wish to use the default graph in that
  thread, you must explicitly add a `with g.as_default():` in that
  thread's function.

  The following code examples are equivalent:

  ```python
  # 1. Using Graph.as_default():
  g = tf.Graph()
  with g.as_default():
    c = tf.constant(5.0)
    assert c.graph is g

  # 2. Constructing and making default:
  with tf.Graph().as_default() as g:
    c = tf.constant(5.0)
    assert c.graph is g
  ```

  If eager execution is enabled ops created under this context manager will be
  added to the graph instead of executed eagerly.

  Returns:
    A context manager for using this graph as the default graph.
  """
  # Delegates to the (module-level) thread-aware default-graph stack.
  return _default_graph_stack.get_controller(self)
@property
def collections(self):
  """Returns the names of the collections known to this graph."""
  # Snapshot of the keys; mutating the returned list does not affect the
  # graph's collections.
  return list(self._collections)
def add_to_collection(self, name, value):
  """Stores `value` in the collection with the given `name`.

  Note that collections are not sets, so it is possible to add a value to
  a collection several times.

  Args:
    name: The key for the collection. The `GraphKeys` class
      contains many standard names for collections.
    value: The value to add to the collection.
  """  # pylint: disable=g-doc-exception
  self._check_not_finalized()
  with self._lock:
    # Create the collection on first use, then append.
    self._collections.setdefault(name, []).append(value)
def add_to_collections(self, names, value):
  """Stores `value` in each of the collections given by `names`.

  Collections are not sets, so repeated calls append duplicates. Duplicate
  entries within `names` itself, however, are collapsed so `value` is added
  at most once per distinct collection name in a single call. A plain
  string in `names` is treated as one collection name, not as an iterable
  of characters.

  Args:
    names: The keys for the collections to add to. The `GraphKeys` class
      contains many standard names for collections.
    value: The value to add to the collections.
  """
  if isinstance(names, six.string_types):
    unique_names = (names,)
  else:
    unique_names = set(names)
  for collection_name in unique_names:
    self.add_to_collection(collection_name, value)
def get_collection_ref(self, name):
  """Returns the live list backing the collection with the given `name`.

  If the collection exists, this returns the list itself, which can
  be modified in place to change the collection. If the collection does
  not exist, it is created as an empty list and that list is returned.

  This is different from `get_collection()` which always returns a copy of
  the collection list if it exists and never creates an empty collection.

  Args:
    name: The key for the collection. For example, the `GraphKeys` class
      contains many standard names for collections.

  Returns:
    The list of values in the collection with the given `name`, or an empty
    list if no value has been added to that collection.
  """  # pylint: disable=g-doc-exception
  with self._lock:
    existing = self._collections.get(name)
    if existing is not None:
      return existing
    fresh = []
    self._collections[name] = fresh
    return fresh
def get_collection(self, name, scope=None):
  """Returns a list of values in the collection with the given `name`.

  This is different from `get_collection_ref()` which always returns the
  actual collection list if it exists in that it returns a new list each
  time it is called.

  Args:
    name: The key for the collection. For example, the `GraphKeys` class
      contains many standard names for collections.
    scope: (Optional.) A string. If supplied, the resulting list is filtered
      to include only items whose `name` attribute matches `scope` using
      `re.match`. Items without a `name` attribute are never returned if a
      scope is supplied. The choice of `re.match` means that a `scope`
      without special tokens filters by prefix.

  Returns:
    The list of values in the collection with the given `name`, or
    an empty list if no value has been added to that collection. The
    list contains the values in the order under which they were
    collected.
  """  # pylint: disable=g-doc-exception
  with self._lock:
    items = self._collections.get(name, None)
    if items is None:
      return []
    if scope is None:
      return list(items)
    matcher = re.compile(scope)
    # Items lacking a `name` attribute can never match a scope.
    return [item for item in items
            if hasattr(item, "name") and matcher.match(item.name)]
def get_all_collection_keys(self):
  """Returns the string-valued collection names used in this graph."""
  with self._lock:
    # Non-string keys (if any) are deliberately excluded.
    return [key for key in self._collections
            if isinstance(key, six.string_types)]
def clear_collection(self, name):
  """Clears all values in a collection.

  Removing a collection that does not exist is a no-op.

  Args:
    name: The key for the collection. The `GraphKeys` class contains many
      standard names for collections.
  """
  self._check_not_finalized()
  with self._lock:
    # pop with a default avoids a KeyError for unknown names.
    self._collections.pop(name, None)
@tf_contextlib.contextmanager
def _original_op(self, op):
  """Python 'with' handler to help annotate ops with their originator.

  An op may have an 'original_op' property that indicates the op on which
  it was based. For example a replica op is based on the op that was
  replicated and a gradient op is based on the op that was differentiated.

  All ops created in the scope of this 'with' handler will have
  the given 'op' as their original op.

  Args:
    op: The Operation that all ops created in this scope will have as their
      original op.

  Yields:
    Nothing.
  """
  # Save/restore pattern: the previous default is reinstated even if the
  # body raises.
  old_original_op = self._default_original_op
  try:
    self._default_original_op = op
    yield
  finally:
    self._default_original_op = old_original_op
@property
def _name_stack(self):
  """The current name-scope prefix for the calling thread (a string)."""
  # This may be called from a thread where name_stack doesn't yet exist;
  # lazily default it to the empty (root) scope.
  if not hasattr(self._thread_local, "_name_stack"):
    self._thread_local._name_stack = ""
  return self._thread_local._name_stack

@_name_stack.setter
def _name_stack(self, name_stack):
  # Each thread keeps its own name stack via the thread-local storage.
  self._thread_local._name_stack = name_stack
# pylint: disable=g-doc-return-or-yield,line-too-long
@tf_contextlib.contextmanager
def name_scope(self, name):
  r"""Returns a context manager that creates hierarchical names for operations.

  A graph maintains a stack of name scopes. A `with name_scope(...):`
  statement pushes a new name onto the stack for the lifetime of the context.

  The `name` argument will be interpreted as follows:

  * A string (not ending with '/') will create a new name scope, in which
    `name` is appended to the prefix of all operations created in the
    context. If `name` has been used before, it will be made unique by
    calling `self.unique_name(name)`.
  * A scope previously captured from a `with g.name_scope(...) as
    scope:` statement will be treated as an "absolute" name scope, which
    makes it possible to re-enter existing scopes.
  * A value of `None` or the empty string will reset the current name scope
    to the top-level (empty) name scope.

  For example:

  ```python
  with tf.Graph().as_default() as g:
    c = tf.constant(5.0, name="c")
    assert c.op.name == "c"
    c_1 = tf.constant(6.0, name="c")
    assert c_1.op.name == "c_1"

    # Creates a scope called "nested"
    with g.name_scope("nested") as scope:
      nested_c = tf.constant(10.0, name="c")
      assert nested_c.op.name == "nested/c"

      # Creates a nested scope called "inner".
      with g.name_scope("inner"):
        nested_inner_c = tf.constant(20.0, name="c")
        assert nested_inner_c.op.name == "nested/inner/c"

      # Create a nested scope called "inner_1".
      with g.name_scope("inner"):
        nested_inner_1_c = tf.constant(30.0, name="c")
        assert nested_inner_1_c.op.name == "nested/inner_1/c"

        # Treats `scope` as an absolute name scope, and
        # switches to the "nested/" scope.
        with g.name_scope(scope):
          nested_d = tf.constant(40.0, name="d")
          assert nested_d.op.name == "nested/d"

          with g.name_scope(""):
            e = tf.constant(50.0, name="e")
            assert e.op.name == "e"
  ```

  The name of the scope itself can be captured by `with
  g.name_scope(...) as scope:`, which stores the name of the scope
  in the variable `scope`. This value can be used to name an
  operation that represents the overall result of executing the ops
  in a scope. For example:

  ```python
  inputs = tf.constant(...)
  with g.name_scope('my_layer') as scope:
    weights = tf.Variable(..., name="weights")
    biases = tf.Variable(..., name="biases")
    affine = tf.matmul(inputs, weights) + biases
    output = tf.nn.relu(affine, name=scope)
  ```

  NOTE: This constructor validates the given `name`. Valid scope
  names match one of the following regular expressions:

      [A-Za-z0-9.][A-Za-z0-9_.\\-/]* (for scopes at the root)
      [A-Za-z0-9_.\\-/]* (for other scopes)

  Args:
    name: A name for the scope.

  Returns:
    A context manager that installs `name` as a new name scope.

  Raises:
    ValueError: If `name` is not a valid scope name, according to the rules
      above.
  """
  if name:
    if isinstance(name, compat.bytes_or_text_types):
      name = compat.as_str(name)

    if self._name_stack:
      # Scopes created in a nested scope may have initial characters
      # that are illegal as the initial character of an op name
      # (viz. '-', '\', '/', and '_').
      if not _VALID_SCOPE_NAME_REGEX.match(name):
        raise ValueError("'%s' is not a valid scope name" % name)
    else:
      # Scopes created in the root must match the more restrictive
      # op name regex, which constrains the initial character.
      if not _VALID_OP_NAME_REGEX.match(name):
        raise ValueError("'%s' is not a valid scope name" % name)
  try:
    old_stack = self._name_stack
    if not name:  # Both for name=None and name="" we re-set to empty scope.
      new_stack = None
    elif name[-1] == "/":
      # A trailing '/' marks a captured, absolute scope: reuse it verbatim.
      new_stack = _name_from_scope_name(name)
    else:
      new_stack = self.unique_name(name)
    self._name_stack = new_stack
    yield "" if new_stack is None else new_stack + "/"
  finally:
    # Restore the previous scope even if the body raised.
    self._name_stack = old_stack
# pylint: enable=g-doc-return-or-yield,line-too-long
def unique_name(self, name, mark_as_used=True):
  """Return a unique operation name for `name`.

  Note: You rarely need to call `unique_name()` directly. Most of
  the time you just need to create `with g.name_scope()` blocks to
  generate structured names.

  `unique_name` is used to generate structured names, separated by
  `"/"`, to help identify operations when debugging a graph.
  Operation names are displayed in error messages reported by the
  TensorFlow runtime, and in various visualization tools such as
  TensorBoard.

  If `mark_as_used` is set to `True`, which is the default, a new
  unique name is created and marked as in use. If it's set to `False`,
  the unique name is returned without actually being marked as used.
  This is useful when the caller simply wants to know what the name
  to be created will be.

  Args:
    name: The name for an operation.
    mark_as_used: Whether to mark this name as being used.

  Returns:
    A string to be passed to `create_op()` that will be used
    to name the operation being created.
  """
  if self._name_stack:
    name = self._name_stack + "/" + name

  # Collision checks are case-insensitive (e.g. foo collides with Foo),
  # so all bookkeeping uses a lowercased key.
  key = name.lower()
  count = self._names_in_use.get(key, 0)
  if mark_as_used:
    self._names_in_use[key] = count + 1

  if count > 0:
    base_key = key
    # Probe suffixes until an unused "<base>_<n>" key is found.
    while key in self._names_in_use:
      key = "{}_{}".format(base_key, count)
      count += 1
    if mark_as_used:
      # Reserve the composed key too, in case someone later asks for
      # unique_name("name_1") directly.
      self._names_in_use[key] = 1
    # Return the new name with the original capitalization of the given name.
    name = "{}_{}".format(name, count - 1)
  return name
def get_name_scope(self):
  """Returns the current name scope.

  For example:

  ```python
  with tf.name_scope('scope1'):
    with tf.name_scope('scope2'):
      print(tf.get_default_graph().get_name_scope())
  ```
  would print the string `scope1/scope2`.

  Returns:
    A string representing the current name scope.
  """
  # The name stack is per-thread (see the `_name_stack` property).
  return self._name_stack
@tf_contextlib.contextmanager
def _colocate_with_for_gradient(self, op, gradient_uid,
                                ignore_existing=False):
  """Like `colocate_with`, but also informs any active control-flow context.

  Args:
    op: The op to colocate newly created ops with (see `colocate_with`).
    gradient_uid: Identifier for the gradient computation, forwarded to the
      control-flow context's Enter/ExitGradientColocation hooks; may be None.
    ignore_existing: See `colocate_with`.

  Yields:
    Nothing.
  """
  with self.colocate_with(op, ignore_existing):
    if gradient_uid is not None and self._control_flow_context is not None:
      try:
        self._control_flow_context.EnterGradientColocation(op, gradient_uid)
        yield
      finally:
        # Always balance the Enter call, even on exception.
        self._control_flow_context.ExitGradientColocation(op, gradient_uid)
    else:
      yield
@tf_contextlib.contextmanager
def colocate_with(self, op, ignore_existing=False):
  """Returns a context manager that specifies an op to colocate with.

  Note: this function is not for public use, only for internal libraries.

  For example:

  ```python
  a = tf.Variable([1.0])
  with g.colocate_with(a):
    b = tf.constant(1.0)
    c = tf.add(a, b)
  ```

  `b` and `c` will always be colocated with `a`, no matter where `a`
  is eventually placed.

  **NOTE** Using a colocation scope resets any existing device constraints.

  If `op` is `None` then `ignore_existing` must be `True` and the new
  scope resets all colocation and device constraints.

  Args:
    op: The op to colocate all created ops with, or `None`.
    ignore_existing: If true, only applies colocation of this op within
      the context, rather than applying all colocation properties
      on the stack. If `op` is `None`, this value must be `True`.

  Raises:
    ValueError: if op is None but ignore_existing is False.

  Yields:
    A context manager that specifies the op with which to colocate
    newly created ops.
  """
  if op is None and not ignore_existing:
    raise ValueError("Trying to reset colocation (op is None) but "
                     "ignore_existing is not True")

  if op is not None and not isinstance(op, Operation):
    # We always want to colocate with the reference op.
    op = internal_convert_to_tensor_or_indexed_slices(op, as_ref=True).op

  # By default, colocate_with resets the device function stack,
  # since colocate_with is typically used in specific internal
  # library functions where colocation is intended to be "stronger"
  # than device functions.
  #
  # In the future, a caller may specify that device_functions win
  # over colocation, in which case we can add support.
  device_fn_tmp = self._device_function_stack
  self._device_function_stack = []

  if ignore_existing:
    # Temporarily replace the whole colocation stack; restored below.
    current_stack = self._colocation_stack
    self._colocation_stack = []

  if op is not None:
    self._colocation_stack.append(op)

  try:
    yield
  finally:
    # Restore device function stack
    self._device_function_stack = device_fn_tmp
    if op is not None:
      self._colocation_stack.pop()

    # Reset the colocation stack if requested.
    if ignore_existing:
      self._colocation_stack = current_stack
@tf_contextlib.contextmanager
def device(self, device_name_or_function):
  # pylint: disable=line-too-long
  """Returns a context manager that specifies the default device to use.

  The `device_name_or_function` argument may either be a device name
  string, a device function, or None:

  * If it is a device name string, all operations constructed in
    this context will be assigned to the device with that name, unless
    overridden by a nested `device()` context.
  * If it is a function, it will be treated as a function from
    Operation objects to device name strings, and invoked each time
    a new Operation is created. The Operation will be assigned to
    the device with the returned name.
  * If it is None, all `device()` invocations from the enclosing context
    will be ignored.

  For information about the valid syntax of device name strings, see
  the documentation in
  [`DeviceNameUtils`](https://www.tensorflow.org/code/tensorflow/core/util/device_name_utils.h).

  For example:

  ```python
  with g.device('/device:GPU:0'):
    # All operations constructed in this context will be placed
    # on GPU 0.
    with g.device(None):
      # All operations constructed in this context will have no
      # assigned device.

  # Defines a function from `Operation` to device string.
  def matmul_on_gpu(n):
    if n.type == "MatMul":
      return "/device:GPU:0"
    else:
      return "/cpu:0"

  with g.device(matmul_on_gpu):
    # All operations of type "MatMul" constructed in this context
    # will be placed on GPU 0; all other operations will be placed
    # on CPU 0.
  ```

  **N.B.** The device scope may be overridden by op wrappers or
  other library code. For example, a variable assignment op
  `v.assign()` must be colocated with the `tf.Variable` `v`, and
  incompatible device scopes will be ignored.

  Args:
    device_name_or_function: The device name or function to use in
      the context.

  Yields:
    A context manager that specifies the default device to use for newly
    created ops.
  """
  # pylint: enable=line-too-long
  if (device_name_or_function is not None and
      not callable(device_name_or_function)):
    # A plain name string is wrapped into a merging device function.
    device_function = pydev.merge_device(device_name_or_function)
  else:
    device_function = device_name_or_function

  try:
    self._device_function_stack.append(device_function)
    yield
  finally:
    # Pop even on exception so the stack stays balanced.
    self._device_function_stack.pop()
def _apply_device_functions(self, op):
  """Applies the current device function stack to the given operation.

  Device functions are applied in reverse stack order so that the most
  recently pushed function gets the first chance to assign a device; a
  `None` entry masks everything pushed below it. Application happens
  here, after construction, because the result can depend on the
  Operation's signature, which is computed in the Operation constructor.
  """
  for fn in reversed(self._device_function_stack):
    if fn is None:
      # A None entry (pushed e.g. by colocation) stops the walk.
      break
    op._set_device(fn(op))  # pylint: disable=protected-access
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def container(self, container_name):
  """Returns a context manager that sets the resource container for new ops.

  Stateful operations, such as variables and queues, can maintain their
  states on devices so that they can be shared by multiple processes. A
  resource container is a string name under which these stateful
  operations are tracked; the resources can be released or cleared with
  `tf.Session.reset()`.

  For example:

  ```python
  with g.container('experiment0'):
    # All stateful Operations constructed in this context will be placed
    # in resource container "experiment0".
    v1 = tf.Variable([1.0])
    with g.container("experiment1"):
      # All stateful Operations constructed in this context will be
      # placed in resource container "experiment1".
      v2 = tf.Variable([2.0])
    with g.container(""):
      # All stateful Operations constructed in this context will be
      # placed in the default resource container.
      v3 = tf.Variable([3.0])

  # Resets container "experiment0"; the state of v1 becomes undefined
  # (such as uninitialized).
  tf.Session.reset(target, ["experiment0"])
  ```

  Args:
    container_name: container name string.

  Returns:
    A context manager for defining resource containers for stateful ops,
      yields the container name.
  """
  saved_container = self._container
  try:
    self._container = container_name
    yield self._container
  finally:
    self._container = saved_container
# pylint: enable=g-doc-return-or-yield
class _ControlDependenciesController(object):
  """Context manager for `control_dependencies()`.

  Instances form a stack on the owning graph; ops created while a
  controller is active pick up its `control_inputs` as control
  dependencies (see `_control_dependencies_for_inputs`).
  """

  def __init__(self, graph, control_inputs):
    """Create a new `_ControlDependenciesController`.

    A `_ControlDependenciesController` is the context manager for
    `with tf.control_dependencies()` blocks.  These normally nest, as
    described in the documentation for `control_dependencies()`.

    The `control_inputs` argument lists control dependencies that must be
    added to the current set of control dependencies.  Because of
    uniquification the set can be empty even if the caller passed a list
    of ops.  The special value `None` indicates that we want to start a
    new empty set of control dependencies instead of extending the
    current set.  In that case we also clear the current control flow
    context, which is an additional mechanism to add control
    dependencies.

    Args:
      graph: The graph that this controller is managing.
      control_inputs: List of ops to use as control inputs in addition
        to the current control dependencies.  None to indicate that
        the dependencies should be cleared.
    """
    self._graph = graph
    self._new_stack = control_inputs is None
    self._control_inputs_val = [] if self._new_stack else control_inputs
    self._seen_nodes = set()
    self._old_stack = None
    self._old_control_flow_context = None

  # pylint: disable=protected-access
  def __enter__(self):
    if self._new_stack:
      # Save and clear the graph's control-dependencies stack and its
      # control flow context; both are restored in __exit__.
      self._old_stack = self._graph._control_dependencies_stack
      self._graph._control_dependencies_stack = []
      self._old_control_flow_context = self._graph._get_control_flow_context()
      self._graph._set_control_flow_context(None)
    self._graph._push_control_dependencies_controller(self)

  def __exit__(self, unused_type, unused_value, unused_traceback):
    self._graph._pop_control_dependencies_controller(self)
    if self._new_stack:
      self._graph._control_dependencies_stack = self._old_stack
      self._graph._set_control_flow_context(self._old_control_flow_context)
  # pylint: enable=protected-access

  @property
  def control_inputs(self):
    """The list of control inputs managed by this controller."""
    return self._control_inputs_val

  def add_op(self, op):
    """Records that `op` was created while this controller was active."""
    self._seen_nodes.add(op)

  def op_in_group(self, op):
    """Returns True if `op` was created under this controller."""
    return op in self._seen_nodes
def _push_control_dependencies_controller(self, controller):
  """Pushes `controller` onto the graph's control-dependencies stack."""
  self._control_dependencies_stack.append(controller)
def _pop_control_dependencies_controller(self, controller):
  """Pops `controller`; it must be the innermost active controller."""
  # Controllers must exit in LIFO order (enforced by the `with` nesting).
  assert self._control_dependencies_stack[-1] is controller
  self._control_dependencies_stack.pop()
def _current_control_dependencies(self):
  """Returns the set of control inputs from all active controllers."""
  return set(
      op
      for controller in self._control_dependencies_stack
      for op in controller.control_inputs)
def _control_dependencies_for_inputs(self, input_ops):
  """For an op that takes `input_ops` as inputs, compute control inputs.

  The returned control dependencies should yield an execution that
  is equivalent to adding all control inputs in
  self._control_dependencies_stack to a newly created op. However,
  this function attempts to prune the returned control dependencies
  by observing that nodes created within the same `with
  control_dependencies(...):` block may have data dependencies that make
  the explicit approach redundant.

  Args:
    input_ops: The data input ops for an op to be created.

  Returns:
    A list of control inputs for the op to be created.
  """
  ret = []
  for controller in self._control_dependencies_stack:
    # If some data input was created inside this controller's block, the
    # new op is dominated by that input and the controller's control
    # inputs would be redundant, so skip them entirely.
    dominated = any(controller.op_in_group(op) for op in input_ops)
    if dominated:
      continue
    # Don't add a control input if we already have a data dependency on it.
    # NOTE(mrry): We do not currently track transitive data dependencies,
    # so we may add redundant control inputs.
    ret.extend(c for c in controller.control_inputs if c not in input_ops)
  return ret
def _record_op_seen_by_control_dependencies(self, op):
  """Record that the given op depends on all registered control dependencies.

  Marking `op` as seen in every active controller lets
  `_control_dependencies_for_inputs` prune redundant control edges for
  later ops that take `op` as a data input.

  Args:
    op: An Operation.
  """
  for controller in self._control_dependencies_stack:
    controller.add_op(op)
def control_dependencies(self, control_inputs):
  """Returns a context manager that specifies control dependencies.

  Use with the `with` keyword to specify that all operations constructed
  within the context should have control dependencies on
  `control_inputs`. For example:

  ```python
  with g.control_dependencies([a, b, c]):
    # `d` and `e` will only run after `a`, `b`, and `c` have executed.
    d = ...
    e = ...
  ```

  Multiple calls to `control_dependencies()` can be nested, and in
  that case a new `Operation` will have control dependencies on the union
  of `control_inputs` from all active contexts.

  You can pass None to clear the control dependencies:

  ```python
  with g.control_dependencies([a, b]):
    # Ops constructed here run after `a` and `b`.
    with g.control_dependencies(None):
      # Ops constructed here run normally, not waiting for either `a` or `b`.
      with g.control_dependencies([c, d]):
        # Ops constructed here run after `c` and `d`, also not waiting
        # for either `a` or `b`.
  ```

  *N.B.* The control dependencies context applies *only* to ops that
  are constructed within the context. Merely using an op or tensor
  in the context does not add a control dependency on it:

  ```python
  # WRONG
  def my_func(pred, tensor):
    t = tf.matmul(tensor, tensor)
    with tf.control_dependencies([pred]):
      # The matmul op is created outside the context, so no control
      # dependency will be added.
      return t

  # RIGHT
  def my_func(pred, tensor):
    with tf.control_dependencies([pred]):
      # The matmul op is created in the context, so a control dependency
      # will be added.
      return tf.matmul(tensor, tensor)
  ```

  Also note that though execution of ops created under this scope will
  trigger execution of the dependencies, the ops created under this scope
  might still be pruned from a normal tensorflow graph: e.g. when taking
  `tf.gradients` of a loss computed under this scope, the dependencies
  are never executed because the gradient graph does not require the ops
  created in the forward pass.

  Args:
    control_inputs: A list of `Operation` or `Tensor` objects which
      must be executed or computed before running the operations
      defined in the context. Can also be `None` to clear the control
      dependencies.

  Returns:
    A context manager that specifies control dependencies for all
    operations constructed within the context.

  Raises:
    TypeError: If `control_inputs` is not a list of `Operation` or
      `Tensor` objects.
  """
  if control_inputs is None:
    return self._ControlDependenciesController(self, None)
  # Convert the inputs to ops and deduplicate them against the currently
  # active control dependencies.
  # NOTE(mrry): Other than deduplication, we do not currently track direct
  # or indirect dependencies between control_inputs, which may result in
  # redundant control inputs.
  seen = self._current_control_dependencies()
  control_ops = []
  for c in control_inputs:
    if isinstance(c, IndexedSlices):
      c = c.op
    c = self.as_graph_element(c)
    if isinstance(c, Tensor):
      c = c.op
    elif not isinstance(c, Operation):
      raise TypeError("Control input must be Operation or Tensor: %s" % c)
    if c in seen:
      continue
    seen.add(c)
    control_ops.append(c)
  return self._ControlDependenciesController(self, control_ops)
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def _attr_scope(self, attr_map):
  """EXPERIMENTAL: A context manager for setting attributes on operators.

  This context manager can be used to add additional
  attributes to operators within the scope of the context.

  For example:

     with ops.Graph().as_default() as g:
       f_1 = Foo()  # No extra attributes
       with g._attr_scope({"_a": tf.attr_value_pb2.AttrValue(b=False)}):
         f_2 = Foo()  # Additional attribute _a=False
         with g._attr_scope({"_a": tf.attr_value_pb2.AttrValue(b=True)}):
           f_3 = Foo()  # Additional attribute _a=True
           with g._attr_scope({"_a": None}):
             f_4 = Foo()  # No additional attributes.

  Args:
    attr_map: A dictionary mapping attr name strings to
      AttrValue protocol buffers or None.

  Returns:
    A context manager that sets the additional attributes to be used for
    one or more ops created in that context.

  Raises:
    TypeError: If attr_map is not a dictionary mapping
      strings to AttrValue protobufs.
  """
  if not isinstance(attr_map, dict):
    raise TypeError("attr_map must be a dictionary mapping "
                    "strings to AttrValue protocol buffers")
  # The saved_attrs dictionary stores any currently-set attributes that
  # will be overridden by this context manager.
  saved_attrs = {}
  # Install the given attributes, remembering any values they shadow.
  for name, attr in attr_map.items():
    if not (isinstance(name, six.string_types) and
            (isinstance(attr, (type(None), attr_value_pb2.AttrValue)) or
             callable(attr))):
      raise TypeError("attr_map must be a dictionary mapping "
                      "strings to AttrValue protocol buffers or "
                      "callables that emit AttrValue protocol buffers")
    try:
      saved_attrs[name] = self._attr_scope_map[name]
    except KeyError:
      pass
    if attr is None:
      # None means "no additional attribute": clear any active entry.
      # Use pop() so that clearing an attr that was never set is a no-op
      # instead of raising KeyError (the docstring's f_4 example only
      # worked when an enclosing scope had already set the attr).
      self._attr_scope_map.pop(name, None)
    else:
      self._attr_scope_map[name] = attr
  try:
    yield  # The code within the context runs here.
  finally:
    # Remove the attributes set for this context, and restore any saved
    # attributes.
    for name, attr in attr_map.items():
      try:
        self._attr_scope_map[name] = saved_attrs[name]
      except KeyError:
        self._attr_scope_map.pop(name, None)
# pylint: enable=g-doc-return-or-yield
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def _kernel_label_map(self, op_to_kernel_label_map):
  """EXPERIMENTAL: A context manager for setting kernel labels.

  This context manager can be used to select particular
  implementations of kernels within the scope of the context.

  For example:

      with ops.Graph().as_default() as g:
        f_1 = Foo()  # Uses the default registered kernel for the Foo op.
        with g.kernel_label_map({"Foo": "v_2"}):
          f_2 = Foo()  # Uses the registered kernel with label "v_2"
                       # for the Foo op.
          with g.kernel_label_map({"Foo": "v_3"}):
            f_3 = Foo()  # Uses the registered kernel with label "v_3"
                         # for the Foo op.
            with g.kernel_label_map({"Foo": ""}):
              f_4 = Foo()  # Uses the default registered kernel
                           # for the Foo op.

  Args:
    op_to_kernel_label_map: A dictionary mapping op type strings to
      kernel label strings.

  Returns:
    A context manager that sets the kernel label to be used for one or more
    ops created in that context.

  Raises:
    TypeError: If op_to_kernel_label_map is not a dictionary mapping
      strings to strings.
  """
  if not isinstance(op_to_kernel_label_map, dict):
    raise TypeError("op_to_kernel_label_map must be a dictionary mapping "
                    "strings to strings")
  # Remember the labels this context manager overrides so they can be
  # restored on exit; `_missing` marks entries that did not exist before.
  _missing = object()
  saved_labels = {}
  for op_type, label in op_to_kernel_label_map.items():
    if not (isinstance(op_type, six.string_types) and
            isinstance(label, six.string_types)):
      raise TypeError("op_to_kernel_label_map must be a dictionary mapping "
                      "strings to strings")
    saved_labels[op_type] = self._op_to_kernel_label_map.get(
        op_type, _missing)
    self._op_to_kernel_label_map[op_type] = label
  try:
    yield  # The code within the context runs here.
  finally:
    # Remove the labels set for this context, and restore any saved labels.
    for op_type, saved in saved_labels.items():
      if saved is _missing:
        del self._op_to_kernel_label_map[op_type]
      else:
        self._op_to_kernel_label_map[op_type] = saved
# pylint: enable=g-doc-return-or-yield
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def gradient_override_map(self, op_type_map):
  """EXPERIMENTAL: A context manager for overriding gradient functions.

  This context manager can be used to override the gradient function
  that will be used for ops within the scope of the context.

  For example:

  ```python
  @tf.RegisterGradient("CustomSquare")
  def _custom_square_grad(op, grad):
    # ...

  with tf.Graph().as_default() as g:
    c = tf.constant(5.0)
    s_1 = tf.square(c)  # Uses the default gradient for tf.square.
    with g.gradient_override_map({"Square": "CustomSquare"}):
      s_2 = tf.square(c)  # Uses _custom_square_grad to compute the
                          # gradient of s_2.
  ```

  Args:
    op_type_map: A dictionary mapping op type strings to alternative op
      type strings.

  Returns:
    A context manager that sets the alternative op type to be used for one
    or more ops created in that context.

  Raises:
    TypeError: If `op_type_map` is not a dictionary mapping strings to
      strings.
  """
  if not isinstance(op_type_map, dict):
    raise TypeError("op_type_map must be a dictionary mapping "
                    "strings to strings")
  # The saved_mappings dictionary stores any currently-set mappings that
  # will be overridden by this context manager.
  saved_mappings = {}
  # Install the given override mappings, remembering shadowed values.
  for op_type, mapped_op_type in op_type_map.items():
    if not (isinstance(op_type, six.string_types) and
            isinstance(mapped_op_type, six.string_types)):
      raise TypeError("op_type_map must be a dictionary mapping "
                      "strings to strings")
    try:
      saved_mappings[op_type] = self._gradient_override_map[op_type]
    except KeyError:
      pass
    self._gradient_override_map[op_type] = mapped_op_type
  try:
    yield  # The code within the context runs here.
  finally:
    # Remove the mappings set for this context, and restore any saved ones.
    for op_type, mapped_op_type in op_type_map.items():
      try:
        self._gradient_override_map[op_type] = saved_mappings[op_type]
      except KeyError:
        del self._gradient_override_map[op_type]
# pylint: enable=g-doc-return-or-yield
def prevent_feeding(self, tensor):
  """Marks the given `tensor` as unfeedable in this graph.

  After this call, `is_feedable(tensor)` returns False.
  """
  self._unfeedable_tensors.add(tensor)
def is_feedable(self, tensor):
  """Returns `True` if and only if `tensor` is feedable."""
  if tensor in self._unfeedable_tensors:
    return False
  return True
def prevent_fetching(self, op):
  """Marks the given `op` as unfetchable in this graph.

  After this call, `is_fetchable` returns False for the op and for any
  tensor whose `.op` is this op.
  """
  self._unfetchable_ops.add(op)
def is_fetchable(self, tensor_or_op):
  """Returns `True` if and only if `tensor_or_op` is fetchable."""
  # A tensor is fetchable iff its producing op is fetchable.
  op = tensor_or_op.op if isinstance(tensor_or_op, Tensor) else tensor_or_op
  return op not in self._unfetchable_ops
def switch_to_thread_local(self):
  """Make device, colocation and dependencies stacks thread-local.

  Device, colocation and dependencies stacks are shared across threads by
  default, so one thread may affect the behavior of another. After this
  method is called each thread uses its own copy of these stacks; a
  thread doesn't affect other threads by mutating such a stack. The
  initial value for every thread's stack is set to the current value of
  the stack when `switch_to_thread_local()` was first called.
  """
  if self._stack_state_is_thread_local:
    return
  self._stack_state_is_thread_local = True
@property
def _device_function_stack(self):
  """Device-function stack (thread-local after `switch_to_thread_local`)."""
  if not self._stack_state_is_thread_local:
    return self._graph_device_function_stack
  # This may be called from a thread where the thread-local copy doesn't
  # yet exist; seed it with a copy of the graph-wide stack.
  if not hasattr(self._thread_local, "_device_function_stack"):
    self._thread_local._device_function_stack = (
        self._graph_device_function_stack[:])
  return self._thread_local._device_function_stack

@_device_function_stack.setter
def _device_function_stack(self, device_function_stack):
  if not self._stack_state_is_thread_local:
    self._graph_device_function_stack = device_function_stack
  else:
    self._thread_local._device_function_stack = device_function_stack
@property
def _colocation_stack(self):
  """Colocation stack (thread-local after `switch_to_thread_local`)."""
  if not self._stack_state_is_thread_local:
    return self._graph_colocation_stack
  # This may be called from a thread where the thread-local copy doesn't
  # yet exist; seed it with a copy of the graph-wide stack.
  if not hasattr(self._thread_local, "_colocation_stack"):
    self._thread_local._colocation_stack = self._graph_colocation_stack[:]
  return self._thread_local._colocation_stack

@_colocation_stack.setter
def _colocation_stack(self, colocation_stack):
  if not self._stack_state_is_thread_local:
    self._graph_colocation_stack = colocation_stack
  else:
    self._thread_local._colocation_stack = colocation_stack
@property
def _control_dependencies_stack(self):
  """Control-deps stack (thread-local after `switch_to_thread_local`)."""
  if not self._stack_state_is_thread_local:
    return self._graph_control_dependencies_stack
  # This may be called from a thread where the thread-local copy doesn't
  # yet exist; seed it with a copy of the graph-wide stack.
  if not hasattr(self._thread_local, "_control_dependencies_stack"):
    self._thread_local._control_dependencies_stack = (
        self._graph_control_dependencies_stack[:])
  return self._thread_local._control_dependencies_stack

@_control_dependencies_stack.setter
def _control_dependencies_stack(self, control_dependencies):
  if not self._stack_state_is_thread_local:
    self._graph_control_dependencies_stack = control_dependencies
  else:
    self._thread_local._control_dependencies_stack = control_dependencies
# TODO(agarwal): currently device directives in an outer eager scope will not
# apply to inner graph mode code. Fix that.
@tf_export("device")
def device(device_name_or_function):
  """Wrapper for `Graph.device()` using the default graph.

  See
  @{tf.Graph.device}
  for more details.

  Args:
    device_name_or_function: The device name or function to use in
      the context.

  Returns:
    A context manager that specifies the default device to use for newly
    created ops.

  Raises:
    RuntimeError: If eager execution is enabled and a function is passed in.
  """
  if not context.executing_eagerly():
    return get_default_graph().device(device_name_or_function)
  # TODO(agarwal): support device functions in EAGER mode.
  if callable(device_name_or_function):
    raise RuntimeError(
        "tf.device does not support functions when eager execution "
        "is enabled.")
  return context.device(device_name_or_function)
@tf_export("container")
def container(container_name):
  """Wrapper for `Graph.container()` using the default graph.

  Args:
    container_name: The container string to use in the context.

  Returns:
    A context manager that specifies the default container to use for
    newly created stateful ops.
  """
  default_graph = get_default_graph()
  return default_graph.container(container_name)
def _colocate_with_for_gradient(op, gradient_uid, ignore_existing=False):
  """Colocation context manager used by the gradient machinery.

  In eager mode colocation degenerates to a device scope (or a no-op when
  `op` is None); in graph mode the request is forwarded to the default
  graph's `_colocate_with_for_gradient`.
  """
  if context.executing_eagerly():
    return device(op.device) if op is not None else _NullContextmanager()
  default_graph = get_default_graph()
  if isinstance(op, EagerTensor):
    # An eager tensor may only be colocated with while building a function.
    if not default_graph.building_function:
      raise ValueError("Encountered an Eager-defined Tensor during graph "
                       "construction, but a function was not being built.")
    return default_graph.device(op.device)
  return default_graph._colocate_with_for_gradient(
      op, gradient_uid=gradient_uid, ignore_existing=ignore_existing)
@tf_export("colocate_with")
def colocate_with(op, ignore_existing=False):
  """Returns a context manager that colocates newly created ops with `op`.

  Wrapper around the graph-level colocation mechanism (see
  `Graph._colocate_with_for_gradient`, called with no gradient UID).

  Args:
    op: The op to colocate new ops with. May be None together with
      `ignore_existing=True` to clear the current colocation scopes.
    ignore_existing: If true, colocation scopes already active are reset
      for the duration of the context (restored on exit).

  Returns:
    A context manager.
  """
  return _colocate_with_for_gradient(op, None, ignore_existing=ignore_existing)
@tf_export("control_dependencies")
def control_dependencies(control_inputs):
  """Wrapper for `Graph.control_dependencies()` using the default graph.

  See @{tf.Graph.control_dependencies}
  for more details.

  When eager execution is enabled, any callable object in the `control_inputs`
  list will be called.

  Args:
    control_inputs: A list of `Operation` or `Tensor` objects which
      must be executed or computed before running the operations
      defined in the context. Can also be `None` to clear the control
      dependencies. If eager execution is enabled, any callable object in the
      `control_inputs` list will be called.

  Returns:
    A context manager that specifies control dependencies for all
    operations constructed within the context.
  """
  if context.executing_eagerly():
    if control_inputs:
      # Execute any pending callables: in eager mode dependencies run
      # immediately, so calling them here preserves "runs before" semantics.
      for control in control_inputs:
        if callable(control):
          control()
    return _NullContextmanager()
  else:
    return get_default_graph().control_dependencies(control_inputs)
class _DefaultStack(threading.local):
  """A thread-local stack of objects for providing implicit defaults."""

  def __init__(self):
    super(_DefaultStack, self).__init__()
    self._enforce_nesting = True
    self.stack = []

  def get_default(self):
    """Returns the innermost default, or None if the stack is empty."""
    return self.stack[-1] if self.stack else None

  def reset(self):
    """Empties the stack."""
    self.stack = []

  def is_cleared(self):
    """Returns True if no defaults are currently installed."""
    return not self.stack

  @property
  def enforce_nesting(self):
    return self._enforce_nesting

  @enforce_nesting.setter
  def enforce_nesting(self, value):
    self._enforce_nesting = value

  @tf_contextlib.contextmanager
  def get_controller(self, default):
    """A context manager for manipulating a default stack."""
    try:
      self.stack.append(default)
      yield default
    finally:
      # The stack may be empty if reset() was called while we were active.
      if self.stack:
        if not self._enforce_nesting:
          self.stack.remove(default)
        elif self.stack[-1] is not default:
          raise AssertionError(
              "Nesting violated for default stack of %s objects" %
              type(default))
        else:
          self.stack.pop()
# Thread-local stack of sessions installed as defaults via
# `default_session()` (used by `Session.as_default()`).
_default_session_stack = _DefaultStack()  # pylint: disable=protected-access
def default_session(session):
  """Python "with" handler for defining a default session.

  This function provides a means of registering a session for handling
  Tensor.eval() and Operation.run() calls. It is primarily intended for use
  by session.Session, but can be used with any object that implements
  the Session.run() interface.

  Use with the "with" keyword to specify that Tensor.eval() and Operation.run()
  invocations within the scope of a block should be executed by a particular
  session.

  The default session applies to the current thread only, so it is always
  possible to inspect the call stack and determine the scope of a default
  session. If you create a new thread, and wish to use the default session
  in that thread, you must explicitly add a "with ops.default_session(sess):"
  block in that thread's function.

  Example:
    The following code examples are equivalent:

    # 1. Using the Session object directly:
    sess = ...
    c = tf.constant(5.0)
    sess.run(c)

    # 2. Using default_session():
    sess = ...
    with ops.default_session(sess):
      c = tf.constant(5.0)
      result = c.eval()

    # 3. Overriding default_session():
    sess = ...
    with ops.default_session(sess):
      c = tf.constant(5.0)
      with ops.default_session(...):
        c.eval(session=sess)

  Args:
    session: The session to be installed as the default session.

  Returns:
    A context manager for the default session.
  """
  return _default_session_stack.get_controller(session)
@tf_export("get_default_session")
def get_default_session():
  """Returns the default session for the current thread.

  The returned `Session` will be the innermost session on which a
  `Session` or `Session.as_default()` context has been entered.

  NOTE: The default session is a property of the current thread. If you
  create a new thread, and wish to use the default session in that
  thread, you must explicitly add a `with sess.as_default():` in that
  thread's function.

  Returns:
    The default `Session` being used in the current thread, or None if no
    session has been installed.
  """
  return _default_session_stack.get_default()
def _eval_using_default_session(tensors, feed_dict, graph, session=None):
  """Uses the default session to evaluate one or more tensors.

  Args:
    tensors: A single Tensor, or a list of Tensor objects.
    feed_dict: A dictionary that maps Tensor objects (or tensor names) to
      lists, numpy ndarrays, TensorProtos, or strings.
    graph: The graph in which the tensors are defined.
    session: (Optional) A different session to use to evaluate "tensors".

  Returns:
    Either a single numpy ndarray if "tensors" is a single tensor; or a
    list of numpy ndarrays that each correspond to the respective element
    in "tensors".

  Raises:
    ValueError: If no default session is available; the default session
      does not have "graph" as its graph; or if "session" is specified,
      and it does not have "graph" as its graph.
  """
  explicit_session = session is not None
  if not explicit_session:
    session = get_default_session()
    if session is None:
      raise ValueError("Cannot evaluate tensor using `eval()`: No default "
                       "session is registered. Use `with "
                       "sess.as_default()` or pass an explicit session to "
                       "`eval(session=sess)`")
  if session.graph is not graph:
    if explicit_session:
      raise ValueError("Cannot use the given session to evaluate tensor: "
                       "the tensor's graph is different from the session's "
                       "graph.")
    raise ValueError("Cannot use the default session to evaluate tensor: "
                     "the tensor's graph is different from the session's "
                     "graph. Pass an explicit session to "
                     "`eval(session=sess)`.")
  return session.run(tensors, feed_dict)
def _run_using_default_session(operation, feed_dict, graph, session=None):
  """Uses the default session to run "operation".

  Args:
    operation: The Operation to be run.
    feed_dict: A dictionary that maps Tensor objects (or tensor names) to
      lists, numpy ndarrays, TensorProtos, or strings.
    graph: The graph in which "operation" is defined.
    session: (Optional) A different session to use to run "operation".

  Raises:
    ValueError: If no default session is available; the default session
      does not have "graph" as its graph; or if "session" is specified,
      and it does not have "graph" as its graph.
  """
  explicit_session = session is not None
  if not explicit_session:
    session = get_default_session()
    if session is None:
      raise ValueError("Cannot execute operation using `run()`: No default "
                       "session is registered. Use `with "
                       "sess.as_default():` or pass an explicit session to "
                       "`run(session=sess)`")
  if session.graph is not graph:
    if explicit_session:
      raise ValueError("Cannot use the given session to execute operation: "
                       "the operation's graph is different from the session's "
                       "graph.")
    raise ValueError("Cannot use the default session to execute operation: "
                     "the operation's graph is different from the "
                     "session's graph. Pass an explicit session to "
                     "run(session=sess).")
  session.run(operation, feed_dict)
class _DefaultGraphStack(_DefaultStack):  # pylint: disable=protected-access
  """A thread-local stack of objects for providing an implicit default graph."""

  def __init__(self):
    super(_DefaultGraphStack, self).__init__()
    self._global_default_graph = None

  def get_default(self):
    """Override that returns a global default if the stack is empty."""
    ret = super(_DefaultGraphStack, self).get_default()
    return ret if ret is not None else self._GetGlobalDefaultGraph()

  def _GetGlobalDefaultGraph(self):
    """Lazily creates and returns the process-wide default graph."""
    if self._global_default_graph is None:
      # TODO(mrry): Perhaps log that the default graph is being used, or set
      # provide some other feedback to prevent confusion when a mixture of
      # the global default graph and an explicit graph are combined in the
      # same process.
      self._global_default_graph = Graph()
    return self._global_default_graph

  def reset(self):
    super(_DefaultGraphStack, self).reset()
    self._global_default_graph = None

  @tf_contextlib.contextmanager
  def get_controller(self, default):
    try:
      # Record the context switch so init_scope() can crawl back out of
      # function-building graphs; popped again in the finally clause.
      context.context().context_switches.push(
          default.building_function, default.as_default)
      with super(_DefaultGraphStack, self).get_controller(
          default) as g, context.graph_mode():
        yield g
    finally:
      context.context().context_switches.pop()
# Thread-local stack of graphs installed as defaults via
# `Graph.as_default()`; falls back to a lazily-created global graph.
_default_graph_stack = _DefaultGraphStack()
# pylint: disable=g-doc-return-or-yield,line-too-long
@tf_contextlib.contextmanager
def init_scope():
  """A context manager that lifts ops out of control-flow scopes and function-building graphs.

  There is often a need to lift variable initialization ops out of control-flow
  scopes, function-building graphs, and gradient tapes. Entering an
  `init_scope` is a mechanism for satisfying these desiderata. In particular,
  entering an `init_scope` has three effects:

    (1) All control dependencies are cleared the moment the scope is entered;
        this is equivalent to entering the context manager returned from
        `control_dependencies(None)`, which has the side-effect of exiting
        control-flow scopes like `tf.cond` and `tf.while_loop`.

    (2) All operations that are created while the scope is active are lifted
        into the lowest context on the `context_stack` that is not building a
        graph function. Here, a context is defined as either a graph or an eager
        context. Every context switch, i.e., every installation of a graph as
        the default graph and every switch into eager mode, is logged in a
        thread-local stack called `context_switches`; the log entry for a
        context switch is popped from the stack when the context is exited.
        Entering an `init_scope` is equivalent to crawling up
        `context_switches`, finding the first context that is not building a
        graph function, and entering it. A caveat is that if graph mode is
        enabled but the default graph stack is empty, then entering an
        `init_scope` will simply install a fresh graph as the default one.

    (3) The gradient tape is paused while the scope is active.
  """
  # pylint: enable=g-doc-return-or-yield,line-too-long

  if context.executing_eagerly():
    # Fastpath.
    with tape.stop_recording():
      yield
  else:
    # Retrieve the active name scope: entering an `init_scope` preserves
    # the name scope of the current context.
    default_graph = get_default_graph()
    scope = default_graph.get_name_scope()
    if scope and scope[-1] != '/':
      # Names that end with trailing slashes are treated by `name_scope` as
      # absolute.
      scope = scope + '/'
    inner_device_stack = default_graph._device_function_stack  # pylint: disable=protected-access

    outer_context = None
    if not _default_graph_stack.stack:
      # If the default graph stack is empty, then we cannot be building a
      # function. Install the global graph (which, in this case, is also the
      # default graph) as the outer context.
      if default_graph.building_function:
        raise RuntimeError("The global graph is building a function.")
      outer_context = default_graph.as_default
    else:
      # Find a context that is not building a function.
      for stack_entry in reversed(context.context().context_switches.stack):
        if not stack_entry.is_building_function:
          outer_context = stack_entry.enter_context_fn
          break

      if outer_context is None:
        # As a last resort, obtain the global default graph; this graph doesn't
        # necessarily live on the graph stack (and hence it doesn't necessarily
        # live on the context stack), but it is stored in the graph stack's
        # encapsulating object.
        outer_context = _default_graph_stack._GetGlobalDefaultGraph().as_default  # pylint: disable=protected-access

    if outer_context is None:
      # Sanity check; this shouldn't be triggered.
      raise RuntimeError("All graphs are building functions, and no "
                         "eager context was previously active.")

    outer_graph = None
    outer_device_stack = None
    try:
      with outer_context(), name_scope(scope), control_dependencies(
          None), tape.stop_recording():
        if not context.executing_eagerly():
          # The device stack is preserved when lifting into a graph. Eager
          # execution doesn't implement device stacks and in particular it
          # doesn't support device functions, so in general it's not possible
          # to do the same when lifting into the eager context.
          outer_graph = get_default_graph()
          outer_device_stack = outer_graph._device_function_stack  # pylint: disable=protected-access
          outer_graph._device_function_stack = inner_device_stack  # pylint: disable=protected-access
        yield
    finally:
      # Restore the outer graph's original device stack even when the scope
      # body raised.
      if outer_graph is not None:
        outer_graph._device_function_stack = outer_device_stack  # pylint: disable=protected-access
@tf_export("enable_eager_execution")
def enable_eager_execution(config=None,
                           device_policy=None,
                           execution_mode=None):
  """Enables eager execution for the lifetime of this program.

  Eager execution provides an imperative interface to TensorFlow. With eager
  execution enabled, TensorFlow functions execute operations immediately (as
  opposed to adding to a graph to be executed later in a @{tf.Session}) and
  return concrete values (as opposed to symbolic references to a node in a
  computational graph).

  For example:
  ```python
  tf.enable_eager_execution()

  # After eager execution is enabled, operations are executed as they are
  # defined and Tensor objects hold concrete values, which can be accessed as
  # numpy.ndarray`s through the numpy() method.
  assert tf.multiply(6, 7).numpy() == 42
  ```

  Eager execution cannot be enabled after TensorFlow APIs have been used to
  create or execute graphs. It is typically recommended to invoke this function
  at program startup and not in a library (as most libraries should be usable
  both with and without eager execution).

  Args:
    config: (Optional.) A @{tf.ConfigProto} to use to configure the environment
      in which operations are executed. Note that @{tf.ConfigProto} is also
      used to configure graph execution (via @{tf.Session}) and many options
      within `tf.ConfigProto` are not implemented (or are irrelevant) when
      eager execution is enabled.
    device_policy: (Optional.) Policy controlling how operations requiring
      inputs on a specific device (e.g., a GPU 0) handle inputs on a different
      device (e.g. GPU 1 or CPU). When set to None, an appropriate value will be
      picked automatically. The value picked may change between TensorFlow
      releases.
      Valid values:
      - tf.contrib.eager.DEVICE_PLACEMENT_EXPLICIT: raises an error if the
        placement is not correct.
      - tf.contrib.eager.DEVICE_PLACEMENT_WARN: copies the tensors which are not
        on the right device but logs a warning.
      - tf.contrib.eager.DEVICE_PLACEMENT_SILENT: silently copies the tensors.
        Note that this may hide performance problems as there is no notification
        provided when operations are blocked on the tensor being copied between
        devices.
      - tf.contrib.eager.DEVICE_PLACEMENT_SILENT_FOR_INT32: silently copies
        int32 tensors, raising errors on the other ones.
    execution_mode: (Optional.) Policy controlling how operations dispatched are
      actually executed. When set to None, an appropriate value will be picked
      automatically. The value picked may change between TensorFlow releases.
      Valid values:
      - tf.contrib.eager.SYNC: executes each operation synchronously.
      - tf.contrib.eager.ASYNC: executes each operation asynchronously. These
        operations may return "non-ready" handles.

  Raises:
    ValueError: If eager execution is enabled after creating/executing a
     TensorFlow graph, or if options provided conflict with a previous call
     to this function.
  """
  # Thin public wrapper: delegates with server_def=None (local execution only).
  return enable_eager_execution_internal(
      config, device_policy, execution_mode, None)
def enable_eager_execution_internal(config=None,
                                    device_policy=None,
                                    execution_mode=None,
                                    server_def=None):
  """Enables eager execution for the lifetime of this program.

  Most of the doc string for enable_eager_execution is relevant here as well.

  Args:
    config: See enable_eager_execution doc string
    device_policy: See enable_eager_execution doc string
    execution_mode: See enable_eager_execution doc string
    server_def: (Optional.) A tensorflow::ServerDef proto.
      Enables execution on remote devices. GrpcServers need to be started by
      creating an identical server_def to this, and setting the appropriate
      task_indexes, so that the servers can communicate. It will then be
      possible to execute operations on remote devices.

  Raises:
    ValueError
  """
  # Validate all arguments before mutating any process-global state.
  if config is not None and not isinstance(config, config_pb2.ConfigProto):
    raise TypeError(
        "config must be a tf.ConfigProto, but got %s" % type(config))
  if device_policy not in (None, context.DEVICE_PLACEMENT_EXPLICIT,
                           context.DEVICE_PLACEMENT_WARN,
                           context.DEVICE_PLACEMENT_SILENT,
                           context.DEVICE_PLACEMENT_SILENT_FOR_INT32):
    raise ValueError(
        "device_policy must be one of None, tf.contrib.eager.DEVICE_PLACEMENT_*"
    )
  if execution_mode not in (None, context.SYNC, context.ASYNC):
    raise ValueError(
        "execution_mode must be one of None, tf.contrib.eager.SYNC, "
        "tf.contrib.eager.ASYNC")
  # pylint: disable=protected-access
  if context._default_mode == context.GRAPH_MODE:
    # Eager may only be enabled before any graph work has happened: no default
    # sessions registered and no ops added to the default graph.
    graph_mode_has_been_used = (
        _default_session_stack.stack
        or len(get_default_graph().get_operations()) > 0)  # pylint: disable=g-explicit-length-test
    if graph_mode_has_been_used:
      raise ValueError(
          "tf.enable_eager_execution must be called at program startup.")
    context._default_mode = context.EAGER_MODE
    if context._context is None:
      context._context = context.Context(
          config=config,
          device_policy=device_policy,
          execution_mode=execution_mode,
          server_def=server_def)
    elif ((config is not None and config is not context._context._config) or
          (device_policy is not None and
           device_policy is not context._context._device_policy) or
          (execution_mode is not None and
           execution_mode is not context._context._execution_mode)):
      raise ValueError("Trying to change the options of an active eager"
                       " execution. Context config: %s, specified config:"
                       " %s. Context device policy: %s, specified device"
                       " policy: %s. Context execution mode: %s, "
                       " specified execution mode %s." %
                       (context._context._config, config,
                        context._context._device_policy, device_policy,
                        context._context._execution_mode, execution_mode))
  else:
    # Already in eager mode: enabling twice is an error.
    raise ValueError(
        "tf.enable_eager_execution must be called at program startup.")

  # Monkey patch to get rid of an unnecessary conditional since the context is
  # now initialized.
  context.context = context.context_safe
def eager_run(main=None, argv=None):
  """Runs the program with an optional main function and argv list.

  The program will run with eager execution enabled.

  Example:
  ```python
  import tensorflow as tf
  # Import subject to future changes:
  from tensorflow.contrib.eager.python import tfe

  def main(_):
    u = tf.constant(6.0)
    v = tf.constant(7.0)
    print(u * v)

  if __name__ == "__main__":
    tfe.run()
  ```

  Args:
    main: the main function to run.
    argv: the arguments to pass to it.
  """
  # Eager must be switched on before handing control to the app runner, since
  # it cannot be enabled once graph APIs have been used.
  enable_eager_execution()
  app.run(main, argv)
@tf_export("reset_default_graph")
def reset_default_graph():
  """Clears the default graph stack and resets the global default graph.

  NOTE: The default graph is a property of the current thread. This
  function applies only to the current thread.  Calling this function while
  a `tf.Session` or `tf.InteractiveSession` is active will result in undefined
  behavior. Using any previously created `tf.Operation` or `tf.Tensor` objects
  after calling this function will result in undefined behavior.

  Raises:
    AssertionError: If this function is called within a nested graph.
  """
  # A non-empty stack means we are inside at least one `with g.as_default():`
  # block; resetting there would orphan the enclosing context managers.
  if not _default_graph_stack.is_cleared():
    raise AssertionError("Do not use tf.reset_default_graph() to clear "
                         "nested graphs. If you need a cleared graph, "
                         "exit the nesting and create a new graph.")
  _default_graph_stack.reset()
@tf_export("get_default_graph")
def get_default_graph():
  """Returns the default graph for the current thread.

  The returned graph will be the innermost graph on which a
  `Graph.as_default()` context has been entered, or a global default
  graph if none has been explicitly created.

  NOTE: The default graph is a property of the current thread. If you
  create a new thread, and wish to use the default graph in that
  thread, you must explicitly add a `with g.as_default():` in that
  thread's function.

  Returns:
    The default `Graph` being used in the current thread.
  """
  # Falls back to the lazily-created global graph when the stack is empty.
  return _default_graph_stack.get_default()
def has_default_graph():
  """Returns True if there is a default graph.

  Note that only graphs explicitly pushed via `Graph.as_default()` count;
  the lazily-created global fallback graph does not.

  Returns:
    True if the current thread's default-graph stack is non-empty.
  """
  # Idiomatic truthiness test instead of the non-idiomatic `len(...) >= 1`.
  return bool(_default_graph_stack.stack)
def get_name_scope():
  """Returns the current name scope in the default_graph.

  For example:

  ```python
  with tf.name_scope('scope1'):
    with tf.name_scope('scope2'):
      print(tf.get_name_scope())
  ```
  would print the string `scope1/scope2`.

  Returns:
    A string representing the current name scope.
  """
  if context.executing_eagerly():
    # Eager scope names carry a trailing "/" internally; strip it for display.
    return context.context().scope_name.rstrip("/")
  return get_default_graph().get_name_scope()
def _assert_same_graph(original_item, item):
"""Fail if the 2 items are from different graphs.
Args:
original_item: Original item to check against.
item: Item to check.
Raises:
ValueError: if graphs do not match.
"""
if original_item.graph is not item.graph:
raise ValueError("%s must be from the same graph as %s." % (item,
original_item))
def _get_graph_from_inputs(op_input_list, graph=None):
  """Returns the appropriate graph to use for the given inputs.

  This library method provides a consistent algorithm for choosing the graph
  in which an Operation should be constructed:

  1. If the default graph is being used to construct a function, we
     use the default graph.
  2. If the "graph" is specified explicitly, we validate that all of the inputs
     in "op_input_list" are compatible with that graph.
  3. Otherwise, we attempt to select a graph from the first Operation-
     or Tensor-valued input in "op_input_list", and validate that all other
     such inputs are in the same graph.
  4. If the graph was not specified and it could not be inferred from
     "op_input_list", we attempt to use the default graph.

  Args:
    op_input_list: A list of inputs to an operation, which may include `Tensor`,
      `Operation`, and other objects that may be converted to a graph element.
    graph: (Optional) The explicit graph to use.

  Raises:
    TypeError: If op_input_list is not a list or tuple, or if graph is not a
      Graph.
    ValueError: If a graph is explicitly passed and not all inputs are from it,
      or if the inputs are from multiple graphs, or we could not find a graph
      and there was no default graph.

  Returns:
    The appropriate graph to use for the given inputs.
  """
  # Function-building graphs always win so captured inputs stay consistent.
  if get_default_graph().building_function:
    return get_default_graph()

  op_input_list = tuple(op_input_list)  # Handle generators correctly
  if graph and not isinstance(graph, Graph):
    raise TypeError("Input graph needs to be a Graph: %s" % graph)

  # 1. We validate that all of the inputs are from the same graph. This is
  #    either the supplied graph parameter, or the first one selected from one
  #    the graph-element-valued inputs. In the latter case, we hold onto
  #    that input in original_graph_element so we can provide a more
  #    informative error if a mismatch is found.
  original_graph_element = None
  for op_input in op_input_list:
    # Determine if this is a valid graph_element.
    # TODO(josh11b): Note that we exclude subclasses of Tensor. Need to clean this
    # up.
    graph_element = None
    if (isinstance(op_input, (Operation, _TensorLike)) and
        ((not isinstance(op_input, Tensor)) or type(op_input) == Tensor)):  # pylint: disable=unidiomatic-typecheck
      graph_element = op_input
    else:
      graph_element = _as_graph_element(op_input)

    if graph_element is not None:
      if not graph:
        # First graph-valued input seen: it becomes the reference element.
        original_graph_element = graph_element
        graph = graph_element.graph
      elif original_graph_element is not None:
        _assert_same_graph(original_graph_element, graph_element)
      elif graph_element.graph is not graph:
        raise ValueError("%s is not from the passed-in graph." % graph_element)

  # 2. If all else fails, we use the default graph, which is always there.
  return graph or get_default_graph()
@tf_export("GraphKeys")
class GraphKeys(object):
  """Standard names to use for graph collections.

  The standard library uses various well-known names to collect and
  retrieve values associated with a graph. For example, the
  `tf.Optimizer` subclasses default to optimizing the variables
  collected under `tf.GraphKeys.TRAINABLE_VARIABLES` if none is
  specified, but it is also possible to pass an explicit list of
  variables.

  The following standard keys are defined:

  * `GLOBAL_VARIABLES`: the default collection of `Variable` objects, shared
    across distributed environment (model variables are subset of these). See
    @{tf.global_variables}
    for more details.
    Commonly, all `TRAINABLE_VARIABLES` variables will be in `MODEL_VARIABLES`,
    and all `MODEL_VARIABLES` variables will be in `GLOBAL_VARIABLES`.
  * `LOCAL_VARIABLES`: the subset of `Variable` objects that are local to each
    machine. Usually used for temporarily variables, like counters.
    Note: use `tf.contrib.framework.local_variable` to add to this collection.
  * `MODEL_VARIABLES`: the subset of `Variable` objects that are used in the
    model for inference (feed forward). Note: use
    `tf.contrib.framework.model_variable` to add to this collection.
  * `TRAINABLE_VARIABLES`: the subset of `Variable` objects that will
    be trained by an optimizer. See
    @{tf.trainable_variables}
    for more details.
  * `SUMMARIES`: the summary `Tensor` objects that have been created in the
    graph. See
    @{tf.summary.merge_all}
    for more details.
  * `QUEUE_RUNNERS`: the `QueueRunner` objects that are used to
    produce input for a computation. See
    @{tf.train.start_queue_runners}
    for more details.
  * `MOVING_AVERAGE_VARIABLES`: the subset of `Variable` objects that will also
    keep moving averages.  See
    @{tf.moving_average_variables}
    for more details.
  * `REGULARIZATION_LOSSES`: regularization losses collected during graph
    construction.

  The following standard keys are _defined_, but their collections are **not**
  automatically populated as many of the others are:

  * `WEIGHTS`
  * `BIASES`
  * `ACTIVATIONS`
  """

  # Key to collect Variable objects that are global (shared across machines).
  # Default collection for all variables, except local ones.
  GLOBAL_VARIABLES = "variables"
  # Key to collect local variables that are local to the machine and are not
  # saved/restored.
  LOCAL_VARIABLES = "local_variables"
  # Key to collect local variables which are used to accumulate internal state
  # to be used in tf.metrics.*.
  METRIC_VARIABLES = "metric_variables"
  # Key to collect model variables defined by layers.
  MODEL_VARIABLES = "model_variables"
  # Key to collect Variable objects that will be trained by the
  # optimizers.
  TRAINABLE_VARIABLES = "trainable_variables"
  # Key to collect summaries.
  SUMMARIES = "summaries"
  # Key to collect QueueRunners.
  QUEUE_RUNNERS = "queue_runners"
  # Key to collect table initializers.
  TABLE_INITIALIZERS = "table_initializer"
  # Key to collect asset filepaths. An asset represents an external resource
  # like a vocabulary file.
  ASSET_FILEPATHS = "asset_filepaths"
  # Key to collect Variable objects that keep moving averages.
  MOVING_AVERAGE_VARIABLES = "moving_average_variables"
  # Key to collect regularization losses at graph construction.
  REGULARIZATION_LOSSES = "regularization_losses"
  # Key to collect concatenated sharded variables.
  CONCATENATED_VARIABLES = "concatenated_variables"
  # Key to collect savers.
  SAVERS = "savers"
  # Key to collect weights
  WEIGHTS = "weights"
  # Key to collect biases
  BIASES = "biases"
  # Key to collect activations
  ACTIVATIONS = "activations"
  # Key to collect update_ops
  UPDATE_OPS = "update_ops"
  # Key to collect losses
  LOSSES = "losses"
  # Key to collect BaseSaverBuilder.SaveableObject instances for checkpointing.
  SAVEABLE_OBJECTS = "saveable_objects"
  # Key to collect all shared resources used by the graph which need to be
  # initialized once per cluster.
  RESOURCES = "resources"
  # Key to collect all shared resources used in this graph which need to be
  # initialized once per session.
  LOCAL_RESOURCES = "local_resources"
  # Trainable resource-style variables.
  TRAINABLE_RESOURCE_VARIABLES = "trainable_resource_variables"

  # Key to indicate various ops.
  INIT_OP = "init_op"
  LOCAL_INIT_OP = "local_init_op"
  READY_OP = "ready_op"
  READY_FOR_LOCAL_INIT_OP = "ready_for_local_init_op"
  SUMMARY_OP = "summary_op"
  GLOBAL_STEP = "global_step"

  # Used to count the number of evaluations performed during a single evaluation
  # run.
  EVAL_STEP = "eval_step"
  TRAIN_OP = "train_op"

  # Key for control flow context.
  COND_CONTEXT = "cond_context"
  WHILE_CONTEXT = "while_context"

  # Used to store v2 summary names.
  _SUMMARY_COLLECTION = "_SUMMARY_V2"

  # List of all collections that keep track of variables.
  _VARIABLE_COLLECTIONS = [
      GLOBAL_VARIABLES,
      LOCAL_VARIABLES,
      METRIC_VARIABLES,
      MODEL_VARIABLES,
      TRAINABLE_VARIABLES,
      MOVING_AVERAGE_VARIABLES,
      CONCATENATED_VARIABLES,
      TRAINABLE_RESOURCE_VARIABLES,
  ]

  # Key for streaming model ports.
  # NOTE(yuanbyu): internal and experimental.
  _STREAMING_MODEL_PORTS = "streaming_model_ports"

  @decorator_utils.classproperty
  def VARIABLES(cls):  # pylint: disable=no-self-argument
    # Deprecated alias kept for backwards compatibility; logs a warning once.
    logging.log_first_n(logging.WARN,
                        "VARIABLES collection name is deprecated, please use "
                        "GLOBAL_VARIABLES instead; VARIABLES will be removed "
                        "after 2017-03-02.", 1)
    return cls.GLOBAL_VARIABLES
@tf_export("add_to_collection")
def add_to_collection(name, value):
  """Wrapper for `Graph.add_to_collection()` using the default graph.

  See @{tf.Graph.add_to_collection}
  for more details.

  Args:
    name: The key for the collection. For example, the `GraphKeys` class
      contains many standard names for collections.
    value: The value to add to the collection.

  @compatibility(eager)
  Collections are only supported in eager when variables are created inside an
  EagerVariableStore (e.g. as part of a layer or template).
  @end_compatibility
  """
  get_default_graph().add_to_collection(name, value)
@tf_export("add_to_collections")
def add_to_collections(names, value):
  """Wrapper for `Graph.add_to_collections()` using the default graph.

  See @{tf.Graph.add_to_collections}
  for more details.

  Args:
    names: The key for the collections. The `GraphKeys` class
      contains many standard names for collections.
    value: The value to add to the collections.

  @compatibility(eager)
  Collections are only supported in eager when variables are created inside an
  EagerVariableStore (e.g. as part of a layer or template).
  @end_compatibility
  """
  get_default_graph().add_to_collections(names, value)
@tf_export("get_collection_ref")
def get_collection_ref(key):
  """Wrapper for `Graph.get_collection_ref()` using the default graph.

  See @{tf.Graph.get_collection_ref}
  for more details.

  Args:
    key: The key for the collection. For example, the `GraphKeys` class
      contains many standard names for collections.

  Returns:
    The list of values in the collection with the given `name`, or an empty
    list if no value has been added to that collection.  Note that this returns
    the collection list itself, which can be modified in place to change the
    collection.

  @compatibility(eager)
  Collections are not supported when eager execution is enabled.
  @end_compatibility
  """
  return get_default_graph().get_collection_ref(key)
@tf_export("get_collection")
def get_collection(key, scope=None):
  """Wrapper for `Graph.get_collection()` using the default graph.

  See @{tf.Graph.get_collection}
  for more details.

  Args:
    key: The key for the collection. For example, the `GraphKeys` class
      contains many standard names for collections.
    scope: (Optional.) If supplied, the resulting list is filtered to include
      only items whose `name` attribute matches using `re.match`. Items
      without a `name` attribute are never returned if a scope is supplied and
      the choice or `re.match` means that a `scope` without special tokens
      filters by prefix.

  Returns:
    The list of values in the collection with the given `name`, or
    an empty list if no value has been added to that collection. The
    list contains the values in the order under which they were
    collected.

  @compatibility(eager)
  Collections are not supported when eager execution is enabled.
  @end_compatibility
  """
  return get_default_graph().get_collection(key, scope)
def get_all_collection_keys():
  """Returns a list of collections used in the default graph.

  Returns:
    A list of collection-key names present in the current default graph.
  """
  return get_default_graph().get_all_collection_keys()
# Cache mapping (name, old_scope_name, default_name) -> resolved scope string,
# used by eager-mode `name_scope.__enter__` to avoid recomputing scope names.
name_scope_cache = {}
# Named like a function for backwards compatibility with the
# @tf_contextlib.contextmanager version, which was switched to a class to avoid
# some object creation overhead.
@tf_export("name_scope", "keras.backend.name_scope")
class name_scope(object):  # pylint: disable=invalid-name
  """A context manager for use when defining a Python op.

  This context manager validates that the given `values` are from the
  same graph, makes that graph the default graph, and pushes a
  name scope in that graph (see
  @{tf.Graph.name_scope}
  for more details on that).

  For example, to define a new Python op called `my_op`:

  ```python
  def my_op(a, b, c, name=None):
    with tf.name_scope(name, "MyOp", [a, b, c]) as scope:
      a = tf.convert_to_tensor(a, name="a")
      b = tf.convert_to_tensor(b, name="b")
      c = tf.convert_to_tensor(c, name="c")
      # Define some computation that uses `a`, `b`, and `c`.
      return foo_op(..., name=scope)
  ```
  """

  @property
  def name(self):
    # The resolved name: `default_name` when `name` was None, else `name`.
    return self._name

  def __init__(self, name, default_name=None, values=None):
    """Initialize the context manager.

    Args:
      name: The name argument that is passed to the op function.
      default_name: The default name to use if the `name` argument is `None`.
      values: The list of `Tensor` arguments that are passed to the op function.
    """
    self._name = default_name if name is None else name
    self._default_name = default_name
    self._values = values
    self._ctx = context.context()
    # Captured once at construction time; chooses the eager or graph protocol
    # in __enter__/__exit__.
    self._in_eager_mode = self._ctx.executing_eagerly()

  def __enter__(self):
    """Start the scope block.

    Returns:
      The scope name.

    Raises:
      ValueError: if neither `name` nor `default_name` is provided
        but `values` are.
    """
    if self._in_eager_mode:
      # Eager mode tracks the scope as a plain string on the context; the
      # computed scope strings are memoized in `name_scope_cache`.
      self._old_name = self._ctx.scope_name
      if not self._name:
        scope_name = ""
      else:
        cache_key = self._name, self._old_name, self._default_name
        if cache_key in name_scope_cache:
          self._ctx.scope_name = name_scope_cache[cache_key]
          return self._ctx.scope_name
        elif self._name[-1] == "/":
          # A trailing slash breaks out of nested name scopes, indicating a
          # fully specified scope name, for compatibility with Graph.name_scope.
          scope_name = self._name
        else:
          name_with_trailing_slash = self._name + "/"
          scope_name = (
              self._old_name + name_with_trailing_slash
              if self._old_name else name_with_trailing_slash)
          name_scope_cache[cache_key] = scope_name
      self._ctx.scope_name = scope_name
      return scope_name
    else:
      if self._name is None and self._values is not None:
        # We only raise an error if values is not None (provided) because
        # currently tf.name_scope(None) (values=None then) is sometimes used as
        # an idiom to reset to top scope.
        raise ValueError(
            "At least one of name (%s) and default_name (%s) must be provided."
            % (self._name, self._default_name))
      if self._values is None:
        self._values = []
      g = _get_graph_from_inputs(self._values)
      self._g_manager = g.as_default()
      self._g_manager.__enter__()
      try:
        self._name_scope = g.name_scope(self._name)
        return self._name_scope.__enter__()
      except:
        # If entering the name scope fails, unwind the graph context we
        # already entered before re-raising.
        self._g_manager.__exit__(*sys.exc_info())
        raise

  def __exit__(self, type_arg, value_arg, traceback_arg):
    if self._in_eager_mode:
      self._ctx.scope_name = self._old_name
    else:
      self._name_scope.__exit__(type_arg, value_arg, traceback_arg)
      self._g_manager.__exit__(type_arg, value_arg, traceback_arg)
    return False  # False values do not suppress exceptions
def strip_name_scope(name, export_scope):
  """Removes name scope from a name.

  Args:
    name: A `string` name.
    export_scope: Optional `string`. Name scope to remove.

  Returns:
    Name with name scope removed, or the original name if export_scope
    is None.
  """
  if export_scope:
    if export_scope[-1] == "/":
      export_scope = export_scope[:-1]

    try:
      # Strips export_scope/, export_scope///,
      # ^export_scope/, loc:@export_scope/.
      str_to_replace = r"([\^]|loc:@|^)" + export_scope + r"[\/]+(.*)"
      # count=1 so only the first (leading) occurrence of the scope is removed.
      return re.sub(str_to_replace, r"\1\2", compat.as_str(name), count=1)
    except TypeError as e:
      # If the name is not of a type we can process, simply return it.
      logging.warning(e)
      return name
  else:
    return name
def prepend_name_scope(name, import_scope):
  """Prepends name scope to a name.

  Args:
    name: A `string` name.
    import_scope: Optional `string`. Name scope to add.

  Returns:
    Name with name scope added, or the original name if import_scope
    is None.
  """
  if import_scope:
    if import_scope[-1] == "/":
      import_scope = import_scope[:-1]

    try:
      # Inserts the scope after any "^" (control input) or "loc:@" (colocation)
      # prefix so those markers stay at the front of the name.
      str_to_replace = r"([\^]|loc:@|^)(.*)"
      return re.sub(str_to_replace, r"\1" + import_scope + r"/\2",
                    compat.as_str(name))
    except TypeError as e:
      # If the name is not of a type we can process, simply return it.
      logging.warning(e)
      return name
  else:
    return name
# pylint: disable=g-doc-return-or-yield
# pylint: disable=not-context-manager
@tf_export("op_scope")
@tf_contextlib.contextmanager
def op_scope(values, name, default_name=None):
  """DEPRECATED. Same as name_scope above, just different argument order."""
  # Kept only for backwards compatibility; warns and forwards to name_scope.
  logging.warn("tf.op_scope(values, name, default_name) is deprecated,"
               " use tf.name_scope(name, default_name, values)")
  with name_scope(name, default_name=default_name, values=values) as scope:
    yield scope
# Registry mapping collection names to (proto_type, to_proto, from_proto).
_proto_function_registry = registry.Registry("proto functions")
def register_proto_function(collection_name,
                            proto_type=None,
                            to_proto=None,
                            from_proto=None):
  """Registers `to_proto` and `from_proto` functions for collection_name.

  `to_proto` function converts a Python object to the corresponding protocol
  buffer, and returns the protocol buffer.

  `from_proto` function converts protocol buffer into a Python object, and
  returns the object.

  Args:
    collection_name: Name of the collection.
    proto_type: Protobuf type, such as `saver_pb2.SaverDef`,
      `variable_pb2.VariableDef`, `queue_runner_pb2.QueueRunnerDef`.
    to_proto: Function that implements Python object to protobuf conversion.
    from_proto: Function that implements protobuf to Python object conversion.

  Raises:
    TypeError: If `to_proto` or `from_proto` is provided but not callable.
  """
  if to_proto and not callable(to_proto):
    raise TypeError("to_proto must be callable.")
  if from_proto and not callable(from_proto):
    raise TypeError("from_proto must be callable.")

  # Registered as a triple; index positions are relied upon by the
  # get_collection_proto_type / get_to_proto_function / get_from_proto_function
  # accessors below.
  _proto_function_registry.register((proto_type, to_proto, from_proto),
                                    collection_name)
def get_collection_proto_type(collection_name):
  """Returns the proto_type for collection_name.

  Args:
    collection_name: Name of a collection registered via
      `register_proto_function`.

  Returns:
    The registered protobuf type, or None if the name is unregistered.
  """
  try:
    return _proto_function_registry.lookup(collection_name)[0]
  except LookupError:
    return None
def get_to_proto_function(collection_name):
  """Returns the to_proto function for collection_name.

  Args:
    collection_name: Name of a collection registered via
      `register_proto_function`.

  Returns:
    The registered `to_proto` callable, or None if the name is unregistered.
  """
  try:
    return _proto_function_registry.lookup(collection_name)[1]
  except LookupError:
    return None
def get_from_proto_function(collection_name):
  """Returns the from_proto function for collection_name.

  Args:
    collection_name: Name of a collection registered via
      `register_proto_function`.

  Returns:
    The registered `from_proto` callable, or None if the name is unregistered.
  """
  try:
    return _proto_function_registry.lookup(collection_name)[2]
  except LookupError:
    return None
def _operation_conversion_error(op, dtype=None, name=None, as_ref=False):
"""Produce a nice error if someone converts an Operation to a Tensor."""
raise TypeError(("Can't convert Operation '%s' to Tensor "
"(target dtype=%r, name=%r, as_ref=%r)") % (op.name, dtype,
name, as_ref))
# Ensure any attempt to convert an Operation into a Tensor fails loudly.
register_tensor_conversion_function(Operation, _operation_conversion_error)
| 36.227151 | 116 | 0.685708 |
a9d5ae8df8d4e4f3a51b2f22f62d88bf2d2f91ab | 3,763 | py | Python | webauthn_rp/utils.py | enceladus-rex/webauthn-rp | 71037d19889c4dfd18c42d31e8a44542ee2b34f7 | [
"MIT"
] | 3 | 2020-10-17T00:29:54.000Z | 2021-07-30T13:33:31.000Z | webauthn_rp/utils.py | enceladus-rex/webauthn-rp | 71037d19889c4dfd18c42d31e8a44542ee2b34f7 | [
"MIT"
] | 8 | 2020-10-30T04:32:32.000Z | 2022-03-12T09:38:07.000Z | webauthn_rp/utils.py | enceladus-rex/webauthn-rp | 71037d19889c4dfd18c42d31e8a44542ee2b34f7 | [
"MIT"
] | 1 | 2020-03-16T20:51:00.000Z | 2020-03-16T20:51:00.000Z | import base64
import re
from typing import TYPE_CHECKING, Dict, Type, Union
from cryptography.hazmat.primitives.hashes import (SHA256, SHA384, SHA512,
HashAlgorithm)
from webauthn_rp.constants import (ED448_COORDINATE_BYTE_LENGTH,
ED25519_COORDINATE_BYTE_LENGTH,
P_256_COORDINATE_BYTE_LENGTH,
P_384_COORDINATE_BYTE_LENGTH,
P_521_COORDINATE_BYTE_LENGTH)
if TYPE_CHECKING:
from webauthn_rp import types
# Public API of this module.
__all__ = [
    'snake_to_camel_case',
    'camel_to_snake_case',
    'url_base64_encode',
    'url_base64_decode',
    'curve_coordinate_byte_length',
    'ec2_hash_algorithm',
]
# Fixed byte length of a single coordinate for each supported elliptic curve,
# keyed by the curve enum member name.
_CURVE_COORDINATE_BYTE_LENGTHS: Dict[str, int] = {
    'P_256': P_256_COORDINATE_BYTE_LENGTH,
    'P_384': P_384_COORDINATE_BYTE_LENGTH,
    'P_521': P_521_COORDINATE_BYTE_LENGTH,
    'ED25519': ED25519_COORDINATE_BYTE_LENGTH,
    'ED448': ED448_COORDINATE_BYTE_LENGTH,
}
# Hash algorithm class associated with each supported ECDSA COSE algorithm
# name (used by `ec2_hash_algorithm`).
_EC2_HASH_ALGORITHMS: Dict[str, Type[HashAlgorithm]] = {
    'ES256': SHA256,
    'ES384': SHA384,
    'ES512': SHA512,
}
def snake_to_camel_case(s: str) -> str:
  """Convert a snake cased string into camel case.

  Consecutive underscores are collapsed and leading/trailing underscores
  are dropped; the first word keeps its original casing of the first letter.

  Args:
    s (str): A snake cased string.

  Returns:
    The camel case converted string.
  """
  words = [w for w in s.split('_') if w]
  if not words:
    return ''
  head, *tail = words
  return head + ''.join(w[0].upper() + w[1:] for w in tail)
def camel_to_snake_case(s: str) -> str:
  """Convert a camel cased string into snake case.

  Each uppercase letter starts a new word. Note that an input beginning
  with an uppercase letter therefore yields a leading underscore
  (e.g. 'Foo' -> '_foo'), matching the historical behavior.

  Args:
    s (str): A camel cased string.

  Returns:
    The snake case converted string.
  """
  pieces = []
  start = 0
  for index, ch in enumerate(s):
    if ch.isupper():
      pieces.append(s[start:index].lower())
      start = index
  if start < len(s):
    pieces.append(s[start:].lower())
  return '_'.join(pieces)
def url_base64_encode(b: bytes) -> bytes:
  """Base64 encode raw bytes using URL semantics.

  Uses the URL-safe alphabet ('-' and '_' in place of '+' and '/').

  Args:
    b (bytes): The raw bytes to encode.

  Returns:
    The base64-encoded bytes.

  References:
    * https://tools.ietf.org/html/rfc4648#section-5
  """
  return base64.urlsafe_b64encode(b)
def url_base64_decode(s: str) -> bytes:
  """Base64 decode a string using URL semantics.

  Extra '=' padding is appended so that inputs with stripped padding
  still decode; the decoder ignores the surplus padding.

  Args:
    s (str): The string to decode.

  Returns:
    The base64-decoded bytes.

  References:
    * https://tools.ietf.org/html/rfc4648#section-5
  """
  padded = s + '==='
  return base64.b64decode(padded, altchars=b'-_')
def curve_coordinate_byte_length(
    crv: Union['types.EC2Curve.Name', 'types.EC2Curve.Value',
               'types.OKPCurve.Name', 'types.OKPCurve.Value']
) -> int:
  """Get the fixed number of bytes that an elliptic curve coordinate takes.

  Args:
    crv (Union['types.EC2Curve.Name', 'types.EC2Curve.Value',
      'types.OKPCurve.Name', 'types.OKPCurve.Value']): The elliptic curve.

  Returns:
    The byte length.
  """
  curve_name = crv.name
  # Keyed by enum member name; AssertionError preserves historical behavior
  # for unsupported curves.
  assert curve_name in _CURVE_COORDINATE_BYTE_LENGTHS, 'Unexpected curve'
  return _CURVE_COORDINATE_BYTE_LENGTHS[curve_name]
def ec2_hash_algorithm(
    alg: Union['types.COSEAlgorithmIdentifier.Name',
               'types.COSEAlgorithmIdentifier.Value']
) -> HashAlgorithm:
  """Get a `HashAlgorithm` instance from an algorithm identifier.

  Args:
    alg (Union['types.COSEAlgorithmIdentifier.Name',
      'types.COSEAlgorithmIdentifier.Value']): The COSE algorithm.

  Returns:
    A freshly constructed `HashAlgorithm` instance for the algorithm.
  """
  algorithm_name = alg.name
  assert algorithm_name in _EC2_HASH_ALGORITHMS, 'Invalid COSE algorithm'
  # Table holds classes; instantiate on each call.
  return _EC2_HASH_ALGORITHMS[algorithm_name]()
| 27.071942 | 79 | 0.636726 |
1ad748b3512667031571437b38a0099ef8c01df6 | 16,525 | py | Python | xtreme-view/vsynthlib/deepmvs_wrapper.py | NVlabs/extreme-view-synth | 2820ffdda9f44e70cd2fdd0845ec9145293e4183 | [
"BSD-2-Clause"
] | 80 | 2019-08-15T08:10:38.000Z | 2022-01-22T10:54:19.000Z | xtreme-view/vsynthlib/deepmvs_wrapper.py | NVlabs/extreme-view-synth | 2820ffdda9f44e70cd2fdd0845ec9145293e4183 | [
"BSD-2-Clause"
] | 9 | 2019-11-07T09:43:14.000Z | 2021-01-18T05:57:39.000Z | xtreme-view/vsynthlib/deepmvs_wrapper.py | NVlabs/extreme-view-synth | 2820ffdda9f44e70cd2fdd0845ec9145293e4183 | [
"BSD-2-Clause"
] | 8 | 2019-11-12T19:18:01.000Z | 2021-03-16T06:11:58.000Z | """
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the NVIDIA Source Code License. See LICENSE.md at https://github.com/NVlabs/extreme-view-synth.
Authors: Inchang Choi, Orazio Gallo, Alejandro Troccoli, Min H. Kim, and Jan Kautz
"""
import torch
import torchvision as vision
import torch.nn.functional as F
from torch.autograd import Variable
import pydensecrf.densecrf as dcrf
import numpy as np
import cv2
from DeepMVS.model import DeepMVS
class DeepMVSWrapper(object):
    """Wraps the DeepMVS network + VGG19 feature extractor + DenseCRF filtering.

    Given a list of source images and their camera parameters, builds
    plane-sweep volumes (PSVs), runs DeepMVS patch-wise over the reference
    image, and optionally refines the resulting per-pixel depth probability
    with a dense CRF. Images are assumed to be float arrays in [0, 1] of
    shape (height, width, 3) — see the * 255.0 cast before DenseCRF; TODO
    confirm against callers.
    """
    def __init__(self, filename_DeepMVS,
                 n_depths=100,
                 enable_CUDA=True,
                 do_filter=True):
        # Prefer the second GPU when more than one is available.
        self.dev_id = 0
        if torch.cuda.device_count() > 1:
            self.dev_id = 1
        self.model_deepMVS = DeepMVS(n_depths, use_gpu=enable_CUDA, gpu_id=self.dev_id)
        self.model_deepMVS.load_state_dict(torch.load(filename_DeepMVS))
        # share_memory() allows use from multiple processes without copying.
        self.model_deepMVS.share_memory()
        print('DeepMVS model loaded!', filename_DeepMVS)
        if enable_CUDA:
            self.model_VGGNet = vision.models.vgg19(pretrained=True).cuda(self.dev_id)
        else:
            self.model_VGGNet = vision.models.vgg19(pretrained=True)
        self.model_VGGNet.share_memory()
        # ImageNet normalization applied to the reference image before VGG.
        self.model_VGGNet_normalize\
            = vision.transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        print('VGGNET model loaded!')
        # Constants for DenseCRF.
        self.dict_DenseCRF = dict()
        ######################################
        # default from DeepMVS
        ######################################
        # self.dict_DenseCRF['sigma_xy'] = 80.0
        # self.dict_DenseCRF['sigma_rgb'] = 15.0
        # self.dict_DenseCRF['sigma_d'] = 10.0
        # self.dict_DenseCRF['iteration_num'] = 5
        # compat = np.zeros((n_depths, n_depths), dtype=np.float32)
        # for row in range(0, n_depths):
        #     for col in range(0, n_depths):
        #         compat[row, col] = (row - col) ** 2 / self.dict_DenseCRF['sigma_d'] ** 2 / 2
        # self.dict_DenseCRF['compat'] = compat
        #####################################
        ######################################
        # For museum and others
        ######################################
        self.dict_DenseCRF['sigma_xy'] = 30.0
        self.dict_DenseCRF['sigma_rgb'] = 3
        self.dict_DenseCRF['iteration_num'] = 20
        self.dict_DenseCRF['compat'] = 10.0
        # for high res
        # self.dict_DenseCRF['sigma_xy'] = 60
        # self.dict_DenseCRF['sigma_rgb'] = 3.0
        # self.dict_DenseCRF['iteration_num'] = 20
        # self.dict_DenseCRF['compat'] = 10.0
        ######################################
        # For bikes of StereoMagnificiation
        ######################################
        # self.dict_DenseCRF['sigma_xy'] = 25.0
        # self.dict_DenseCRF['sigma_rgb'] = 10.0
        # self.dict_DenseCRF['iteration_num'] = 5
        # self.dict_DenseCRF['compat'] = 5.0
        self.n_depths = n_depths
        # Patches of 128x128 are processed with a half-patch stride (64),
        # so neighboring patches overlap by half.
        self.patch_size = 128
        self.stride = int(self.patch_size/2)
        self.do_filter = do_filter
    def build_PSV(self, list_src_img, list_src_cam, ref_idx,
                  height, width,
                  min_disp, disp_step, max_depth):
        """Build the plane-sweep volume for each non-reference image.

        For every neighbor image and every disparity plane, warps the
        neighbor into the reference view via a homography induced by the
        fronto-parallel depth plane.

        Returns:
            PSV of shape (n_neighbors, n_depths, height, width, 3), float32.
        """
        n_neighbors = len(list_src_img) - 1
        PSV = np.zeros(shape=[n_neighbors, self.n_depths, height, width, 3], dtype=np.float32)
        # Reference camera intrinsics/extrinsics; cam dicts carry 'intrinsic'
        # (3x3) and 'extrinsic' (4x4 world-to-camera) matrices.
        cam_param_ref = list_src_cam[ref_idx]
        int_mat_ref = cam_param_ref['intrinsic']
        fx_ref = int_mat_ref[0, 0]
        fy_ref = int_mat_ref[1, 1]
        cx_ref = int_mat_ref[0, 2]
        cy_ref = int_mat_ref[1, 2]
        ext_ref = cam_param_ref['extrinsic']
        inv_ext_ref = np.linalg.inv(ext_ref)
        # for each neighbor image
        counter_img = 0
        for i in range(len(list_src_img)):
            if i == ref_idx:
                continue
            img_i = list_src_img[i]
            cam_param_i = list_src_cam[i]
            # get the parameters
            int_mat = cam_param_i['intrinsic']
            fx_i = int_mat[0, 0]
            fy_i = int_mat[1, 1]
            cx_i = int_mat[0, 2]
            cy_i = int_mat[1, 2]
            ext_i = cam_param_i['extrinsic']
            # 4 Corners on the virtual camera to get te 4 rays that intersect with the depth plane
            src_pts = np.reshape([0, 0,
                                  width, 0,
                                  width, height,
                                  0, height], (4, 2))
            # for each depth plane
            for d in range(self.n_depths):
                # Planes are sampled uniformly in disparity (inverse depth);
                # plane 0 is pinned to max_depth.
                disp = d * disp_step + min_disp
                if d == 0:
                    depth = max_depth
                else:
                    depth = 1.0 / disp
                # print(depth)
                # compute dst points
                dst_pts = np.zeros((4, 2))
                counter_pt = 0
                for p in src_pts:
                    # Back-project the corner to 3D at this depth in the
                    # reference camera, move to world, then project into
                    # the neighbor camera.
                    p_3D_ref = np.asarray([(depth * p[0] - depth * cx_ref) / fx_ref,
                                           (depth * p[1] - depth * cy_ref) / fy_ref,
                                           depth])
                    p_4D_ref = np.array([p_3D_ref[0], p_3D_ref[1], p_3D_ref[2], 1.0])
                    p_4D_world = inv_ext_ref.dot(p_4D_ref)
                    p_4D_i = ext_i.dot(p_4D_world)
                    dst = np.asarray([cx_i + fx_i * p_4D_i[0] / p_4D_i[2], cy_i + fy_i * p_4D_i[1] / p_4D_i[2]])
                    dst_pts[counter_pt, :] = dst.squeeze()
                    counter_pt += 1
                # compute homography
                M, mask = cv2.findHomography(dst_pts, src_pts)
                # warp the image
                result = cv2.warpPerspective(img_i, M, (width, height),
                                             flags=cv2.INTER_LINEAR,
                                             borderMode=cv2.BORDER_REPLICATE)
                # cv2.imshow("img_ref", img_ref)
                # cv2.imshow("PSV of img %02d" % (i), result)
                # cv2.waitKey()
                PSV[counter_img, d, :, :, :] = result
            counter_img += 1
        return PSV
    def perform_DeepMVS(self, list_img, ref_idx, PSV,
                        height, width, batch_size=1, use_gpu=True):
        """Run DeepMVS over the reference image in overlapping patches.

        Extracts VGG19 features of the reference image at 5 scales, sweeps
        128x128 patches with half-patch stride, aggregates per-neighbor cost
        volumes by element-wise max, and post-processes with DenseCRF.

        Returns:
            (new_raw, depth_prob): CRF-refined volume and raw softmax depth
            probability, both of shape (n_depths, height, width).
        """
        # Generate VGG features.
        with torch.no_grad():
            VGG_tensor = Variable(
                self.model_VGGNet_normalize(torch.FloatTensor(list_img[ref_idx].copy())).permute(2, 0, 1).unsqueeze(0))
        if use_gpu:
            VGG_tensor = VGG_tensor.cuda(self.dev_id)
        VGG_scaling_factor = 0.01
        # Slice the VGG19 feature stack at layer boundaries 4/9/14/23/32,
        # keeping a downscaled feature map per resolution (1x..16x).
        for i in range(0, 4):
            VGG_tensor = self.model_VGGNet.features[i].forward(VGG_tensor)
        if use_gpu:
            feature_input_1x_whole = VGG_tensor.data.cpu().clone() * VGG_scaling_factor
        else:
            feature_input_1x_whole = VGG_tensor.data.clone() * VGG_scaling_factor
        for i in range(4, 9):
            VGG_tensor = self.model_VGGNet.features[i].forward(VGG_tensor)
        if use_gpu:
            feature_input_2x_whole = VGG_tensor.data.cpu().clone() * VGG_scaling_factor
        else:
            feature_input_2x_whole = VGG_tensor.data.clone() * VGG_scaling_factor
        for i in range(9, 14):
            VGG_tensor = self.model_VGGNet.features[i].forward(VGG_tensor)
        if use_gpu:
            feature_input_4x_whole = VGG_tensor.data.cpu().clone() * VGG_scaling_factor
        else:
            feature_input_4x_whole = VGG_tensor.data.clone() * VGG_scaling_factor
        for i in range(14, 23):
            VGG_tensor = self.model_VGGNet.features[i].forward(VGG_tensor)
        if use_gpu:
            feature_input_8x_whole = VGG_tensor.data.cpu().clone() * VGG_scaling_factor
        else:
            feature_input_8x_whole = VGG_tensor.data.clone() * VGG_scaling_factor
        for i in range(23, 32):
            VGG_tensor = self.model_VGGNet.features[i].forward(VGG_tensor)
        if use_gpu:
            feature_input_16x_whole = VGG_tensor.data.cpu().clone() * VGG_scaling_factor
        else:
            feature_input_16x_whole = VGG_tensor.data.clone() * VGG_scaling_factor
        del VGG_tensor
        # Stride through entire reference image.
        predict_raw = torch.zeros(self.n_depths, height, width)
        border_x = (self.patch_size - self.stride) / 2
        border_y = (self.patch_size - self.stride) / 2
        col_total = int((width - 2 * border_x - 1) / self.stride) + 1
        row_total = int((height - 2 * border_y - 1) / self.stride) + 1
        for row_idx in range(0, row_total):
            for col_idx in range(0, col_total):
                # Compute patch location for this patch and next patch.
                # The last row/column is anchored to the image edge.
                if col_idx != col_total - 1:
                    start_x = col_idx * self.stride
                else:
                    start_x = width - self.patch_size
                if row_idx != row_total - 1:
                    start_y = row_idx * self.stride
                else:
                    start_y = height - self.patch_size
                # Read plane-sweep volume and start next patch.
                # Pixel values are shifted to [-0.5, 0.5] for the network.
                ref_img = list_img[ref_idx][start_y:(start_y + self.patch_size), start_x:(start_x + self.patch_size),
                          :].copy() - 0.5
                sweep_volume = PSV[:, :, start_y:(start_y + self.patch_size), start_x:(start_x + self.patch_size),
                               :].copy() - 0.5
                num_neighbors = len(list_img) - 1
                # Prepare the inputs.
                data_in_tensor = torch.FloatTensor(batch_size, 1, self.n_depths, 2, 3, self.patch_size, self.patch_size)
                ref_img_tensor = torch.FloatTensor(ref_img).permute(2, 0, 1).unsqueeze(0)
                data_in_tensor[0, 0, :, 0, ...] = ref_img_tensor.expand(self.n_depths, -1, -1, -1)
                with torch.no_grad():
                    feature_input_1x \
                        = Variable(
                        feature_input_1x_whole[..., start_y:start_y + self.patch_size, start_x:start_x + self.patch_size])
                    feature_input_2x \
                        = Variable(
                        feature_input_2x_whole[..., int(start_y / 2):int(start_y / 2) + int(self.patch_size / 2),
                        int(start_x / 2):int(start_x / 2) + int(self.patch_size / 2)])
                    feature_input_4x \
                        = Variable(
                        feature_input_4x_whole[..., int(start_y / 4):int(start_y / 4) + int(self.patch_size / 4),
                        int(start_x / 4):int(start_x / 4) + int(self.patch_size / 4)])
                    feature_input_8x \
                        = Variable(
                        feature_input_8x_whole[..., int(start_y / 8):int(start_y / 8) + int(self.patch_size / 8),
                        int(start_x / 8):int(start_x / 8) + int(self.patch_size / 8)])
                    feature_input_16x \
                        = Variable(
                        feature_input_16x_whole[..., int(start_y / 16):int(start_y / 16) + int(self.patch_size / 16),
                        int(start_x / 16):int(start_x / 16) + int(self.patch_size / 16)])
                if use_gpu:
                    feature_input_1x = feature_input_1x.cuda(self.dev_id)
                    feature_input_2x = feature_input_2x.cuda(self.dev_id)
                    feature_input_4x = feature_input_4x.cuda(self.dev_id)
                    feature_input_8x = feature_input_8x.cuda(self.dev_id)
                    feature_input_16x = feature_input_16x.cuda(self.dev_id)
                # Loop through all neighbor images.
                # Per-neighbor cost volumes are fused by element-wise max.
                for neighbor_idx in range(0, num_neighbors):
                    data_in_tensor[0, 0, :, 1, ...] = torch.FloatTensor(
                        np.moveaxis(sweep_volume[neighbor_idx, ...], -1, -3))
                    with torch.no_grad():
                        data_in = Variable(data_in_tensor)
                    if use_gpu:
                        data_in = data_in.cuda(self.dev_id)
                    if neighbor_idx == 0:
                        cost_volume \
                            = self.model_deepMVS.forward_feature(data_in, [feature_input_1x, feature_input_2x, feature_input_4x,
                                                                           feature_input_8x, feature_input_16x]).data[...]
                    else:
                        cost_volume \
                            = torch.max(cost_volume, self.model_deepMVS.forward_feature(data_in, [feature_input_1x, feature_input_2x,
                                                                                                 feature_input_4x, feature_input_8x,
                                                                                                 feature_input_16x]).data[...])
                # Make final prediction.
                with torch.no_grad():
                    predict = self.model_deepMVS.forward_predict(Variable(cost_volume[:, 0, ...]))
                # Compute copy range.
                # Only the central stride-sized region of each patch is kept,
                # except at image borders where the full margin is used.
                if col_idx == 0:
                    copy_x_start = 0
                    copy_x_end = self.patch_size - border_x
                elif col_idx == col_total - 1:
                    copy_x_start = border_x + col_idx * self.stride
                    copy_x_end = width
                else:
                    copy_x_start = border_x + col_idx * self.stride
                    copy_x_end = copy_x_start + self.stride
                if row_idx == 0:
                    copy_y_start = 0
                    copy_y_end = self.patch_size - border_y
                elif row_idx == row_total - 1:
                    copy_y_start = border_y + row_idx * self.stride
                    copy_y_end = height
                else:
                    copy_y_start = border_y + row_idx * self.stride
                    copy_y_end = copy_y_start + self.stride
                # Copy the prediction to buffer.
                copy_x_start = int(copy_x_start)
                copy_x_end = int(copy_x_end)
                copy_y_start = int(copy_y_start)
                copy_y_end = int(copy_y_end)
                predict_raw[..., copy_y_start:copy_y_end, copy_x_start:copy_x_end] \
                    = predict.data[0, :, copy_y_start - start_y:copy_y_end - start_y,
                      copy_x_start - start_x:copy_x_end - start_x]
        ######################################################
        # compute the depth probability
        ######################################################
        with torch.no_grad():
            depth_prob = F.softmax(Variable(predict_raw), dim=0).data.numpy()
        ######################################################
        # Pass through DenseCRF.
        ######################################################
        with torch.no_grad():
            unary_energy = F.log_softmax(Variable(predict_raw), dim=0).data.numpy()
        crf = dcrf.DenseCRF2D(width, height, self.n_depths)
        crf.setUnaryEnergy(-unary_energy.reshape(self.n_depths, height * width))
        ref_img_full = (list_img[ref_idx] * 255.0).astype(np.uint8)
        crf.addPairwiseBilateral(sxy=(self.dict_DenseCRF['sigma_xy'], self.dict_DenseCRF['sigma_xy']),
                                 srgb=(
                                     self.dict_DenseCRF['sigma_rgb'], self.dict_DenseCRF['sigma_rgb'], self.dict_DenseCRF['sigma_rgb']),
                                 rgbim=ref_img_full,
                                 compat=self.dict_DenseCRF['compat'],
                                 kernel=dcrf.FULL_KERNEL,
                                 normalization=dcrf.NORMALIZE_SYMMETRIC)
        new_raw = crf.inference(self.dict_DenseCRF['iteration_num'])
        new_raw = np.array(new_raw).reshape(self.n_depths, height, width)
        return new_raw, depth_prob
    def compute(self, list_src_img, list_src_cam, ref_idx,
                min_disp, disp_step, max_depth):
        """End-to-end depth estimation for the reference image.

        Returns the CRF-refined volume when do_filter is set, otherwise the
        raw softmax depth probability. All source images are assumed to share
        the shape of list_src_img[0].
        """
        img = list_src_img[0]
        height, width, n_channels = img.shape
        # build PSV
        PSVs = self.build_PSV(list_src_img, list_src_cam, ref_idx,
                              height, width,
                              min_disp, disp_step, max_depth)
        # call deepMVS
        dp_refined, dp = self.perform_DeepMVS(list_src_img, ref_idx, PSVs,
                                              height, width)
        if self.do_filter:
            return dp_refined
        else:
            return dp
| 44.662162 | 133 | 0.519334 |
8d4cacbd742147da5d6989b10bcdb6b6304489da | 17,064 | py | Python | eng/mgmt/automation/generate.py | yifan-zhou922/azure-sdk-for-java | e8dddefaa739e2a680529a910c81249685ea5e30 | [
"MIT"
] | null | null | null | eng/mgmt/automation/generate.py | yifan-zhou922/azure-sdk-for-java | e8dddefaa739e2a680529a910c81249685ea5e30 | [
"MIT"
] | null | null | null | eng/mgmt/automation/generate.py | yifan-zhou922/azure-sdk-for-java | e8dddefaa739e2a680529a910c81249685ea5e30 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import os
import re
import sys
import json
import glob
import yaml
import shutil
import logging
import argparse
import requests
import tempfile
import subprocess
import urllib.parse
from typing import Tuple
pwd = os.getcwd()
os.chdir(os.path.abspath(os.path.dirname(sys.argv[0])))
from parameters import *
from utils import set_or_increase_version
from utils import update_service_ci_and_pom
from utils import update_root_pom
from utils import update_version
from generate_data import sdk_automation as sdk_automation_data
os.chdir(pwd)
# Add two more indent for list in yaml dump
class ListIndentDumper(yaml.SafeDumper):
    # Force indentless=False so that list items are indented under their
    # parent key instead of flush with it (PyYAML default).
    def increase_indent(self, flow = False, indentless = False):
        return super(ListIndentDumper, self).increase_indent(flow, False)
def generate(
    sdk_root: str,
    service: str,
    spec_root: str,
    readme: str,
    autorest: str,
    use: str,
    tag: str = None,
    version: str = None,
    autorest_options: str = '',
    **kwargs,
) -> bool:
    """Run AutoRest to (re)generate the fluent-lite SDK for *service*.

    Removes previously generated sources, invokes AutoRest against *readme*
    (resolved relative to *spec_root*, which may be a URL or a local path),
    then updates CI/POM/version metadata. Returns False when AutoRest fails.
    """
    module = ARTIFACT_FORMAT.format(service)
    namespace = NAMESPACE_FORMAT.format(service)
    output_dir = os.path.join(
        sdk_root,
        'sdk/{0}'.format(service),
        module,
    )
    # Drop previously generated code so stale files do not survive.
    shutil.rmtree(os.path.join(output_dir, 'src/main'), ignore_errors = True)
    if os.path.exists(os.path.join(output_dir, 'src/samples/README.md')):
        # samples contains hand-written code
        shutil.rmtree(os.path.join(output_dir, 'src/samples/java', namespace.replace('.', '/'), 'generated'), ignore_errors = True)
    else:
        shutil.rmtree(os.path.join(output_dir, 'src/samples'), ignore_errors = True)
    # spec_root may be an http(s) URL or a local directory.
    if re.match(r'https?://', spec_root):
        readme = urllib.parse.urljoin(spec_root, readme)
    else:
        readme = os.path.join(spec_root, readme)
    tag_option = '--tag={0}'.format(tag) if tag else ''
    version_option = '--package-version={0}'.format(version) if version else ''
    command = 'autorest --version={0} --use={1} --java --java.azure-libraries-for-java-folder={2} --java.output-folder={3} --java.namespace={4} {5}'.format(
        autorest,
        use,
        os.path.abspath(sdk_root),
        os.path.abspath(output_dir),
        namespace,
        ' '.join((tag_option, version_option, FLUENTLITE_ARGUMENTS, autorest_options, readme)),
    )
    logging.info(command)
    if os.system(command) != 0:
        logging.error('[GENERATE] Autorest fail')
        return False
    # Wire the generated module into the service CI, service POM, root POM
    # and the version files.
    module = ARTIFACT_FORMAT.format(service)
    group = GROUP_ID
    output_folder = OUTPUT_FOLDER_FORMAT.format(service)
    update_service_ci_and_pom(sdk_root, service, group, module)
    update_root_pom(sdk_root, service)
    update_version(sdk_root, output_folder)
    return True
def compile_package(sdk_root, service) -> bool:
    """Build and verify the generated package with Maven.

    Returns True on success, False (after logging) when the build fails.
    """
    artifact = ARTIFACT_FORMAT.format(service)
    command = ('mvn --no-transfer-progress clean verify package '
               '-f {0}/pom.xml -Dmaven.javadoc.skip -Dgpg.skip -Drevapi.skip '
               '-pl {1}:{2} -am').format(sdk_root, GROUP_ID, artifact)
    exit_code = os.system(command)
    if exit_code != 0:
        logging.error('[COMPILE] Maven build fail')
        return False
    return True
def generate_changelog_and_breaking_change(
    sdk_root,
    old_jar,
    new_jar,
    **kwargs,
) -> Tuple[bool, str]:
    """Diff two jars with the changelog Maven tool.

    Returns (breaking, changelog) parsed from the tool's JSON stdout.
    """
    logging.info('[CHANGELOG] changelog jar: {0} -> {1}'.format(
        old_jar, new_jar))
    command = ('mvn --no-transfer-progress clean compile exec:java -q '
               '-f {0}/eng/mgmt/changelog/pom.xml '
               '-DOLD_JAR="{1}" -DNEW_JAR="{2}"').format(sdk_root, old_jar, new_jar)
    completed = subprocess.run(
        command,
        stdout = subprocess.PIPE,
        shell = True,
    )
    stdout = completed.stdout
    logging.info('[CHANGELOG] changelog output: {0}'.format(stdout))
    result = json.loads(stdout)
    breaking = result.get('breaking', False)
    changelog = result.get('changelog', '')
    return (breaking, changelog)
def update_changelog(changelog_file, changelog):
    """Replace the body of the top (unreleased) version section of a CHANGELOG.

    Locates the first two '## x.y.z (date)' headings, strips everything from
    the first '###' sub-heading of the top section, appends *changelog*
    (when non-empty), and writes the file back. Logs and returns without
    changes when fewer than two version headings are found.
    """
    version_pattern = r'^## (\d+\.\d+\.\d+(?:-[\w\d\.]+)?) \((.*?)\)'
    with open(changelog_file, 'r') as fin:
        old_changelog = fin.read()
    first_version = re.search(version_pattern, old_changelog, re.M)
    if not first_version:
        logging.error(
            '[Changelog][Skip] Cannot read first version from {}'.format(
                changelog_file))
        return
    left = old_changelog[first_version.end():]
    second_version = re.search(version_pattern, left, re.M)
    if not second_version:
        logging.error(
            '[Changelog][Skip] Cannot read second version from {}'.format(
                changelog_file))
        return
    first_version_part = old_changelog[:first_version.end() +
                                       second_version.start()]
    # remove text starting from the first '###' (usually the block '### Features Added')
    # BUGFIX: re.S must be passed via the 'flags' keyword; passing it
    # positionally made it the 'count' argument, so the pattern never
    # matched across lines and the old section body was left in place.
    first_version_part = re.sub(r'\n###.*', '\n', first_version_part,
                                flags = re.S)
    first_version_part = re.sub(r'\s+$', '', first_version_part)
    first_version_part += '\n\n'
    if changelog.strip() != '':
        first_version_part += changelog.strip() + '\n\n'
    with open(changelog_file, 'w') as fout:
        fout.write(first_version_part +
                   old_changelog[first_version.end() + second_version.start():])
    logging.info('[Changelog][Success] Write to changelog')
def compare_with_maven_package(sdk_root, service, stable_version,
                               current_version):
    """Download the last released jar from Maven and diff it against the
    freshly built jar, updating the CHANGELOG with the result.

    No-op when there is no previous release (stable == current). Raises on
    HTTP failure or when the locally built jar is missing.
    """
    logging.info('[Changelog] Compare stable version {0} with current version {1}'.format(stable_version, current_version))
    if stable_version == current_version:
        logging.info('[Changelog][Skip] no previous version')
        return
    module = ARTIFACT_FORMAT.format(service)
    # Fetch the previously released artifact from Maven Central.
    r = requests.get(
        MAVEN_URL.format(group_id = GROUP_ID.replace('.', '/'),
                         artifact_id = module,
                         version = stable_version))
    r.raise_for_status()
    # The downloaded jar lives in a temp file that is always removed below.
    old_jar_fd, old_jar = tempfile.mkstemp('.jar')
    try:
        with os.fdopen(old_jar_fd, 'wb') as tmp:
            tmp.write(r.content)
        new_jar = os.path.join(
            sdk_root,
            JAR_FORMAT.format(service = service,
                              artifact_id = module,
                              version = current_version))
        if not os.path.exists(new_jar):
            raise Exception('Cannot found built jar in {0}'.format(new_jar))
        breaking, changelog = generate_changelog_and_breaking_change(
            sdk_root, old_jar, new_jar)
        if changelog:
            changelog_file = os.path.join(
                sdk_root,
                CHANGELOG_FORMAT.format(service = service,
                                        artifact_id = module))
            update_changelog(changelog_file, changelog)
        else:
            logging.error('[Changelog][Skip] Cannot get changelog')
    finally:
        os.remove(old_jar)
def get_version(
    sdk_root: str,
    service: str,
) -> str:
    """Return the raw 'group:artifact;dep;current' line for *service* from
    eng/versioning/version_client.txt, or None (after logging) when absent.
    """
    version_file = os.path.join(sdk_root, 'eng/versioning/version_client.txt')
    artifact = ARTIFACT_FORMAT.format(service)
    project = '{0}:{1}'.format(GROUP_ID, artifact)
    with open(version_file, 'r') as fin:
        for raw_line in fin:
            entry = raw_line.strip()
            # Skip comment lines; match on the 'group:artifact' field.
            if entry.startswith('#'):
                continue
            if entry.split(';')[0] == project:
                return entry
    logging.error('Cannot get version of {0}'.format(project))
    return None
def parse_args() -> argparse.Namespace:
    """Define and parse the command-line interface of this generator script.

    The trailing positional 'config' (0..n values) switches the script into
    SDK-automation mode when provided (see main()).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--spec-root',
        default =
        'https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/',
        help = 'Spec root folder',
    )
    parser.add_argument(
        '-r',
        '--readme',
        help =
        'Readme path, Sample: "storage" or "specification/storage/resource-manager/readme.md"',
    )
    parser.add_argument('-t', '--tag', help = 'Specific tag')
    parser.add_argument('-v', '--version', help = 'Specific sdk version')
    parser.add_argument(
        '-s',
        '--service',
        help = 'Service Name if not the same as spec name',
    )
    parser.add_argument(
        '-u',
        '--use',
        default = AUTOREST_JAVA,
        help = 'Autorest java plugin',
    )
    parser.add_argument(
        '--autorest',
        default = AUTOREST_CORE_VERSION,
        help = 'Autorest version',
    )
    parser.add_argument(
        '--autorest-options',
        default = '',
        help = 'Additional autorest options',
    )
    parser.add_argument('--suffix', help = 'Suffix for namespace and artifact')
    parser.add_argument(
        '--auto-commit-external-change',
        action = 'store_true',
        help = 'Automatic commit the generated code',
    )
    parser.add_argument('--user-name', help = 'User Name for commit')
    parser.add_argument('--user-email', help = 'User Email for commit')
    parser.add_argument(
        'config',
        nargs = '*',
    )
    return parser.parse_args()
def update_parameters(suffix):
    # update changeable parameters in parameters.py
    # Mutates the module-level format strings so that all later
    # ARTIFACT/NAMESPACE/OUTPUT_FOLDER formatting picks up the suffix.
    # A falsy suffix (None/'') resets to the suffix-less defaults.
    global SUFFIX, NAMESPACE_SUFFIX, ARTIFACT_SUFFIX, NAMESPACE_FORMAT, ARTIFACT_FORMAT, OUTPUT_FOLDER_FORMAT
    SUFFIX = suffix
    NAMESPACE_SUFFIX = '.{0}'.format(SUFFIX) if SUFFIX else ''
    ARTIFACT_SUFFIX = '-{0}'.format(SUFFIX) if SUFFIX else ''
    # '{{0}}' keeps a literal '{0}' placeholder for the later .format(service).
    NAMESPACE_FORMAT = 'com.azure.resourcemanager.{{0}}{0}'.format(
        NAMESPACE_SUFFIX)
    ARTIFACT_FORMAT = 'azure-resourcemanager-{{0}}{0}'.format(ARTIFACT_SUFFIX)
    OUTPUT_FOLDER_FORMAT = 'sdk/{{0}}/{0}'.format(ARTIFACT_FORMAT)
def valid_service(service: str):
    """Normalize a service name: lower-case it and keep only [a-z0-9_]."""
    allowed = set('abcdefghijklmnopqrstuvwxyz0123456789_')
    return ''.join(ch for ch in service.lower() if ch in allowed)
def read_api_specs(api_specs_file: str) -> Tuple[str, dict]:
    # return comment and api_specs
    """Split api-specs.yml into its leading comment block and parsed YAML.

    Returns (comment, api_specs) where *comment* is the text of the initial
    '#' lines (preserved verbatim for round-tripping by write_api_specs).
    Raises when the file consists only of comment lines.
    """
    with open(api_specs_file) as fin:
        lines = fin.readlines()
    comment = ''
    for i, line in enumerate(lines):
        # First non-comment line marks the end of the header.
        if not line.strip().startswith('#'):
            comment = ''.join(lines[:i])
            api_specs = yaml.safe_load(''.join(lines[i:]))
            break
    else:
        # for/else: reached only when the loop never hit 'break'.
        raise Exception('api-specs.yml should has non comment line')
    return comment, api_specs
def write_api_specs(api_specs_file: str, comment: str, api_specs: dict):
    """Write api-specs.yml back: original comment header + dumped YAML.

    Uses ListIndentDumper so list items stay indented under their keys.
    """
    with open(api_specs_file, 'w') as fout:
        fout.write(comment)
        fout.write(yaml.dump(api_specs, Dumper = ListIndentDumper))
def get_and_update_service_from_api_specs(
    api_specs_file: str,
    spec: str,
    service: str = None,
):
    """Resolve the service name for *spec*, persisting the mapping.

    Looks the service up in api-specs.yml when not given explicitly,
    normalizes it with valid_service(), records a spec->service mapping in
    the file whenever they differ, and returns the normalized name.
    'resources' is special-cased and never written back.
    """
    SPECIAL_SPEC = {'resources'}
    if spec in SPECIAL_SPEC:
        if not service:
            service = spec
        return valid_service(service)
    comment, api_specs = read_api_specs(api_specs_file)
    api_spec = api_specs.get(spec)
    if not service:
        if api_spec:
            service = api_spec.get('service')
        if not service:
            service = spec
    service = valid_service(service)
    if service != spec:
        # Persist the non-trivial mapping so later runs resolve it directly.
        api_specs[spec] = dict() if not api_spec else api_spec
        api_specs[spec]['service'] = service
    # NOTE(review): the file is rewritten even when nothing changed.
    write_api_specs(api_specs_file, comment, api_specs)
    return service
def get_suffix_from_api_specs(api_specs_file: str, spec: str):
    """Return the suffix configured for *spec* in api-specs.yml, falling
    back to the module-level SUFFIX when no non-empty suffix is present.
    """
    _, api_specs = read_api_specs(api_specs_file)
    entry = api_specs.get(spec)
    if entry:
        suffix = entry.get('suffix')
        if suffix:
            return suffix
    return SUFFIX
def sdk_automation(input_file: str, output_file: str):
    """SDK-automation entry point: read the pipeline JSON config, generate a
    package per related readme, and write the package report JSON.

    Falls back to the data-plane generator when no management-plane readme
    matched. Temporarily swaps the global suffix parameters per spec and
    restores them afterwards.
    """
    base_dir = os.path.abspath(os.path.dirname(sys.argv[0]))
    sdk_root = os.path.abspath(os.path.join(base_dir, SDK_ROOT))
    api_specs_file = os.path.join(base_dir, API_SPECS_FILE)
    with open(input_file, 'r') as fin:
        config = json.load(fin)
    packages = []
    for readme in config['relatedReadmeMdFiles']:
        match = re.search(
            'specification/([^/]+)/resource-manager/readme.md',
            readme,
            re.IGNORECASE,
        )
        if not match:
            logging.info(
                '[Skip] readme path does not format as specification/*/resource-manager/readme.md'
            )
        else:
            spec = match.group(1)
            service = get_and_update_service_from_api_specs(
                api_specs_file, spec)
            # Remember the current suffix so it can be restored below.
            pre_suffix = SUFFIX
            suffix = get_suffix_from_api_specs(api_specs_file, spec)
            update_parameters(suffix)
            # TODO: use specific function to detect tag in "resources"
            tag = None
            if service == 'resources':
                with open(os.path.join(config['specFolder'], readme)) as fin:
                    tag_match = re.search('tag: (package-resources-[\S]+)',
                                          fin.read())
                if tag_match:
                    tag = tag_match.group(1)
                else:
                    tag = 'package-resources-2020-10'
            module = ARTIFACT_FORMAT.format(service)
            stable_version, current_version = set_or_increase_version(
                sdk_root,
                GROUP_ID,
                module
            )
            succeeded = generate(
                sdk_root,
                service,
                spec_root = config['specFolder'],
                readme = readme,
                autorest = AUTOREST_CORE_VERSION,
                use = AUTOREST_JAVA,
                tag = tag,
            )
            if succeeded:
                compile_package(sdk_root, service)
            # Report the generated files/artifacts for this package.
            generated_folder = OUTPUT_FOLDER_FORMAT.format(service)
            packages.append({
                'packageName':
                    '{0}'.format(ARTIFACT_FORMAT.format(service)),
                'path': [
                    generated_folder,
                    CI_FILE_FORMAT.format(service),
                    POM_FILE_FORMAT.format(service),
                    'eng/versioning',
                    'pom.xml',
                ],
                'readmeMd': [readme],
                'artifacts': [
                    '{0}/pom.xml'.format(generated_folder),
                ] + [
                    jar for jar in glob.glob('{0}/target/*.jar'.format(
                        generated_folder))
                ],
                'result':
                    'succeeded' if succeeded else 'failed',
            })
            update_parameters(pre_suffix)
    if not packages:
        # try data-plane codegen
        packages = sdk_automation_data(config)
    with open(output_file, 'w') as fout:
        output = {
            'packages': packages,
        }
        json.dump(output, fout)
def main():
    """CLI entry point: dispatch to sdk_automation when positional config
    arguments are given, otherwise run a single interactive generation:
    resolve spec/service, bump the version, generate, compile, update the
    changelog against the last Maven release, and optionally git-commit the
    external (non-generated) changes.
    """
    args = vars(parse_args())
    if args.get('config'):
        return sdk_automation(args['config'][0], args['config'][1])
    base_dir = os.path.abspath(os.path.dirname(sys.argv[0]))
    sdk_root = os.path.abspath(os.path.join(base_dir, SDK_ROOT))
    api_specs_file = os.path.join(base_dir, API_SPECS_FILE)
    readme = args['readme']
    match = re.match(
        'specification/([^/]+)/resource-manager/readme.md',
        readme,
        re.IGNORECASE,
    )
    # Accept either a bare spec name ("storage") or the full readme path.
    if not match:
        spec = readme
        readme = 'specification/{0}/resource-manager/readme.md'.format(spec)
    else:
        spec = match.group(1)
    args['readme'] = readme
    args['spec'] = spec
    # update_parameters(
    #     args.get('suffix') or get_suffix_from_api_specs(api_specs_file, spec))
    update_parameters(args.get('suffix'))
    service = get_and_update_service_from_api_specs(api_specs_file, spec,
                                                    args['service'])
    args['service'] = service
    module = ARTIFACT_FORMAT.format(service)
    stable_version, current_version = set_or_increase_version(sdk_root, GROUP_ID, module, **args)
    args['version'] = current_version
    succeeded = generate(sdk_root, **args)
    if succeeded:
        succeeded = compile_package(sdk_root, service)
    if succeeded:
        compare_with_maven_package(sdk_root, service, stable_version,
                                   current_version)
        if args.get('auto_commit_external_change') and args.get(
                'user_name') and args.get('user_email'):
            # Commit metadata/CI/POM changes on behalf of the given user;
            # always restore the working directory afterwards.
            pwd = os.getcwd()
            try:
                os.chdir(sdk_root)
                os.system('git add eng/versioning eng/mgmt pom.xml {0} {1}'.format(
                    CI_FILE_FORMAT.format(service),
                    POM_FILE_FORMAT.format(service)))
                os.system(
                    'git -c user.name={0} -c user.email={1} commit -m "[Automation] External Change"'
                    .format(args['user_name'], args['user_email']))
            finally:
                os.chdir(pwd)
    if not succeeded:
        raise RuntimeError('Failed to generate code or compile the package')
if __name__ == '__main__':
    # Configure stdout logging before running so generation progress and
    # errors are timestamped.
    logging.basicConfig(
        stream=sys.stdout,
        level=logging.INFO,
        format='%(asctime)s %(levelname)s %(message)s',
        datefmt='%Y-%m-%d %X',
    )
    main()
| 33.263158 | 156 | 0.597398 |
d98085f8f4d98c54e03e40d80217b31e8a9bb189 | 3,414 | py | Python | scripts/kconfig/kconfig.py | FingerprintCardsAB/zephyr | 2055b84f7909453904606a83192497f10ce132d4 | [
"Apache-2.0"
] | null | null | null | scripts/kconfig/kconfig.py | FingerprintCardsAB/zephyr | 2055b84f7909453904606a83192497f10ce132d4 | [
"Apache-2.0"
] | null | null | null | scripts/kconfig/kconfig.py | FingerprintCardsAB/zephyr | 2055b84f7909453904606a83192497f10ce132d4 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Modified from: https://github.com/ulfalizer/Kconfiglib/blob/master/examples/merge_config.py
import argparse
import sys
from kconfiglib import Kconfig, Symbol, BOOL, STRING, TRISTATE, TRI_TO_STR
def main():
    """Merge Kconfig fragments into a single .config and autoconf header.

    Parses the Kconfig tree, loads all fragments on top of each other
    (replace=False keeps earlier assignments), warns about assignments that
    did not take effect, and aborts on malformed .config lines.
    """
    parse_args()
    print("Parsing Kconfig tree in {}".format(args.kconfig_root))
    kconf = Kconfig(args.kconfig_root)
    # Enable warnings for assignments to undefined symbols
    kconf.enable_undef_warnings()
    # This script uses alldefconfig as the base. Other starting states could be set
    # up here as well. The approach in examples/allnoconfig_simpler.py could
    # provide an allnoconfig starting state for example.
    print("Using {} as base".format(args.conf_fragments[0]))
    for config in args.conf_fragments[1:]:
        print("Merging {}".format(config))
    # Create a merged configuration by loading the fragments with replace=False
    for config in args.conf_fragments:
        kconf.load_config(config, replace=False)
    # Print warnings for symbols whose actual value doesn't match the assigned
    # value
    for sym in kconf.defined_syms:
        # Was the symbol assigned to?
        if sym.user_value is not None:
            verify_assigned_value(sym)
    # Turn the warning for malformed .config lines into an error
    for warning in kconf.warnings:
        if "ignoring malformed line" in warning:
            print("Aborting due to malformed configuration settings",
                  file=sys.stderr)
            sys.exit(1)
    # Write the merged configuration
    kconf.write_config(args.dotconfig)
    # Write the C header
    kconf.write_autoconf(args.autoconf)
def verify_assigned_value(sym):
    """Warn on stderr when the value assigned to *sym* did not "take"
    (i.e. differs from the value the symbol actually ended up with)."""
    # Tristate values are represented as 0, 1, 2; convert to "n"/"m"/"y"
    # so the comparison and the message match str_value.
    user_value = sym.user_value
    if sym.type in (BOOL, TRISTATE):
        user_value = TRI_TO_STR[user_value]
    if user_value == sym.str_value:
        return
    print('warning: {} was assigned the value "{}" but got the value "{}". '
          "Check its dependencies in the 'menuconfig' interface (see the "
          "Application Development Primer section of the manual), or in "
          "the Kconfig reference at "
          "http://docs.zephyrproject.org/reference/kconfig/CONFIG_{}.html "
          "(which is updated regularly from the master branch)"
          .format(name_and_loc(sym), user_value, sym.str_value, sym.name),
          file=sys.stderr)
def name_and_loc(sym):
    """Format a symbol's name plus its Kconfig definition location(s)."""
    if not sym.nodes:
        return sym.name + " (undefined)"
    locations = ["{}:{}".format(node.filename, node.linenr)
                 for node in sym.nodes]
    return "{} (defined at {})".format(sym.name, ", ".join(locations))
def parse_args():
    """Parse command-line arguments into the module-level 'args' variable.

    Also returns the parsed argparse.Namespace, so callers (and tests) can
    use the result without going through the global.
    """
    global args

    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter
    )

    parser.add_argument("kconfig_root")   # top-level Kconfig file
    parser.add_argument("dotconfig")      # merged .config to write
    parser.add_argument("autoconf")       # C header to write
    parser.add_argument("conf_fragments", metavar='conf', type=str, nargs='+')

    args = parser.parse_args()
    return args
# Script entry point: run only when executed directly, not when imported.
if __name__ == "__main__":
    main()
| 33.145631 | 93 | 0.668424 |
58411c0bd9ad916e2af7fae25d0dfab6028602dc | 3,218 | py | Python | my_plugins/YouCompleteMe/third_party/ycmd/ycmd/tests/javascript/__init__.py | liutongliang/myVim | 6c7ab36f25f4a5e2e1daeab8c43509975eb031e3 | [
"MIT"
] | 2 | 2018-04-16T03:08:42.000Z | 2021-01-06T10:21:49.000Z | my_plugins/YouCompleteMe/third_party/ycmd/ycmd/tests/javascript/__init__.py | liutongliang/myVim | 6c7ab36f25f4a5e2e1daeab8c43509975eb031e3 | [
"MIT"
] | null | null | null | my_plugins/YouCompleteMe/third_party/ycmd/ycmd/tests/javascript/__init__.py | liutongliang/myVim | 6c7ab36f25f4a5e2e1daeab8c43509975eb031e3 | [
"MIT"
] | null | null | null | # Copyright (C) 2018 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# Not installing aliases from python-future; it's unreliable and slow.
from builtins import * # noqa
from mock import patch
import functools
import os
from ycmd.tests.test_utils import ( ClearCompletionsCache, IsolatedApp,
SetUpApp, StopCompleterServer,
WaitUntilCompleterServerReady )
shared_app = None
def PathToTestFile( *args ):
  """Return the absolute path of a file below this package's 'testdata'
  directory, assembled from the given path components."""
  here = os.path.dirname( os.path.abspath( __file__ ) )
  return os.path.join( here, 'testdata', *args )
def setUpPackage():
  """Initializes the ycmd server as a WebTest application that will be shared
  by all tests using the SharedYcmd decorator in this package. Additional
  configuration that is common to these tests, like starting a semantic
  subserver, should be done here."""
  global shared_app

  # The Tern completer is disabled while the server starts so that only the
  # generic JavaScript completer is exercised by this package.
  tern_disabled = patch( 'ycmd.completers.javascript.hook.'
                         'ShouldEnableTernCompleter', return_value = False )
  with tern_disabled:
    shared_app = SetUpApp()
    WaitUntilCompleterServerReady( shared_app, 'javascript' )
def tearDownPackage():
  """Stops the shared ycmd completer server once all tests in this package
  have run."""
  StopCompleterServer( shared_app, 'javascript' )
def SharedYcmd( test ):
  """Defines a decorator to be attached to tests of this package. This decorator
  passes the shared ycmd application as a parameter.

  Do NOT attach it to test generators but directly to the yielded tests."""
  @functools.wraps( test )
  def RunWithSharedApp( *args, **kwargs ):
    # Completions are cached per server; clear the cache so each test starts
    # from a clean slate.
    ClearCompletionsCache()
    return test( shared_app, *args, **kwargs )
  return RunWithSharedApp
def IsolatedYcmd( test ):
  """Defines a decorator to be attached to tests of this package. This decorator
  passes a unique ycmd application as a parameter. It should be used on tests
  that change the server state in a irreversible way (ex: a semantic subserver
  is stopped or restarted) or expect a clean state (ex: no semantic subserver
  started, no .ycm_extra_conf.py loaded, etc).

  Do NOT attach it to test generators but directly to the yielded tests."""
  @functools.wraps( test )
  def RunWithIsolatedApp( *args, **kwargs ):
    # Disable the Tern completer for the lifetime of this isolated server,
    # mirroring the shared-app setup.
    tern_disabled = patch( 'ycmd.completers.javascript.hook.'
                           'ShouldEnableTernCompleter', return_value = False )
    with tern_disabled, IsolatedApp() as app:
      try:
        test( app, *args, **kwargs )
      finally:
        StopCompleterServer( app, 'javascript' )
  return RunWithIsolatedApp
| 34.978261 | 80 | 0.726538 |
6d4882b3c8b168ce59786832f20b97eebd5ecad5 | 161 | py | Python | pyorbit/services/__init__.py | davidchristine88/pyorbit | 6bf81573145ec3c6fe22f046920f069817eb0845 | [
"MIT"
] | null | null | null | pyorbit/services/__init__.py | davidchristine88/pyorbit | 6bf81573145ec3c6fe22f046920f069817eb0845 | [
"MIT"
] | 2 | 2018-02-02T18:34:12.000Z | 2018-02-03T14:37:08.000Z | pyorbit/services/__init__.py | davidchristine88/pyorbit | 6bf81573145ec3c6fe22f046920f069817eb0845 | [
"MIT"
] | 3 | 2018-02-02T20:14:37.000Z | 2018-06-22T17:54:52.000Z | from .config import Config
from .status import Status
from .system import System
from .fw import Firmware
from .cell_fw import CellFirmware
from .pki import PKI
| 23 | 33 | 0.813665 |
8445832ebe9055d8f68a97292e9cb16c6ae5895d | 1,538 | py | Python | models/base_rnn.py | sarrouti/multi-class-text-classification-pytorch | 7d208e0b57afa0bdfdd12ac50d5b1f962333f1dc | [
"MIT"
] | 3 | 2021-03-09T02:39:30.000Z | 2022-01-05T06:00:55.000Z | models/base_rnn.py | sarrouti/multi-class-text-classification-pytorch | 7d208e0b57afa0bdfdd12ac50d5b1f962333f1dc | [
"MIT"
] | null | null | null | models/base_rnn.py | sarrouti/multi-class-text-classification-pytorch | 7d208e0b57afa0bdfdd12ac50d5b1f962333f1dc | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 10 02:09:09 2020
@author: sarroutim2
"""
import torch.nn as nn
class BaseRNN(nn.Module):
    """Base class for modules that apply a multi-layer RNN to a sequence.

    Note:
        Do not use this class directly, use one of the sub classes.

    Attributes:
        SYM_MASK: masking symbol
        SYM_EOS: end-of-sequence symbol
    """
    SYM_MASK = "MASK"
    SYM_EOS = "EOS"

    def __init__(self, vocab_size, hidden_dim, dropout, num_layers, rnn_cell):
        """Constructor for BaseRNN.

        Args:
            vocab_size (int): size of the vocabulary
            hidden_dim (int): number of features in the hidden state `h`
            dropout (float): dropout probability
            num_layers (int): number of recurrent layers
            rnn_cell (str): type of RNN cell (Eg. 'LSTM' , 'GRU')
        """
        super(BaseRNN, self).__init__()
        self.vocab_size = vocab_size
        self.hidden_dim = hidden_dim
        self.num_layers = num_layers
        self.dropout = dropout
        # Resolve the cell class (e.g. nn.LSTM / nn.GRU) from its name.
        self.rnn_cell = getattr(nn, rnn_cell.upper())

    def forward(self, *args, **kwargs):
        # Subclasses must implement the actual recurrent computation.
        raise NotImplementedError()
| 30.76 | 82 | 0.613134 |
9ab025fd6e39fbf843e98780bcef23db09059a1e | 901 | py | Python | Flask/Dashboards/Light/app/models.py | stanman71/Python | fe442e421362b22f61d05235e835a568d9ce3aef | [
"MIT"
] | 1 | 2019-02-18T18:56:07.000Z | 2019-02-18T18:56:07.000Z | Flask/Dashboards/Light/app/models.py | stanman71/Python | fe442e421362b22f61d05235e835a568d9ce3aef | [
"MIT"
] | null | null | null | Flask/Dashboards/Light/app/models.py | stanman71/Python | fe442e421362b22f61d05235e835a568d9ce3aef | [
"MIT"
] | null | null | null | # -*- encoding: utf-8 -*-
"""
Light Bootstrap Dashboard - coded in Flask
Author : AppSeed App Generator
Design : Creative-Tim.com
License : MIT
Support : https://appseed.us/support
"""
from app import db
from flask_login import UserMixin
class User(UserMixin, db.Model):
    """SQLAlchemy model for an application user, Flask-Login compatible
    via UserMixin."""

    # Primary key plus unique login name and e-mail address.
    id = db.Column(db.Integer, primary_key=True)
    user = db.Column(db.String(64), unique = True)
    email = db.Column(db.String(120), unique = True)
    # NOTE(review): the password is stored exactly as passed to __init__;
    # presumably callers hash it beforehand — confirm, otherwise this keeps
    # plaintext passwords in the database.
    password = db.Column(db.String(500))

    def __init__(self, user, email, password):
        self.user = user
        self.password = password
        self.email = email

    def __repr__(self):
        # Represent the user by its primary key.
        return '<User %r>' % (self.id)

    def save(self):
        # inject self into db session
        db.session.add ( self )

        # commit change and save the object
        db.session.commit( )

        # return self so calls can be chained
        return self
| 23.710526 | 58 | 0.599334 |
d277e4cebdcff47cf9c3e5ff62409a3bd0eec276 | 49,290 | py | Python | rvGraph.py | ArmelliniMG/sumoDRToffline | 8cb6b24232818ddeb4ec713cd7b124b2d4326cda | [
"MIT"
] | 2 | 2021-07-22T01:47:20.000Z | 2021-09-21T17:55:51.000Z | rvGraph.py | ArmelliniMG/sumoDRToffline | 8cb6b24232818ddeb4ec713cd7b124b2d4326cda | [
"MIT"
] | null | null | null | rvGraph.py | ArmelliniMG/sumoDRToffline | 8cb6b24232818ddeb4ec713cd7b124b2d4326cda | [
"MIT"
] | null | null | null | # (C) 2021 Armellini
# This code is licensed under MIT license (see LICENSE for details)
# @file rvGraph.py
# @author Maria Giuliana Armellini
# @date 2020-01-01
# Creates the pairwise Graph for requests and vehicles
try:
import xml.etree.cElementTree as ET
except ImportError as e:
print("recovering from ImportError '%s'" % e)
import xml.etree.ElementTree as ET
import subprocess
import re
from collections import namedtuple
import sys
import os
if 'SUMO_HOME' in os.environ:
tools = os.path.join(os.environ['SUMO_HOME'], 'tools')
sys.path.append(tools)
else:
sys.exit("please declare environment variable 'SUMO_HOME'")
import sumolib # noqa
def get_direct_routes(options, requests):
    """Compute the direct (shortest) route of every request with duarouter
    and derive missing pickup/drop-off time windows from it.

    Writes one trip per request to a temp file, runs SUMO's duarouter on
    it, then parses the resulting travel times. For requests without
    explicit windows, the windows are derived from the direct travel time
    and the detour factor (drf), bounded below by options.drf_min.
    Mutates the request objects in place (direct_route, orig_window,
    dest_window).
    """
    # determined shortest route for each request
    with open("temp_dua/aux_dua_input.xml", "w+") as dua_file:
        dua_file.write("<routes>\n")
        for req in requests:
            # write Duarouter input
            # trip id encodes the request: "SR-<id>y_<id>z" (y=pickup, z=drop-off)
            dua_file.write("""\t<trip id="SR-%sy_%sz" depart="0.0" departPos="%0.1f" from="%s" to="%s" arrivalPos="%0.1f"/>\n""" %  # noqa
                           (req.ID, req.ID, req.orig_pos, req.orig_edge,
                            req.dest_edge, req.dest_pos))
        dua_file.write("</routes>\n")

    # run duarouter:
    duarouter = sumolib.checkBinary('duarouter')
    subprocess.call([duarouter, "-n", options.network,
                     "--route-files", "temp_dua/aux_dua_input.xml",
                     "-o", "temp_dua/aux_dua_output.xml",
                     "--ignore-errors", "true", "--no-warnings", "true"])

    # parse solutions:
    for trip, route in sumolib.xml.parse_fast_nested("temp_dua/aux_dua_output.alt.xml",  # noqa
                                                     "vehicle", ("id"),
                                                     "route", ("cost", "edges")):  # noqa
        travel_time = int(float(route.cost))
        # recover the request id from the trip id "SR-<id>y_<id>z"
        route_id = trip.id.split("-")[1]
        req_id = int(route_id.split("_")[1][:-1])
        request = [req for req in requests if req.ID == req_id][0]

        # set direct route time (minimum travel time)
        request.direct_route = travel_time + options.stop_length
        if len(route.edges.split(" ")) <= 1:
            print("Warning: Request %s has same origin/destination edge" %
                  request.ID)

        # define time windows with drf if not given
        if request.orig_window and request.dest_window:
            # both windows given explicitly: nothing to derive
            continue
        elif not request.orig_window and not request.dest_window:
            # no windows given: anchor everything on the depart time
            earliest_pickup = request.depart
            earliest_dropoff = earliest_pickup + travel_time
            latest_dropoff = earliest_pickup + max(options.drf_min,
                                                   (travel_time * request.drf))
            latest_pickup = latest_dropoff - travel_time
        elif not request.orig_window and request.dest_window:
            # only destination window given: derive the pickup window from it
            # reset value if pickup at depart time lead to a longer route
            earliest_pickup = max(request.depart,
                                  request.dest_window[1] - travel_time * request.drf)  # noqa
            # earliest pick up not later than earliest drop off
            earliest_pickup = min(earliest_pickup,
                                  request.dest_window[0] - travel_time)
            latest_pickup = request.dest_window[1] - travel_time
        elif request.orig_window and not request.dest_window:
            # only origin window given: derive the drop-off window from it
            earliest_dropoff = request.orig_window[0] + travel_time
            latest_dropoff = request.orig_window[1] + max(options.drf_min,
                                                          (travel_time * request.drf))  # noqa

        # set time windows (only the ones that were missing; each branch
        # above defines exactly the variables its missing window needs)
        if not request.orig_window:
            request.orig_window = [earliest_pickup, latest_pickup]
        if not request.dest_window:
            request.dest_window = [earliest_dropoff, latest_dropoff]

        # DEBUG only
        if request.orig_window[0] > request.orig_window[1]:
            print("Bug: Origin window of request ", req_id,
                  " not possible. Latest time < than earlier")
        if request.dest_window[0] > request.dest_window[1]:
            print("Bug: Destination window of request ", req_id,
                  " not possible. Latest time < than earlier")
def get_pt_routes(requests, options, unique_stops, service_areas, req_trip,
                  drt_service, pt_stops):
    """Determine the DRT service area(s) of each request and, where needed,
    a public transport (PT) connection for the trip.

    Requests with origin and destination inside the same DRT area get a
    direct DRT trip. Requests with only one end inside an area need a PT
    leg: duarouter is run in intermodal mode to find the connection, and
    walking legs longer than options.maxwalk are replaced by DRT
    first/last-mile trips appended to req.trips. Requests with no feasible
    service are marked rejected and get a rejected_cause.

    Side effects: writes/reads files under temp_dua/, runs duarouter and
    mutates the request objects in place.
    """
    # write the duarouter input file for the intermodal (PT) search
    with open("temp_dua/aux_duapt_input.xml", "w+") as dua_PTfile:
        dua_PTfile.write("<routes>\n")
        requests.sort(key=lambda x: x.depart)
        for req in requests:
            req.drt = []
            search_PT = False
            # classify the request against every DRT service area
            for area in service_areas:
                if req.orig_edge in area.edges and req.dest_edge in area.edges:  # noqa
                    # request completely inside a DRT area: direct DRT trip
                    req.drt.append(drt_service(area.name, "both"))
                    req.trips.append(req_trip(req.ID, req.ID, req.orig_edge,
                                     req.dest_edge, req.orig_pos, req.dest_pos,
                                     req.depart, req.orig_window,
                                     req.dest_window, area.name, None, None,
                                     None, None, None))
                elif req.orig_edge in area.edges:
                    # only request origin in DRT area:
                    # first mile with DRT and then PT
                    req.drt.append(drt_service(area.name, "first"))
                    search_PT = True
                elif req.dest_edge in area.edges:
                    # only request destination in DRT area:
                    # last mile with DRT (after PT)
                    req.drt.append(drt_service(area.name, "last"))
                    search_PT = True
            if not req.drt:
                # Trip not possible: request origin and destination are not
                # in any DRT service area
                print("Request %s could not be served. Origin and destination outside DRT service areas" % req.ID)  # noqa
                req.rejected = True
                req.rejected_cause = "No DRT service available in trip area"

            if search_PT:
                # a PT leg is necessary: write the intermodal trip
                dua_PTfile.write("""\t<person id="%s" depart="%d">\n""" %
                                 (req.ID, req.depart))
                dua_PTfile.write("""\t\t<personTrip from="%s" to="%s" modes="public"/>\n""" %  # noqa
                                 (req.orig_edge, req.dest_edge))
                dua_PTfile.write("\t</person>\n")
                # write walks between the request ends and every PT stop;
                # their times are checked later against options.maxwalk
                for stop_id, stop_edge in unique_stops.items():
                    dua_PTfile.write("""\t<person id="%s-%s" depart="0">\n""" %
                                     (req.ID, stop_id))
                    dua_PTfile.write("""\t\t<walk from="%s" to="%s"/>\n""" %
                                     (req.orig_edge, stop_edge))
                    dua_PTfile.write("\t</person>\n")
                    dua_PTfile.write("""\t<person id="%s-%s" depart="0">\n""" %
                                     (stop_id, req.ID))
                    dua_PTfile.write("""\t\t<walk from="%s" to="%s"/>\n""" %
                                     (stop_edge, req.dest_edge))
                    dua_PTfile.write("\t</person>\n")
        dua_PTfile.write("</routes>\n")

    # run Duarouter for public transport:
    duarouter = sumolib.checkBinary('duarouter')
    subprocess.call([duarouter, "-n", options.network, "--additional-files",
                     options.additional, "--route-files",
                     "temp_dua/aux_duapt_input.xml", "-o",
                     "temp_dua/aux_duapt_output.xml", "--ignore-errors",
                     "true", "--no-warnings", "true"])

    # parse the routed persons; reject trips that require more than one
    # PT ride (a transfer)
    pt_combination = {}
    pt_routed = []
    count = -1
    index = None
    rejected = None
    # initialized here to avoid a NameError in the "person" branch when the
    # first routed person has no walk/ride elements at all
    from_stop = to_stop = pt_line = None
    for _, parsenode in ET.iterparse("temp_dua/aux_duapt_output.xml", None):
        count += 1
        if parsenode.tag == "walk" and "busStop" in parsenode.attrib and not index:  # noqa
            from_stop = parsenode.attrib["busStop"]
        elif parsenode.tag == "ride":
            if index:
                # if person needs more than 1 pt ride, trip not possible
                rejected = True
                continue
            to_stop = parsenode.attrib["busStop"]
            pt_line = parsenode.attrib["lines"]
            index = count
        elif parsenode.tag == "person" and "-" not in parsenode.attrib["id"]:
            pt_routed.append(int(parsenode.attrib["id"]))
            if rejected:
                # if person needs more than 1 pt ride, trip not possible
                req = [x for x in requests
                       if x.ID == int(parsenode.attrib["id"])][0]
                print("Request %s could not be served. PT connection requires multiple lines" %  # noqa
                      parsenode.attrib["id"])
                req.rejected = True
                req.rejected_cause = "PT connection requires multiple lines"
            elif not from_stop:
                # no pt combination possible at all
                req = [x for x in requests
                       if x.ID == int(parsenode.attrib["id"])][0]
                print("Request %s could not be served. PT connection not available" %  # noqa
                      parsenode.attrib["id"])
                req.rejected = True
                req.rejected_cause = "PT connection not available"
            else:
                pt_combination[parsenode.attrib["id"]] = [from_stop, to_stop,
                                                          pt_line, index]
            # reset the per-person state before the next person
            from_stop, to_stop, pt_line, index, rejected = [None] * 5
            count = -1
        elif parsenode.tag == "person" and "-" in parsenode.attrib["id"]:
            # walk-only helper person: only reset the element counter
            count = -1
    for req in requests:
        if req.ID not in pt_routed:
            print("Request %s could not be served. PT connection not available" % req.ID)  # noqa
            req.rejected = True
            req.rejected_cause = "PT connection not available"

    # parse travel times
    travel_time = {}
    for person, trip in sumolib.xml.parse_fast_nested("temp_dua/aux_duapt_output.alt.xml",  # noqa
                                                      "person", ("id"),
                                                      "personTrip", ("costs")):
        costs = [int(float(x)) for x in trip.costs.split(' ')]
        if "-" in person.id:
            # walk-only person: should be only 1 cost
            travel_time[person.id] = sum(costs)
        elif person.id in pt_combination:
            # first/last cost is the walk time to/from the stop
            ride_index = pt_combination[person.id][3]
            walk_first = sum(costs[:ride_index])
            walk_last = sum(costs[ride_index+1:])
            travel_time[person.id] = [walk_first, costs[ride_index], walk_last]

    # read public transport routes:
    for key in pt_combination:
        req = [x for x in requests if x.ID == int(key)][0]
        bus_stop_from = pt_combination[key][0]
        bus_stop_to = pt_combination[key][1]
        pt_line = pt_combination[key][2]
        first_mile = travel_time[key][0]
        last_mile = travel_time[key][2]
        string_pos = 96  # chr(97) == 'a': suffix for alternative trip IDs

        # set DRT service areas for first and last mile
        if req.drt[0].leg == "first":
            first_area = req.drt[0].area
            try:
                second_area = req.drt[1].area
            except IndexError:
                # no second DRT service registered for this request
                second_area = None
        else:
            second_area = req.drt[0].area
            try:
                first_area = req.drt[1].area
            except IndexError:
                first_area = None
        for area in service_areas:
            if area.name == first_area:
                first_area_edges = area.edges
            if area.name == second_area:
                second_area_edges = area.edges

        if first_mile <= options.maxwalk and last_mile <= options.maxwalk:
            # both walking legs are short enough: no DRT needed
            print("Request %s could not be served. Connection with PT with a short walking time is available" %  # noqa
                  req.ID)
            req.rejected = True
            req.rejected_cause = "PT connection accessible by walking"
        elif first_mile > options.maxwalk and last_mile > options.maxwalk:
            if first_area is None or second_area is None:
                print("Request %s could not be served. Walking times from/to PT exceed the maximum and there is no DRT service for the first/last mile" %  # noqa
                      req.ID)
                req.rejected = True
                req.rejected_cause = "No DRT service available in first/last mile area"  # noqa
                continue
            # Max walking time to/from PT is exceeded: DRT service for first
            # and second leg trip. Search all usable pairs of PT stops.
            for stop1 in pt_stops:
                if pt_line not in stop1.line:
                    continue
                elif stop1.edge not in first_area_edges:
                    continue
                elif not req.orig_window[0] < (stop1.time - options.minwait):
                    continue
                pt_vehicle = stop1.vehicle
                for stop2 in pt_stops:
                    if stop1.stop == stop2.stop:
                        continue
                    if pt_line not in stop2.line:
                        continue
                    elif stop1.time > stop2.time:
                        continue
                    elif not stop2.vehicle == pt_vehicle:
                        continue
                    elif stop2.edge not in second_area_edges:
                        continue
                    elif not (stop2.time + options.minwait) < req.dest_window[1]:  # noqa
                        continue
                    # first mile trip: DRT from the origin to stop1
                    string_pos = string_pos+1
                    first_dest_window = [max(req.orig_window[0], (stop1.time - options.maxwait)),  # noqa
                                         (stop1.time - options.minwait)]
                    first_orig_window = [req.orig_window[0],
                                         min(req.orig_window[1], first_dest_window[1])]  # noqa
                    req.trips.append(req_trip(req.ID, "%i%s" % (req.ID, chr(string_pos)),  # noqa
                                              req.orig_edge, stop1.edge,
                                              req.orig_pos, 10, req.depart,
                                              first_orig_window,
                                              first_dest_window,
                                              first_area, pt_line, pt_vehicle,
                                              stop1.stop, stop2.stop, "first"))
                    if first_dest_window[0] > first_dest_window[1]:
                        print("Bug: Destination window of request ", req.ID,
                              " for Trip ", req.ID,
                              "%i%s" % (req.ID, chr(string_pos)),
                              "not possible. Latest time minor than earlier")
                    # last mile trip: DRT from stop2 to the destination;
                    # shares the ID suffix with the first mile trip above
                    last_orig_window = [(stop2.time + options.minwait),
                                        min(stop2.time + options.maxwait, req.dest_window[1])]  # noqa
                    req.trips.append(req_trip("%i%s" % (req.ID, chr(string_pos)),  # noqa
                                              req.ID, stop2.edge,
                                              req.dest_edge, 10, req.dest_pos,
                                              req.depart, last_orig_window,
                                              req.dest_window, second_area,
                                              pt_line, pt_vehicle, stop1.stop,
                                              stop2.stop, "last"))
                    if last_orig_window[0] > last_orig_window[1]:
                        print("Bug: Origin window of request ", req.ID,
                              " for Trip ", "%i%s" % (req.ID, chr(string_pos)),
                              req.ID, "not possible. Latest time minor than earlier")  # noqa
                    # set request value to 0.5 to secure that both first
                    # and last mile will be served
                    req.value = 0.5
        elif first_mile > options.maxwalk:
            if first_area is None:
                print("Request %s could not be served. First mile requires DRT service and there is no service in the area" %  # noqa
                      req.ID)
                req.rejected = True
                req.rejected_cause = "No DRT service available in first mile area"  # noqa
                continue
            # Max walking time to PT exceeded: DRT service from origin to
            # PT line (first leg trip)
            stop2 = bus_stop_to
            possible = False
            # first check which pt vehicles can take the request to arrive
            # at the destination on time
            walk_time = travel_time["%s-%s" % (stop2, req.ID)]
            latest_PT_arrival = req.dest_window[1] - walk_time
            try:
                stop2_vehicles = [stop.vehicle for stop in pt_stops
                                  if stop.stop == stop2 and
                                  req.orig_window[0] < stop.time <= latest_PT_arrival]  # noqa
            except Exception:
                # incomplete stop data: no feasible vehicle for this request
                continue
            for stop2_vehicle in stop2_vehicles:
                stop2_times = [stop.time for stop in pt_stops
                               if stop.stop == stop2 and
                               req.orig_window[0] < stop.time <= latest_PT_arrival]  # noqa
                for stop2_time in stop2_times:
                    # search all usable PT stops
                    for stop1 in pt_stops:
                        if stop1.stop == stop2:
                            continue
                        if pt_line not in stop1.line:
                            continue
                        elif stop1.time > stop2_time:
                            continue
                        elif stop1.edge not in first_area_edges:
                            continue
                        elif stop1.vehicle not in stop2_vehicle:
                            continue
                        elif not req.orig_window[0] < (stop1.time - options.minwait):  # noqa
                            continue
                        # stop1 is served by the pt line, lies inside the DRT
                        # service area, belongs to a feasible vehicle and is
                        # reached after the earliest request depart
                        possible = True
                        string_pos = string_pos+1
                        first_dest_window = [max(req.orig_window[0], (stop1.time - options.maxwait)),  # noqa
                                             (stop1.time - options.minwait)]
                        first_orig_window = [req.orig_window[0],
                                             min(req.orig_window[1], first_dest_window[1])]  # noqa
                        req.trips.append(req_trip("%i%s" % (req.ID, chr(string_pos)),  # noqa
                                                  "%i%s" % (req.ID, chr(string_pos)),  # noqa
                                                  req.orig_edge, stop1.edge,
                                                  req.orig_pos, 10, req.depart,
                                                  first_orig_window,
                                                  first_dest_window,
                                                  first_area, pt_line,
                                                  stop2_vehicle, stop1.stop,
                                                  stop2, "first"))
                        if first_dest_window[0] > first_dest_window[1]:
                            print("Bug: Destination window of request ",
                                  req.ID, " for Trip ", req.ID,
                                  "%i%s" % (req.ID, chr(string_pos)),
                                  "not possible. Latest time minor than earlier")  # noqa
            if possible is False:
                print("Request %s could not be served. No combination between PT and DRT for first mile possible" %  # noqa
                      req.ID)
                req.rejected = True
                req.rejected_cause = "No DRT-PT combination for first mile"
        elif last_mile > options.maxwalk:
            if second_area is None:
                print("Request %s could not be served. Last mile requires DRT service and there is no service in the area" %  # noqa
                      req.ID)
                req.rejected = True
                req.rejected_cause = "No DRT service available in last mile area"  # noqa
                continue
            # Max walking time from PT exceeded: DRT service from PT line to
            # destination (last leg trip)
            stop1 = bus_stop_from
            possible = False
            # first check which PT vehicles the request can catch,
            # considering the walking time to the stop
            walk_time = travel_time["%s-%s" % (req.ID, stop1)]
            earliest_PT_arrival = req.orig_window[0] + walk_time
            try:
                stop1_vehicles = [stop.vehicle for stop in pt_stops
                                  if stop.stop == stop1 and
                                  (earliest_PT_arrival + options.minwait) < stop.time < req.dest_window[1]]  # noqa
            except Exception:
                # incomplete stop data: no feasible vehicle for this request
                continue
            for stop1_vehicle in stop1_vehicles:
                stop1_times = [stop.time for stop in pt_stops
                               if stop.stop == stop1 and
                               (earliest_PT_arrival + options.minwait) < stop.time < req.dest_window[1]]  # noqa
                for stop1_time in stop1_times:
                    # search all usable PT stops
                    for stop2 in pt_stops:
                        if stop1 == stop2.stop:
                            continue
                        if pt_line not in stop2.line:
                            continue
                        if stop1_time > stop2.time:
                            continue
                        elif stop2.edge not in second_area_edges:
                            continue
                        elif stop2.vehicle not in stop1_vehicle:
                            continue
                        elif not (stop2.time + options.minwait) < req.dest_window[1]:  # noqa
                            continue
                        # stop2 is served by the pt line, lies inside the DRT
                        # service area, belongs to a feasible vehicle and is
                        # reached before the latest request drop-off
                        depart = [stop.time for stop in pt_stops
                                  if stop.stop == stop1 and
                                  stop.vehicle == stop2.vehicle][0] - walk_time - options.minwait  # noqa
                        possible = True
                        string_pos = string_pos+1
                        last_orig_window = [(stop2.time + options.minwait),
                                            min(stop2.time + options.maxwait, req.dest_window[1])]  # noqa
                        last_dest_window = [max(last_orig_window[0], req.dest_window[0]),  # noqa
                                            req.dest_window[1]]
                        req.trips.append(req_trip("%i%s" % (req.ID, chr(string_pos)),  # noqa
                                                  "%i%s" % (req.ID, chr(string_pos)),  # noqa
                                                  stop2.edge, req.dest_edge,
                                                  10, req.dest_pos, depart,
                                                  last_orig_window,
                                                  last_dest_window,
                                                  second_area, pt_line,
                                                  stop1_vehicle, stop1,
                                                  stop2.stop, "last"))
                        if last_orig_window[0] > last_orig_window[1]:
                            print("Bug: Origin window of request ", req.ID,
                                  " for Trip ",
                                  "%i%s" % (req.ID, chr(string_pos)), req.ID,
                                  "not possible. Latest time minor than earlier")  # noqa
            if possible is False:
                print("Request %s could not be served. No combination between PT and DRT for last mile possible" %  # noqa
                      req.ID)
                req.rejected = True
                req.rejected_cause = "No DRT-PT combination for last mile"
def pair_possible(req1, req2, first_stop, second_stop, travel_time):
    """Check whether two request stops can be served consecutively.

    first_stop and second_stop are stop IDs of the form "<tripID>y"
    (pickup) or "<tripID>z" (drop-off); a trip ID ending in a letter
    (e.g. "5a") marks a first/last-mile sub-trip. travel_time is the
    driving time between the two stops.

    Returns True if serving second_stop directly after first_stop can
    respect the pickup/drop-off time windows of both requests.

    Bug fix: the original code tested ``stop[-2] is str`` — comparing a
    character with the *type* ``str``, which is always False — so the
    sub-trip window lookup was dead code; it is now ``stop[-2].isalpha()``
    with a fallback to the request-level window when no sub-trip matches.
    """
    # resolve pickup (orig) and drop-off (dest) windows for the first stop
    if first_stop.endswith("y"):
        first_orig_window = [trip.orig_window for trip in req1.trips
                             if str(trip.orig_ID) == first_stop[:-1]][0]
        if first_stop[-2].isalpha():
            windows = [trip.dest_window for trip in req1.trips
                       if str(trip.dest_ID) == first_stop[:-2]]
            first_dest_window = windows[0] if windows else req1.dest_window
        else:
            first_dest_window = req1.dest_window
    elif first_stop.endswith("z"):
        first_dest_window = [trip.dest_window for trip in req1.trips
                             if str(trip.dest_ID) == first_stop[:-1]][0]
        if first_stop[-2].isalpha():
            windows = [trip.orig_window for trip in req1.trips
                       if str(trip.dest_ID) == first_stop[:-2]]
            first_orig_window = windows[0] if windows else req1.orig_window
        else:
            first_orig_window = req1.orig_window

    # resolve pickup (orig) and drop-off (dest) windows for the second stop
    if second_stop.endswith("y"):
        second_orig_window = [trip.orig_window for trip in req2.trips
                              if str(trip.orig_ID) == second_stop[:-1]][0]
        if second_stop[-2].isalpha():
            windows = [trip.dest_window for trip in req2.trips
                       if str(trip.dest_ID) == second_stop[:-2]]
            second_dest_window = windows[0] if windows else req2.dest_window
        else:
            second_dest_window = req2.dest_window
    elif second_stop.endswith("z"):
        second_dest_window = [trip.dest_window for trip in req2.trips
                              if str(trip.dest_ID) == second_stop[:-1]][0]
        if second_stop[-2].isalpha():
            windows = [trip.orig_window for trip in req2.trips
                       if str(trip.dest_ID) == second_stop[:-2]]
            second_orig_window = windows[0] if windows else req2.orig_window
        else:
            second_orig_window = req2.orig_window

    possible_pair = False
    if first_stop.endswith("y") and second_stop.endswith("y"):
        # picking up req1 at its earliest time must still allow picking up
        # req2 before its latest time, without exceeding req1's latest
        # drop-off
        if (first_orig_window[0] + travel_time) <= second_orig_window[1] and \
                (first_orig_window[0] + travel_time) <= first_dest_window[1]:
            possible_pair = True
    elif first_stop.endswith("y") and second_stop.endswith("z"):
        # picking up req1 at its earliest time must still allow dropping
        # off req2 before its latest time, without exceeding req1's latest
        # drop-off
        if (first_orig_window[0] + travel_time) <= second_dest_window[1] and \
                (first_orig_window[0] + travel_time) <= first_dest_window[1]:
            possible_pair = True
    elif first_stop.endswith("z") and second_stop.endswith("y"):
        # dropping off req1 at its earliest time must still allow picking
        # up req2 before its latest time
        if (first_dest_window[0] + travel_time) < second_orig_window[1]:
            possible_pair = True
    elif first_stop.endswith("z") and second_stop.endswith("z"):
        # dropping off req1 at its earliest time must still allow dropping
        # off req2 before its latest time, and req2 must be picked up
        # before req1's latest drop-off
        if (first_dest_window[0] + travel_time) <= second_dest_window[1] and \
                second_orig_window[0] <= first_dest_window[1]:
            possible_pair = True
    return possible_pair
def get_routes(options, requests, vehicles, RV_dic, RD_dic):
    """Route every relevant stop pair with duarouter and collect travel times.

    Writes an auxiliary trip file containing:
      SR pairs: direct routes of a request (origin -> destination)
      VR pairs: vehicle depot -> request pick-up
      RD pairs: request drop-off -> vehicle depot
      RR pairs: feasible orderings of stops of two different requests
    then runs duarouter on it and parses the resulting travel times.

    Side effects: a request whose direct route cannot satisfy its time
    windows loses that trip alternative and may be marked rejected.

    :param options: parsed options (network, additional, stop_length, ...)
    :param requests: request objects with ``trips`` alternatives
    :param vehicles: available DRT vehicles
    :param RV_dic: dict to fill, route id -> (travel time incl. stop time,
        pax delta, wheelchair pax delta); deltas are positive at pick-ups
        and negative at drop-offs
    :param RD_dic: dict to fill, route id -> travel time to the depot
    :return: the updated ``(RV_dic, RD_dic)``
    """
    # creates input file for duarouter
    with open("temp_dua/aux_dua_input.xml", "w+") as dua_file:
        # VR pair: combination between vehicle and request pick-up
        # RD pair: request to vehicle depot
        # RR pair: combination between two different requests
        # TODO this can be improved
        dua_file.write("<routes>\n")
        counter = 0  # avoid repeated route IDs
        for index1, req_t1 in enumerate(requests):  # for each request
            if req_t1.rejected is True:
                continue
            for req1 in req_t1.trips:
                # write SR (direct route) pair
                dua_file.write("""\t<trip id="SR%s-%sy_%sz" depart="0.0" departPos="%0.1f" from="%s" to="%s" arrivalPos="%0.1f" type="%s"/>\n""" %  # noqa
                               (counter, req1.orig_ID, req1.dest_ID,
                                req1.orig_pos, req1.orig_edge, req1.dest_edge,
                                req1.dest_pos, vehicles[0].type))
                # write VR (vehicle and request pick-up) and
                # RD (request to vehicle depot) pairs
                for vehicle in vehicles:
                    if vehicle.area == req1.service_area:
                        dua_file.write("""\t<trip id="VR%s-%s_%sy" depart="0.0" departPos="%0.1f" from="%s" to="%s" arrivalPos="%0.1f" type="%s"/>\n""" %  # noqa
                                       (counter, vehicle.ID, req1.orig_ID,
                                        vehicle.depot_pos, vehicle.depot,
                                        req1.orig_edge, req1.orig_pos,
                                        vehicle.type))
                        dua_file.write("""\t<trip id="RD%s-%sz_%s" depart="0.0" departPos="%0.1f" from="%s" to="%s" arrivalPos="%0.1f" type="%s"/>\n""" %  # noqa
                                       (counter, req1.dest_ID, vehicle.ID,
                                        req1.orig_pos, req1.orig_edge,
                                        vehicle.depot, vehicle.depot_pos,
                                        vehicle.type))
                        counter += 1  # avoid repeated route IDs
                # write RR pairs ("0p1d", pick-up 0 and then deliver 1)
                # NOTE(review): 'vehicle' below is whatever the loop above
                # bound last — this assumes a homogeneous fleet vehicle
                # type; confirm for mixed fleets.
                for req_t2 in requests[index1 + 1:]:
                    if req_t2.rejected is True:
                        continue
                    for req2 in req_t2.trips:
                        if req1.service_area == req2.service_area:
                            # combination only possible if requests are in the
                            # same area
                            if req1.orig_window[0] <= req2.orig_window[1] and \
                               req2.orig_window[0] <= req1.dest_window[1]:
                                # if earliest pick up of req 1 before latest
                                # pick up time of req 2 and if earliest pick up
                                # time of req 2 before latest drop off of req1:
                                # combination 1p2p possible
                                dua_file.write("""\t<trip id="RR%s-%sy_%sy" depart="0.0" departPos="%0.1f" from="%s" to="%s" arrivalPos="%0.1f" type="%s"/>\n""" %  # noqa
                                               (counter, req1.orig_ID,
                                                req2.orig_ID, req1.orig_pos,
                                                req1.orig_edge, req2.orig_edge,
                                                req2.orig_pos, vehicle.type))
                            if req1.orig_window[0] <= req2.dest_window[1] and \
                               req2.dest_window[0] <= req1.dest_window[1] and \
                               req2.orig_window[0] <= req1.orig_window[1]:
                                # if earliest pick up of req 1 before latest
                                # drop off of req 2 and if earliest drop off of
                                # req 2 before latest drop off of req 1 and
                                # if earliest pick up of req 2 before latest
                                # pick up of req 1: combination 1p2d possible
                                dua_file.write("""\t<trip id="RR%s-%sy_%sz" depart="0.0" departPos="%0.1f" from="%s" to="%s" arrivalPos="%0.1f" type="%s"/>\n""" %  # noqa
                                               (counter, req1.orig_ID,
                                                req2.dest_ID, req1.orig_pos,
                                                req1.orig_edge, req2.dest_edge,
                                                req2.dest_pos, vehicle.type))
                            if req1.dest_window[0] <= req2.orig_window[1]:
                                # if earliest drop off of req 1 before latest
                                # pick up of req 2: combination 1d2p possible
                                dua_file.write("""\t<trip id="RR%s-%sz_%sy" depart="0.0" departPos="%0.1f" from="%s" to="%s" arrivalPos="%0.1f" type="%s"/>\n""" %  # noqa
                                               (counter, req1.dest_ID,
                                                req2.orig_ID, req1.dest_pos,
                                                req1.dest_edge, req2.orig_edge,
                                                req2.orig_pos, vehicle.type))
                            if req1.dest_window[0] <= req2.dest_window[1] and \
                               req2.orig_window[0] <= req1.dest_window[1]:
                                # if earliest drop off of req 1 before latest
                                # drop off of req 2 and if earliest pick up of
                                # req 2 before latest drop off of req 1:
                                # combination 1d2d possible
                                dua_file.write("""\t<trip id="RR%s-%sz_%sz" depart="0.0" departPos="%0.1f" from="%s" to="%s" arrivalPos="%0.1f" type="%s"/>\n""" %  # noqa
                                               (counter, req1.dest_ID,
                                                req2.dest_ID, req1.dest_pos,
                                                req1.dest_edge, req2.dest_edge,
                                                req2.dest_pos, vehicle.type))
                            if req2.orig_window[0] <= req1.orig_window[1] and \
                               req1.orig_window[0] <= req2.dest_window[1]:
                                # if earliest pick up of req 2 before latest
                                # pick up time of req 1 and if earliest pick up
                                # of req 1 before latest drop off time of req 2
                                # combination 2p1p possible
                                dua_file.write("""\t<trip id="RR%s-%sy_%sy" depart="0.0" departPos="%0.1f" from="%s" to="%s" arrivalPos="%0.1f" type="%s"/>\n""" %  # noqa
                                               (counter, req2.orig_ID,
                                                req1.orig_ID, req2.orig_pos,
                                                req2.orig_edge, req1.orig_edge,
                                                req1.orig_pos, vehicle.type))
                            if req2.orig_window[0] <= req1.dest_window[1] and \
                               req1.dest_window[0] <= req2.dest_window[1] and \
                               req1.orig_window[0] <= req2.orig_window[1]:
                                # if earliest pickup of req 2 before latest
                                # drop off of req 1 and if earliest drop off of
                                # req 1 before latest drop off of req 2 and
                                # if earliest pickup of req 1 before latest
                                # pickup of req 2: combination 2p1d possible
                                dua_file.write("""\t<trip id="RR%s-%sy_%sz" depart="0.0" departPos="%0.1f" from="%s" to="%s" arrivalPos="%0.1f" type="%s"/>\n""" %  # noqa
                                               (counter, req2.orig_ID,
                                                req1.dest_ID, req2.orig_pos,
                                                req2.orig_edge, req1.dest_edge,
                                                req1.dest_pos, vehicle.type))
                            if req2.dest_window[0] <= req1.orig_window[1]:
                                # if earliest drop off of req 2 before latest
                                # pick up of req 1: combination 2d1p possible
                                dua_file.write("""\t<trip id="RR%s-%sz_%sy" depart="0.0" departPos="%0.1f" from="%s" to="%s" arrivalPos="%0.1f" type="%s"/>\n""" %  # noqa
                                               (counter, req2.dest_ID,
                                                req1.orig_ID, req2.dest_pos,
                                                req2.dest_edge, req1.orig_edge,
                                                req1.orig_pos, vehicle.type))
                            if req2.dest_window[0] <= req1.dest_window[1] and \
                               req1.orig_window[0] <= req2.dest_window[1]:
                                # if earliest drop off of req 2 before latest
                                # drop off of req 1 and if earliest pick up of
                                # req 1 before latest drop off of req 2:
                                # combination 2d1d possible
                                dua_file.write("""\t<trip id="RR%s-%sz_%sz" depart="0.0" departPos="%0.1f" from="%s" to="%s" arrivalPos="%0.1f" type="%s"/>\n""" %  # noqa
                                               (counter, req2.dest_ID,
                                                req1.dest_ID, req2.dest_pos,
                                                req2.dest_edge, req1.dest_edge,
                                                req1.dest_pos, vehicle.type))
                        counter = counter + 1  # avoid repeated route IDs
        dua_file.write("</routes>\n")
    # runs Duarouter
    duarouter = sumolib.checkBinary('duarouter')
    subprocess.call([duarouter, "-n", options.network, "--additional-files",
                     options.additional, "--route-files",
                     "temp_dua/aux_dua_input.xml", "-o",
                     "temp_dua/aux_dua_output.xml", "--no-warnings", "true"])
    removed_trips = []
    # parse SR solutions:
    # ("id",) is an explicit single-attribute tuple for parse_fast_nested
    for trip, route in sumolib.xml.parse_fast_nested("temp_dua/aux_dua_output.alt.xml",  # noqa
                                                     "vehicle", ("id",),
                                                     "route", ("cost", "edges")):  # noqa
        if "SR" not in trip.id:
            continue
        route_id = trip.id.split("-")[1]
        travel_time = int(float(route.cost))
        route_edges = route.edges.split(" ")
        req_id = route_id.split("_")[1]
        req_id = [s for s in req_id if s.isdigit()]
        req_id = int(''.join(req_id))
        req = [req for req in requests if req.ID == req_id][0]
        # consider time at stop
        if len(route_edges) > 1:
            travel_u_stop_time = travel_time + options.stop_length
        else:
            # if stops are the same, consider stop time only once
            travel_u_stop_time = travel_time
        # parse SR routes (direct)
        # check if PT combinations found are possible.
        # Iterate over a copy: removing from req.trips while iterating it
        # directly would silently skip the element after each removal.
        # ('drt_trip' also avoids shadowing the outer parse variable 'trip'.)
        for drt_trip in list(req.trips):
            if (drt_trip.orig_window[0] + travel_u_stop_time) > drt_trip.dest_window[1]:  # noqa
                # trip not possible
                removed_trips.append(drt_trip.orig_ID)
                req.trips.remove(drt_trip)
        if not req.trips:
            req.rejected = True
            req.rejected_cause = "No DRT-PT combination possible"
        else:
            RV_dic[route_id] = (travel_u_stop_time, -req.pax, -req.pax_wc)
    # parse rest solutions:
    for trip, route in sumolib.xml.parse_fast_nested("temp_dua/aux_dua_output.alt.xml",  # noqa
                                                     "vehicle", ("id",),
                                                     "route", ("cost", "edges")):  # noqa
        if "SR" in trip.id:
            continue
        route_id = trip.id.split("-")[1]
        # check if request rejected in SR parsing
        if route_id.split("_")[0][:-1] in removed_trips or \
           route_id.split("_")[1][:-1] in removed_trips:
            continue
        if route_id in RV_dic:
            # route already parsed
            continue
        travel_time = int(float(route.cost))
        route_edges = route.edges.split(" ")
        if "RD" in trip.id:
            # parse RD routes (request to vehicle depot)
            # check if request rejected in SR parsing
            if route_id.split("_")[0][:-1] in removed_trips:
                continue
            if route_id not in RD_dic:
                RD_dic[route_id] = travel_time
            continue
        req_id = route_id.split("_")[1]
        req_id = [s for s in req_id if s.isdigit()]
        req_id = int(''.join(req_id))
        req = [req for req in requests if req.ID == req_id][0]
        # consider time at stop
        if len(route_edges) > 1:
            travel_u_stop_time = travel_time + options.stop_length
        else:
            # if stops are the same, consider stop time only once
            travel_u_stop_time = travel_time
        if "VR" in trip.id:
            # parse VR routes (vehicle and request pick-up)
            vehicle_id = route_id.split("_")[0]
            vehicle = [vehicle for vehicle in vehicles
                       if vehicle.ID == vehicle_id][0]
            if vehicle.start_time:
                # if start time given, check if arrives on time
                earliest_arrive = vehicle.start_time + travel_u_stop_time
                if earliest_arrive > req.orig_window[1]:
                    continue
            RV_dic[route_id] = (travel_u_stop_time, req.pax, req.pax_wc)
        elif "RR" in trip.id:
            # parse RR routes (request-request)
            first_stop, second_stop = route_id.split("_")
            if first_stop in removed_trips or second_stop in removed_trips:
                continue
            first_request = str(re.split('y|z', first_stop)[0])
            first_request = [s for s in first_request if s.isdigit()]
            first_request = int(''.join(first_request))
            req1 = [req for req in requests if req.ID == first_request][0]
            second_request = str(re.split('y|z', second_stop)[0])
            second_request = [s for s in second_request if s.isdigit()]
            second_request = int(''.join(second_request))
            req2 = [req for req in requests if req.ID == second_request][0]
            # check if pair is possible, if not continue
            if not pair_possible(req1, req2, first_stop,
                                 second_stop, travel_time):
                continue
            if route_id.endswith("z"):
                # at delivery passenger get off the car (-)
                RV_dic[route_id] = (travel_u_stop_time,
                                    -req2.pax, -req2.pax_wc)
            else:  # at pick-up passenger get in the car (+)
                RV_dic[route_id] = (travel_u_stop_time, req2.pax, req2.pax_wc)
    return RV_dic, RD_dic
def RV_offline(options, requests, vehicles, service_areas, pt_stops):
    """Build all request-request and request-vehicle route combinations.

    Depending on the configured DRT service type (PT feeder, restricted
    service areas, or whole-network), the trip alternatives of every
    request are prepared and then routed pairwise via ``get_routes``.

    :return: tuple ``(RV_dic, RD_dic)`` — the pairwise graph of travel
        times/passenger deltas and the request-to-depot travel times.
    """
    warning_info = []  # collected warning messages
    # dictionaries filled by get_routes()
    RV_dic = {}
    RD_dic = {}
    # working directory for duarouter input/output files
    try:
        os.mkdir('temp_dua')
    except FileExistsError:
        pass
    # direct routes define the request time windows
    get_direct_routes(options, requests)
    # tuple types describing one trip alternative and a DRT service type
    req_trip = namedtuple('req_trip', ['orig_ID', 'dest_ID', 'orig_edge',
                                       'dest_edge', 'orig_pos', 'dest_pos',
                                       'depart', 'orig_window', 'dest_window',
                                       'service_area', 'pt_line', 'pt_vehicle',
                                       'bus_stop_from', 'bus_stop_to', "leg"])
    drt_service = namedtuple('DRT', ['area', 'leg'])
    if options.pt_stops:
        # DRT acts as a feeder system for public transport;
        # map each stop id to its edge for the PT routing
        unique_stops = {stop.stop: stop.edge for stop in pt_stops}
        get_pt_routes(requests, options, unique_stops, service_areas,
                      req_trip, drt_service, pt_stops)
    elif options.service_area:
        # independent DRT restricted to configured service areas
        for req in requests:
            matching_areas = [area.name for area in service_areas
                              if (req.dest_edge in area.edges) and
                              (req.orig_edge in area.edges)]
            if not matching_areas:
                # neither end of the trip lies inside a service area
                warning_info.append("Request %s (id %s) could not be served. Origin and destination outside DRT service areas" %  # noqa
                                    (req.name, req.ID))
                print("Request %s could not be served. Origin and destination outside DRT service areas" %  # noqa
                      req.ID)
                req.rejected = True
                req.rejected_cause = "No DRT service available in trip area"
            for offset, area in enumerate(matching_areas):
                # suffix letters 'a', 'b', ... distinguish alternatives
                stop_id = "%i%s" % (req.ID, chr(97 + offset))
                req.trips.append(req_trip(stop_id, stop_id,
                                          req.orig_edge, req.dest_edge,
                                          req.orig_pos, req.dest_pos,
                                          req.depart, req.orig_window,
                                          req.dest_window, area, None, None,
                                          None, None, None))
    else:
        # independent DRT operating on the whole network
        for req in requests:
            req.trips = [req_trip(req.ID, req.ID, req.orig_edge, req.dest_edge,
                                  req.orig_pos, req.dest_pos, req.depart,
                                  req.orig_window, req.dest_window, None, None,
                                  None, None, None, None)]
    # search possible routes for all pair combinations
    RV_dic, RD_dic = get_routes(options, requests, vehicles, RV_dic, RD_dic)
    # Return pairwise graph with travel times and passengers
    return RV_dic, RD_dic
| 53.057051 | 171 | 0.493528 |
f797f526dbb1dc66f3acaa9aad6df2c4ff4abe75 | 203 | py | Python | python-tf/sample_counter.py | vfonov/DARQ | 4845e416a3939b4e86e1441a4a44f4fea337caf5 | [
"MIT"
] | 2 | 2021-08-24T13:13:48.000Z | 2021-12-20T14:27:39.000Z | python-tf/sample_counter.py | vfonov/DARQ | 4845e416a3939b4e86e1441a4a44f4fea337caf5 | [
"MIT"
] | null | null | null | python-tf/sample_counter.py | vfonov/DARQ | 4845e416a3939b4e86e1441a4a44f4fea337caf5 | [
"MIT"
] | null | null | null | import tensorflow as tf
import sys

# TF1-style setup: quiet the logger and enable eager mode so the
# TFRecordDataset below can be iterated directly.
tf.logging.set_verbosity('WARN')
tf.compat.v1.enable_eager_execution()
# Print "<file> <record count>" for every TFRecord file on the command line.
# (the unused `files = sys.argv` assignment was removed)
for tfrecord_path in sys.argv[1:]:
    print(tfrecord_path, sum(1 for _ in tf.data.TFRecordDataset(tfrecord_path)))
| 20.3 | 56 | 0.743842 |
4b4dd7eb6e0822232a0e1b571b600f28f2e10a02 | 2,320 | py | Python | nesta/core/batchables/general/companies/sql2es/run.py | anniyanvr/nesta | 4b3ae79922cebde0ad33e08ac4c40b9a10e8e7c3 | [
"MIT"
] | 13 | 2019-06-18T16:53:53.000Z | 2021-03-04T10:58:52.000Z | nesta/core/batchables/general/companies/sql2es/run.py | nestauk/old_nesta_daps | 4b3ae79922cebde0ad33e08ac4c40b9a10e8e7c3 | [
"MIT"
] | 208 | 2018-08-10T13:15:40.000Z | 2021-07-21T10:16:07.000Z | nesta/core/batchables/general/companies/sql2es/run.py | nestauk/old_nesta_daps | 4b3ae79922cebde0ad33e08ac4c40b9a10e8e7c3 | [
"MIT"
] | 8 | 2018-09-20T15:19:23.000Z | 2020-12-15T17:41:34.000Z | """
run.py (general.companies.sql2es)
==================================
Pipe curated Companies data from MySQL to Elasticsearch.
"""
from nesta.core.luigihacks.elasticsearchplus import ElasticsearchPlus
from nesta.core.orms.orm_utils import db_session, get_mysql_engine
from nesta.core.orms.orm_utils import object_to_dict
from nesta.core.orms.general_orm import CrunchbaseOrg
from ast import literal_eval
import boto3
import json
import logging
import os
def run():
    """Pipe one batch of curated Companies organisations from MySQL to ES.

    Reads the batch parameters from ``BATCHPAR_*`` environment variables,
    fetches the batch of organisation ids from S3, queries the matching
    organisations from MySQL and indexes each of them into Elasticsearch.
    """
    test = literal_eval(os.environ["BATCHPAR_test"])
    bucket = os.environ['BATCHPAR_bucket']
    batch_file = os.environ['BATCHPAR_batch_file']
    db_name = os.environ["BATCHPAR_db_name"]
    es_host = os.environ['BATCHPAR_outinfo']
    es_port = int(os.environ['BATCHPAR_out_port'])
    es_index = os.environ['BATCHPAR_out_index']
    es_type = os.environ['BATCHPAR_out_type']
    entity_type = os.environ["BATCHPAR_entity_type"]
    aws_auth_region = os.environ["BATCHPAR_aws_auth_region"]
    # database setup
    engine = get_mysql_engine("BATCHPAR_config", "mysqldb", db_name)
    # es setup (no_commit makes CI runs side-effect free)
    es = ElasticsearchPlus(hosts=es_host,
                           port=es_port,
                           aws_auth_region=aws_auth_region,
                           no_commit=("AWSBATCHTEST" in os.environ),
                           entity_type=entity_type,
                           strans_kwargs={'filename': 'companies.json'})
    # Collect input file. Use the public StreamingBody.read() API
    # instead of reaching into the private ``_raw_stream`` attribute.
    s3 = boto3.resource('s3')
    obj = s3.Object(bucket, batch_file)
    org_ids = json.loads(obj.get()['Body'].read())
    org_ids = org_ids[:20 if test else None]  # small sample in test mode
    logging.info(f"{len(org_ids)} organisations retrieved from s3")
    # Pipe orgs to ES
    with db_session(engine) as session:
        query = session.query(CrunchbaseOrg).filter(CrunchbaseOrg.id.in_(org_ids))
        for row in query.all():
            row = object_to_dict(row)
            _row = es.index(index=es_index, doc_type=es_type,
                            id=row.pop('id'), body=row)
    logging.info("Batch job complete.")
if __name__ == "__main__":
log_stream_handler = logging.StreamHandler()
logging.basicConfig(handlers=[log_stream_handler, ],
level=logging.INFO,
format="%(asctime)s:%(levelname)s:%(message)s")
run()
| 35.151515 | 82 | 0.65 |
ffcd6fc317efb8bbc81e09bdb48854caffcb9129 | 6,794 | py | Python | tests/cli/commands/test_info_command.py | aditishankar/test | dd3c46115ed3d5af7e6a4a6f8745cffb9b762c3a | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 3 | 2021-01-29T20:33:56.000Z | 2021-08-06T17:35:16.000Z | tests/cli/commands/test_info_command.py | aditishankar/test | dd3c46115ed3d5af7e6a4a6f8745cffb9b762c3a | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 210 | 2021-07-17T00:25:52.000Z | 2021-12-29T00:44:48.000Z | tests/cli/commands/test_info_command.py | aditishankar/test | dd3c46115ed3d5af7e6a4a6f8745cffb9b762c3a | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 2 | 2021-04-14T11:15:17.000Z | 2021-12-15T16:58:24.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import contextlib
import importlib
import io
import logging
import os
import unittest
import pytest
from parameterized import parameterized
from rich.console import Console
from airflow.cli import cli_parser
from airflow.cli.commands import info_command
from airflow.config_templates import airflow_local_settings
from airflow.logging_config import configure_logging
from airflow.version import version as airflow_version
from tests.test_utils.config import conf_vars
def capture_show_output(instance):
    """Render ``instance.info`` through a rich console, returning the text."""
    rich_console = Console()
    with rich_console.capture() as captured:
        instance.info(rich_console)
    return captured.get()
class TestPiiAnonymizer(unittest.TestCase):
    """Tests for PiiAnonymizer's scrubbing of filesystem paths and DB URLs."""
    def setUp(self) -> None:
        self.instance = info_command.PiiAnonymizer()
    def test_should_remove_pii_from_path(self):
        # the user's home directory must be replaced by the ${HOME} placeholder
        home_path = os.path.expanduser("~/airflow/config")
        assert "${HOME}/airflow/config" == self.instance.process_path(home_path)
    @parameterized.expand(
        [
            # (input connection URL, expected anonymized URL)
            (
                "postgresql+psycopg2://postgres:airflow@postgres/airflow",
                "postgresql+psycopg2://p...s:PASSWORD@postgres/airflow",
            ),
            (
                "postgresql+psycopg2://postgres@postgres/airflow",
                "postgresql+psycopg2://p...s@postgres/airflow",
            ),
            (
                "postgresql+psycopg2://:airflow@postgres/airflow",
                "postgresql+psycopg2://:PASSWORD@postgres/airflow",
            ),
            (
                # no credentials at all: the URL is left untouched
                "postgresql+psycopg2://postgres/airflow",
                "postgresql+psycopg2://postgres/airflow",
            ),
        ]
    )
    def test_should_remove_pii_from_url(self, before, after):
        # user names are abbreviated and passwords replaced with a constant
        assert after == self.instance.process_url(before)
class TestAirflowInfo:
    """Tests for the sections reported by ``airflow info``."""
    @classmethod
    def setup_class(cls):
        # pylint: disable=attribute-defined-outside-init
        cls.parser = cli_parser.get_parser()
    @classmethod
    def teardown_class(cls) -> None:
        # drop every logging handler registered during the tests, then
        # restore the default airflow logging configuration
        for handler_ref in logging._handlerList[:]:
            logging._removeHandlerRef(handler_ref)
        importlib.reload(airflow_local_settings)
        configure_logging()
    @staticmethod
    def unique_items(items):
        # each info section is a sequence of (key, value) pairs; collect keys
        return {i[0] for i in items}
    @conf_vars(
        {
            ("core", "executor"): "TEST_EXECUTOR",
            ("core", "dags_folder"): "TEST_DAGS_FOLDER",
            ("core", "plugins_folder"): "TEST_PLUGINS_FOLDER",
            ("logging", "base_log_folder"): "TEST_LOG_FOLDER",
            ('core', 'sql_alchemy_conn'): 'postgresql+psycopg2://postgres:airflow@postgres/airflow',
            ('logging', 'remote_logging'): 'True',
            ('logging', 'remote_base_log_folder'): 's3://logs-name',
        }
    )
    def test_airflow_info(self):
        # reload logging so the patched config values above take effect
        importlib.reload(airflow_local_settings)
        configure_logging()
        instance = info_command.AirflowInfo(info_command.NullAnonymizer())
        expected = {
            'executor',
            'version',
            'task_logging_handler',
            'plugins_folder',
            'base_log_folder',
            'remote_base_log_folder',
            'dags_folder',
            'sql_alchemy_conn',
        }
        assert self.unique_items(instance._airflow_info) == expected
    def test_system_info(self):
        instance = info_command.AirflowInfo(info_command.NullAnonymizer())
        expected = {'uname', 'architecture', 'OS', 'python_location', 'locale', 'python_version'}
        assert self.unique_items(instance._system_info) == expected
    def test_paths_info(self):
        instance = info_command.AirflowInfo(info_command.NullAnonymizer())
        expected = {'airflow_on_path', 'airflow_home', 'system_path', 'python_path'}
        assert self.unique_items(instance._paths_info) == expected
    def test_tools_info(self):
        instance = info_command.AirflowInfo(info_command.NullAnonymizer())
        expected = {
            'cloud_sql_proxy',
            'gcloud',
            'git',
            'kubectl',
            'mysql',
            'psql',
            'sqlite3',
            'ssh',
        }
        assert self.unique_items(instance._tools_info) == expected
    @conf_vars(
        {
            ('core', 'sql_alchemy_conn'): 'postgresql+psycopg2://postgres:airflow@postgres/airflow',
        }
    )
    def test_show_info(self):
        # without --anonymize the raw connection string is printed
        with contextlib.redirect_stdout(io.StringIO()) as stdout:
            info_command.show_info(self.parser.parse_args(["info"]))
        output = stdout.getvalue()
        assert airflow_version in output
        assert "postgresql+psycopg2://postgres:airflow@postgres/airflow" in output
    @conf_vars(
        {
            ('core', 'sql_alchemy_conn'): 'postgresql+psycopg2://postgres:airflow@postgres/airflow',
        }
    )
    def test_show_info_anonymize(self):
        # with --anonymize the user is abbreviated and the password masked
        with contextlib.redirect_stdout(io.StringIO()) as stdout:
            info_command.show_info(self.parser.parse_args(["info", "--anonymize"]))
        output = stdout.getvalue()
        assert airflow_version in output
        assert "postgresql+psycopg2://p...s:PASSWORD@postgres/airflow" in output
@pytest.fixture()
def setup_parser():
    """Yield a freshly constructed airflow CLI argument parser."""
    yield cli_parser.get_parser()
class TestInfoCommandMockHttpx:
    """Tests for ``airflow info --file-io`` with the file.io upload mocked."""
    @conf_vars(
        {
            ('core', 'sql_alchemy_conn'): 'postgresql+psycopg2://postgres:airflow@postgres/airflow',
        }
    )
    def test_show_info_anonymize_fileio(self, httpx_mock, setup_parser):
        # stub the file.io upload endpoint so no network call is made
        httpx_mock.add_response(
            url="https://file.io",
            method="post",
            json={
                "success": True,
                "key": "f9U3zs3I",
                "link": "https://file.io/TEST",
                "expiry": "14 days",
            },
            status_code=200,
        )
        with contextlib.redirect_stdout(io.StringIO()) as stdout:
            info_command.show_info(setup_parser.parse_args(["info", "--file-io"]))
        # the command should report the share link returned by file.io
        assert "https://file.io/TEST" in stdout.getvalue()
d55f914897d268a4a849334de14037679847b738 | 1,512 | py | Python | process_pipelines/xiaohuangji.py | LaudateCorpus1/chinese_chatbot_corpus | 3f1516947753744919611b5dc7f74e513d157036 | [
"Apache-2.0"
] | 2,321 | 2019-04-16T06:14:55.000Z | 2022-03-31T08:10:34.000Z | process_pipelines/xiaohuangji.py | wysstartgo/chinese_chatbot_corpus | a05452654b8b374eee646dd6bf43e4e9f4e57661 | [
"Apache-2.0"
] | 9 | 2019-05-08T02:14:07.000Z | 2021-08-04T07:39:33.000Z | process_pipelines/xiaohuangji.py | wysstartgo/chinese_chatbot_corpus | a05452654b8b374eee646dd6bf43e4e9f4e57661 | [
"Apache-2.0"
] | 506 | 2019-04-19T09:14:40.000Z | 2022-03-28T02:31:45.000Z | import codecs
import os
from config import Config
from util import *
def prepocess(raw_corpus_file_name, result_file_name):
    """Convert the raw xiaohuangji corpus into tab-separated utterance pairs.

    Sessions are delimited by lines starting with "E"; utterance lines
    start with "M".  Only two-utterance sessions are written out, joined
    by tabs via ``generate_single_pairs_from_multi_turn``.

    Fixes vs. original: files are closed via ``with`` even on error, and
    an empty corpus no longer raises ZeroDivisionError.
    """
    start_end_symbol = "E"
    utterance_symbol = "M"
    single_session = []
    session_lengths = []
    with codecs.open(raw_corpus_file_name, encoding=Config.encoding,
                     errors="replace") as raw_corpus_file, \
            codecs.open(result_file_name, "w",
                        encoding=Config.encoding) as result_file:
        for index, line in enumerate(raw_corpus_file):
            if index % 100000 == 0:
                # progress indicator for large corpora
                print(raw_corpus_file_name, index)
            if line.startswith(start_end_symbol):
                # session boundary: flush the collected session
                if len(single_session) == 2:
                    pairs = generate_single_pairs_from_multi_turn(single_session)
                    for pair in pairs:
                        result_file.write("\t".join(pair) + "\n")
                session_lengths.append(len(single_session))
                single_session = []
            elif line.startswith(utterance_symbol):
                # drop the leading "M" marker and surrounding whitespace
                utterance = line[1:].strip()
                single_session.append(utterance)
    if session_lengths:
        print("avg session length", sum(session_lengths) / len(session_lengths))
    else:
        # empty corpus: avoid ZeroDivisionError
        print("avg session length", 0)
def xiaohuangji_process_pipeline():
    """Run the xiaohuangji cleaning pipeline: preprocess then format-refine."""
    print("xiaohuangji_process_pipeline")
    source_path = Config.raw_xiaohuangji_corpus_path
    target_path = os.path.join(Config.clean_chat_corpus_root, "xiaohuangji.tsv")
    prepocess(source_path, target_path)
    format_refine(target_path)
| 32.869565 | 99 | 0.685185 |
4b747db6febe0ebbd94750f23cec38c3b81673e6 | 8,097 | py | Python | otcextensions/sdk/cce/cce_service.py | zsoltn/python-otcextensions | 4c0fa22f095ebd5f9636ae72acbae5048096822c | [
"Apache-2.0"
] | 10 | 2018-03-03T17:59:59.000Z | 2020-01-08T10:03:00.000Z | otcextensions/sdk/cce/cce_service.py | zsoltn/python-otcextensions | 4c0fa22f095ebd5f9636ae72acbae5048096822c | [
"Apache-2.0"
] | 208 | 2020-02-10T08:27:46.000Z | 2022-03-29T15:24:21.000Z | otcextensions/sdk/cce/cce_service.py | zsoltn/python-otcextensions | 4c0fa22f095ebd5f9636ae72acbae5048096822c | [
"Apache-2.0"
] | 15 | 2020-04-01T20:45:54.000Z | 2022-03-23T12:45:43.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import warnings
from openstack import exceptions
from openstack import service_description
from otcextensions.sdk.cce.v1 import _proxy as _proxy_v1
from otcextensions.sdk.cce.v3 import _proxy as _proxy_v3
class CceService(service_description.ServiceDescription):
    """Service description for the Cloud Container Engine (CCE) service.

    Maps API major versions to their proxy classes and builds a configured
    Proxy, handling endpoint overrides and version discovery fallbacks.
    """
    # API major version -> proxy implementation
    supported_versions = {
        '1': _proxy_v1.Proxy,
        '3': _proxy_v3.Proxy
    }
    def _make_proxy(self, instance):
        """Create a Proxy for the service in question.
        :param instance:
            The `openstack.connection.Connection` we're working with.
        """
        config = instance.config
        # First, check to see if we've got config that matches what we
        # understand in the SDK.
        version_string = config.get_api_version('cce') or '3'
        endpoint_override = config.get_endpoint(self.service_type)
        # Base endpoint from the service catalog for the configured region.
        ep = config.get_service_catalog().url_for(
            service_type=self.service_type,
            region_name=config.region_name)
        epo = '%(base)s/api/v%(ver)s' % {
            'base': ep,
            'ver': version_string}
        if version_string == '3':
            # NOTE(review): the %(project_id)s placeholder is appended
            # literally here — presumably expanded later by the session
            # layer; confirm before relying on the raw URL.
            epo += '/projects/%(project_id)s'
        if epo and not endpoint_override:
            endpoint_override = epo
        # If the user doesn't give a version in config, but we only support
        # one version, then just use that version.
        if not version_string and len(self.supported_versions) == 1:
            version_string = list(self.supported_versions)[0]
        proxy_obj = None
        if endpoint_override and version_string and self.supported_versions:
            # Both endpoint override and version_string are set, we don't
            # need to do discovery - just trust the user.
            proxy_class = self.supported_versions.get(version_string[0])
            if proxy_class:
                proxy_obj = config.get_session_client(
                    self.service_type,
                    constructor=proxy_class,
                )
                proxy_obj.endpoint_override = endpoint_override
                proxy_obj.additional_headers = {
                    'Content-Type': 'application/json'}
            else:
                warnings.warn(
                    "The configured version, {version} for service"
                    " {service_type} is not known or supported by"
                    " openstacksdk. The resulting Proxy object will only"
                    " have direct passthrough REST capabilities.".format(
                        version=version_string,
                        service_type=self.service_type),
                    category=exceptions.UnsupportedServiceVersion)
        elif endpoint_override and self.supported_versions:
            # Endpoint override only: discover the version at that endpoint.
            temp_adapter = config.get_session_client(
                self.service_type
            )
            api_version = temp_adapter.get_endpoint_data().api_version
            proxy_class = self.supported_versions.get(str(api_version[0]))
            if proxy_class:
                proxy_obj = config.get_session_client(
                    self.service_type,
                    constructor=proxy_class,
                )
            else:
                warnings.warn(
                    "Service {service_type} has an endpoint override set"
                    " but the version discovered at that endpoint, {version}"
                    " is not supported by openstacksdk. The resulting Proxy"
                    " object will only have direct passthrough REST"
                    " capabilities.".format(
                        version=api_version,
                        service_type=self.service_type),
                    category=exceptions.UnsupportedServiceVersion)
        if proxy_obj:
            if getattr(proxy_obj, 'skip_discovery', False):
                # Some services, like swift, don't have discovery. While
                # keystoneauth will behave correctly and handle such
                # scenarios, it's not super efficient as it involves trying
                # and falling back a few times.
                return proxy_obj
            data = proxy_obj.get_endpoint_data()
            # If we've gotten here with a proxy object it means we have
            # an endpoint_override in place. If the catalog_url and
            # service_url don't match, which can happen if there is a
            # None plugin and auth.endpoint like with standalone ironic,
            # we need to be explicit that this service has an endpoint_override
            # so that subsequent discovery calls don't get made incorrectly.
            if data.catalog_url != data.service_url:
                ep_key = '{service_type}_endpoint_override'.format(
                    service_type=self.service_type)
                config.config[ep_key] = data.service_url
                proxy_obj = config.get_session_client(
                    self.service_type,
                    constructor=proxy_class,
                )
            return proxy_obj
        # Make an adapter to let discovery take over
        version_kwargs = {}
        if version_string:
            version_kwargs['version'] = version_string
        elif self.supported_versions:
            # no explicit version: bound discovery by what we support
            supported_versions = sorted([
                int(f) for f in self.supported_versions])
            version_kwargs['min_version'] = str(supported_versions[0])
            version_kwargs['max_version'] = '{version}.latest'.format(
                version=str(supported_versions[-1]))
        temp_adapter = config.get_session_client(
            self.service_type,
            allow_version_hack=True,
            **version_kwargs
        )
        found_version = temp_adapter.get_api_major_version()
        if found_version is None:
            # discovery failed entirely: report why as precisely as we can
            if version_kwargs:
                raise exceptions.NotSupported(
                    "The {service_type} service for {cloud}:{region_name}"
                    " exists but does not have any supported versions.".format(
                        service_type=self.service_type,
                        cloud=instance.name,
                        region_name=instance.config.region_name))
            else:
                raise exceptions.NotSupported(
                    "The {service_type} service for {cloud}:{region_name}"
                    " exists but no version was discoverable.".format(
                        service_type=self.service_type,
                        cloud=instance.name,
                        region_name=instance.config.region_name))
        proxy_class = self.supported_versions.get(str(found_version[0]))
        if not proxy_class:
            # Maybe openstacksdk is being used for the passthrough
            # REST API proxy layer for an unknown service in the
            # service catalog that also doesn't have any useful
            # version discovery?
            warnings.warn(
                "Service {service_type} has no discoverable version."
                " The resulting Proxy object will only have direct"
                " passthrough REST capabilities.".format(
                    service_type=self.service_type),
                category=exceptions.UnsupportedServiceVersion)
            return temp_adapter
        # NOTE(review): this lookup repeats the one just above — it is
        # redundant but harmless; left unchanged here.
        proxy_class = self.supported_versions.get(str(found_version[0]))
        if proxy_class:
            version_kwargs['constructor'] = proxy_class
        return config.get_session_client(
            self.service_type,
            allow_version_hack=True,
            **version_kwargs
        )
| 44.489011 | 79 | 0.599975 |
95af28e73996a762ed6bf6bc6be73ab61923539c | 26,857 | py | Python | python/plano.py | jirkadanek/quiver | 49785bbbabdd3b3e2197566cacbbffef55b1272e | [
"Apache-2.0"
] | 22 | 2016-09-16T15:34:17.000Z | 2021-08-30T14:14:36.000Z | python/plano.py | jirkadanek/quiver | 49785bbbabdd3b3e2197566cacbbffef55b1272e | [
"Apache-2.0"
] | 42 | 2016-09-16T16:31:29.000Z | 2021-10-31T15:40:23.000Z | python/plano.py | jirkadanek/quiver | 49785bbbabdd3b3e2197566cacbbffef55b1272e | [
"Apache-2.0"
] | 23 | 2016-08-26T15:50:34.000Z | 2021-12-09T14:51:13.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import print_function

import atexit as _atexit
import binascii as _binascii
import codecs as _codecs
import collections as _collections
import collections.abc
import ctypes as _ctypes
import ctypes.util
import fnmatch as _fnmatch
import getpass as _getpass
import json as _json
import os as _os
import random as _random
import re as _re
import shlex as _shlex
import shutil as _shutil
import signal as _signal
import socket as _socket
import subprocess as _subprocess
import sys as _sys
import tarfile as _tarfile
import tempfile as _tempfile
import time as _time
import traceback as _traceback
import types as _types
import uuid as _uuid

from subprocess import CalledProcessError
from subprocess import PIPE
# See documentation at http://www.ssorj.net/projects/plano.html
class PlanoException(Exception):
    """Base exception raised by plano helpers (e.g. fail())."""
    pass
# Convenience aliases for common OS facilities
LINE_SEP = _os.linesep
PATH_SEP = _os.sep
PATH_VAR_SEP = _os.pathsep
ENV = _os.environ
ARGS = _sys.argv
STDIN = _sys.stdin
STDOUT = _sys.stdout
STDERR = _sys.stderr
DEVNULL = _os.devnull
# Log levels, ordered from least to most severe
_message_levels = (
    "debug",
    "notice",
    "warn",
    "error",
)
_debug = _message_levels.index("debug")
_notice = _message_levels.index("notice")
_warn = _message_levels.index("warn")
_error = _message_levels.index("error")
# Messages go to stderr by default; only levels at or above the
# threshold are emitted
_message_output = STDERR
_message_threshold = _notice
def set_message_output(writeable):
    """Deprecated alias; use enable_logging(output=...) instead."""
    warn("Deprecated! Use enable_logging(output=output) instead")
    enable_logging(output=writeable)
def set_message_threshold(level):
    """Deprecated alias; use enable_logging(level=...) instead."""
    warn("Deprecated! Use enable_logging(level=level) instead")
    enable_logging(level=level)
def enable_logging(level=None, output=None):
    """Configure plano logging.

    *level* is one of "debug", "notice", "warn" (or the alias
    "warning"), "error". *output* is a writeable object or a file path
    to open for writing. Arguments left as None are not changed.
    """
    global _message_threshold, _message_output

    if level is not None:
        # Accept the common alias "warning" for "warn"
        normalized = "warn" if level == "warning" else level
        assert normalized in _message_levels

        _message_threshold = _message_levels.index(normalized)

    if output is not None:
        _message_output = open(output, "w") if _is_string(output) else output
def disable_logging():
    """Suppress all plano log messages."""
    global _message_threshold

    # One past the highest index in _message_levels, so no level matches
    _message_threshold = 4
def fail(message, *args):
    """Log *message* as an error, then raise.

    If *message* is an exception instance it is re-raised as-is;
    otherwise a PlanoException with the formatted text is raised.
    """
    error(message, *args)
    if isinstance(message, BaseException):
        raise message
    raise PlanoException(message.format(*args))
def error(message, *args):
    """Log a message at error level (always shown)."""
    _print_message("Error", message, args)
def warn(message, *args):
    """Log a message at warn level."""
    if _message_threshold <= _warn:
        _print_message("Warning", message, args)
def notice(message, *args):
    """Log a message at notice level (the default threshold)."""
    if _message_threshold <= _notice:
        _print_message(None, message, args)
def debug(message, *args):
    """Log a message at debug level."""
    if _message_threshold <= _debug:
        _print_message("Debug", message, args)
def exit(arg=None, *args):
    """Exit the process.

    exit() or exit(0) exits silently with status 0.
    exit(message, *args) logs the formatted message as an error and
    exits with status 1.
    exit(code) logs the code (error level when nonzero) and exits with
    that integer status.
    """
    if arg in (0, None):
        _sys.exit()

    if _is_string(arg):
        # Bug fix: the format args must be unpacked, not passed as one tuple
        error(arg, *args)
        _sys.exit(1)

    if isinstance(arg, int):
        if arg > 0:
            error("Exiting with code {0}", arg)
        else:
            notice("Exiting with code {0}", arg)

        _sys.exit(arg)

    raise PlanoException("Illegal argument to exit(): {0}".format(arg))
def _print_message(category, message, args):
if _message_output is None:
return
message = _format_message(category, message, args)
print(message, file=_message_output)
_message_output.flush()
def _format_message(category, message, args):
    """Render one log line: format args, capitalize the first letter,
    then prefix the optional category and the program name."""
    text = message if _is_string(message) else str(message)

    if args:
        text = text.format(*args)

    # Capitalize only the first character, leaving the rest untouched
    if text and text[0].islower():
        text = text[0].upper() + text[1:]

    if category:
        text = "{0}: {1}".format(category, text)

    return "{0}: {1}".format(program_name(), text)
def eprint(*args, **kwargs):
print(*args, file=_sys.stderr, **kwargs)
def flush():
STDOUT.flush()
STDERR.flush()
absolute_path = _os.path.abspath
normalize_path = _os.path.normpath
real_path = _os.path.realpath
exists = _os.path.lexists
is_absolute = _os.path.isabs
is_dir = _os.path.isdir
is_file = _os.path.isfile
is_link = _os.path.islink
file_size = _os.path.getsize
join = _os.path.join
split = _os.path.split
split_extension = _os.path.splitext
current_dir = _os.getcwd
sleep = _time.sleep
def home_dir(user=None):
return _os.path.expanduser("~{0}".format(user or ""))
def parent_dir(path):
path = normalize_path(path)
parent, child = split(path)
return parent
def file_name(file):
file = normalize_path(file)
dir, name = split(file)
return name
def name_stem(file):
name = file_name(file)
if name.endswith(".tar.gz"):
name = name[:-3]
stem, ext = split_extension(name)
return stem
def name_extension(file):
name = file_name(file)
stem, ext = split_extension(name)
return ext
def program_name(command=None):
if command is None:
args = ARGS
else:
args = command.split()
for arg in args:
if "=" not in arg:
return file_name(arg)
def which(program_name):
    """Return the absolute path of *program_name* found on PATH, or None.

    Only regular executable files are returned; a plain os.access X_OK
    check also matches directories, which was a latent bug here.
    """
    assert "PATH" in ENV

    for dir in ENV["PATH"].split(PATH_VAR_SEP):
        program = join(dir, program_name)

        # Require a real file: directories are "executable" too
        if is_file(program) and _os.access(program, _os.X_OK):
            return program
def read(file):
with _codecs.open(file, encoding="utf-8", mode="r") as f:
return f.read()
def write(file, string):
_make_dir(parent_dir(file))
with _codecs.open(file, encoding="utf-8", mode="w") as f:
f.write(string)
return file
def append(file, string):
_make_dir(parent_dir(file))
with _codecs.open(file, encoding="utf-8", mode="a") as f:
f.write(string)
return file
def prepend(file, string):
orig = read(file)
prepended = string + orig
return write(file, prepended)
def touch(file):
try:
_os.utime(file, None)
except OSError:
append(file, "")
return file
def tail(file, n):
return "".join(tail_lines(file, n))
def read_lines(file):
with _codecs.open(file, encoding="utf-8", mode="r") as f:
return f.readlines()
def write_lines(file, lines):
    """Write *lines* to *file*, replacing any existing content.

    Parent directories are created as needed. Returns the file path.
    """
    _make_dir(parent_dir(file))

    # Bug fix: the file must be opened for writing, not reading
    with _codecs.open(file, encoding="utf-8", mode="w") as f:
        f.writelines(lines)

    return file
def append_lines(file, lines):
    """Append *lines* to *file*, creating it (and parent dirs) if needed.

    Returns the file path.
    """
    _make_dir(parent_dir(file))

    with _codecs.open(file, encoding="utf-8", mode="a") as f:
        # Bug fix: the parameter is 'lines'; 'string' was an undefined name
        f.writelines(lines)

    return file
def prepend_lines(file, lines):
orig_lines = read_lines(file)
_make_dir(parent_dir(file))
with _codecs.open(file, encoding="utf-8", mode="w") as f:
f.writelines(lines)
f.writelines(orig_lines)
return file
# Derived from http://stackoverflow.com/questions/136168/get-last-n-lines-of-a-file-with-python-similar-to-tail
def tail_lines(file, n):
    """Return (at most) the last *n* lines of *file* as a list.

    Probes backward from the end of the file, doubling the probe
    distance until more than n lines are in view or the whole file has
    been read.

    NOTE(review): for n == 0 this returns all lines, since lines[-0:]
    is the whole list — confirm whether callers rely on that.
    """
    assert n >= 0
    with _codecs.open(file, encoding="utf-8", mode="r") as f:
        pos = n + 1  # initial backward probe distance
        lines = list()
        while len(lines) <= n:
            try:
                # Seek *pos* units back from the end (whence=2)
                f.seek(-pos, 2)
            except IOError:
                # File shorter than the probe; read it all and stop
                f.seek(0)
                break
            finally:
                lines = f.readlines()
            pos *= 2
    return lines[-n:]
def read_json(file):
with _codecs.open(file, encoding="utf-8", mode="r") as f:
return _json.load(f)
def write_json(file, obj):
_make_dir(parent_dir(file))
with _codecs.open(file, encoding="utf-8", mode="w") as f:
return _json.dump(obj, f, indent=4, separators=(",", ": "), sort_keys=True)
def parse_json(json):
return _json.loads(json)
def emit_json(obj):
    """Return *obj* serialized as pretty-printed, key-sorted JSON text."""
    # Bug fix: a stray file argument 'f' (an undefined name) was passed here
    return _json.dumps(obj, indent=4, separators=(",", ": "), sort_keys=True)
def http_get(url, output_file=None, insecure=False):
options = [
"-sf",
"-H", "'Expect:'",
]
if insecure:
options.append("--insecure")
if output_file is None:
return call_for_stdout("curl {0} {1}", " ".join(options), url)
call("curl {0} {1} -o {2}", " ".join(options), url, output_file)
def http_put(url, input_file, output_file=None, insecure=False):
options = [
"-sf",
"-X", "PUT",
"-H", "'Expect:'",
]
if insecure:
options.append("--insecure")
if output_file is None:
return call_for_stdout("curl {0} {1} -d @{2}", " ".join(options), url, input_file)
call("curl {0} {1} -d @{2} -o {3}", " ".join(options), url, input_file, output_file)
def http_get_json(url, insecure=False):
return parse_json(http_get(url, insecure=insecure))
def http_put_json(url, data, insecure=False):
with temp_file() as f:
write_json(f, data)
http_put(url, f, insecure=insecure)
def user_temp_dir():
try:
return ENV["XDG_RUNTIME_DIR"]
except KeyError:
return _tempfile.gettempdir()
def make_temp_file(suffix="", dir=None):
if dir is None:
dir = user_temp_dir()
return _tempfile.mkstemp(prefix="plano-", suffix=suffix, dir=dir)[1]
def make_temp_dir(suffix="", dir=None):
if dir is None:
dir = user_temp_dir()
return _tempfile.mkdtemp(prefix="plano-", suffix=suffix, dir=dir)
class temp_file(object):
def __init__(self, suffix="", dir=None):
self.file = make_temp_file(suffix=suffix, dir=dir)
def __enter__(self):
return self.file
def __exit__(self, exc_type, exc_value, traceback):
_remove(self.file)
def unique_id(length=16):
    """Return a random hex identifier.

    *length* is in bytes, so the returned string is 2 * length hex
    characters long. Length must be between 1 and 16 inclusive.
    """
    assert length >= 1
    assert length <= 16

    # uuid4().hex is the hex encoding of the 16 random bytes, so taking
    # a prefix equals hexlifying the first *length* bytes
    return _uuid.uuid4().hex[:length * 2]
def copy(from_path, to_path):
notice("Copying '{0}' to '{1}'", from_path, to_path)
return _copy(from_path, to_path)
def _copy(from_path, to_path):
if is_dir(to_path):
to_path = join(to_path, file_name(from_path))
else:
make_dir(parent_dir(to_path))
if is_dir(from_path):
_copytree(from_path, to_path, symlinks=True)
else:
_shutil.copy(from_path, to_path)
return to_path
def move(from_path, to_path):
notice("Moving '{0}' to '{1}'", from_path, to_path)
return _move(from_path, to_path)
def _move(from_path, to_path):
if is_dir(to_path):
to_path = join(to_path, file_name(from_path))
else:
parent_path = parent_dir(to_path)
if parent_path:
_make_dir(parent_path)
_shutil.move(from_path, to_path)
return to_path
def rename(path, expr, replacement):
    """Rename the file or directory at *path*, regex-substituting *expr*
    with *replacement* in its base name. Returns the new path.
    """
    path = normalize_path(path)
    # Renamed the local so it no longer shadows the parent_dir() function
    dir, name = split(path)
    to_name = string_replace(name, expr, replacement)
    to_path = join(dir, to_name)

    notice("Renaming '{0}' to '{1}'", path, to_path)

    move(path, to_path)

    return to_path
def remove(path):
notice("Removing '{0}'", path)
return _remove(path)
def _remove(path):
if not exists(path):
return
if is_dir(path):
_shutil.rmtree(path, ignore_errors=True)
else:
_os.remove(path)
return path
def make_link(source_path, link_file):
notice("Making link '{0}' to '{1}'", link_file, source_path)
if exists(link_file):
assert read_link(link_file) == source_path
return
link_dir = parent_dir(link_file)
if link_dir:
make_dir(link_dir)
_os.symlink(source_path, link_file)
return link_file
def read_link(file):
return _os.readlink(file)
def find(dir, *patterns):
matched_paths = set()
if not patterns:
patterns = ("*",)
for root, dirs, files in _os.walk(dir):
for pattern in patterns:
matched_dirs = _fnmatch.filter(dirs, pattern)
matched_files = _fnmatch.filter(files, pattern)
matched_paths.update([join(root, x) for x in matched_dirs])
matched_paths.update([join(root, x) for x in matched_files])
return sorted(matched_paths)
def find_any_one(dir, *patterns):
paths = find(dir, *patterns)
if len(paths) == 0:
return
return paths[0]
def find_only_one(dir, *patterns):
paths = find(dir, *patterns)
if len(paths) == 0:
return
if len(paths) > 1:
fail("Found multiple files: {0}", ", ".join(paths))
assert len(paths) == 1
return paths[0]
def find_exactly_one(dir, *patterns):
path = find_only_one(dir, *patterns)
if path is None:
fail("Found no matching files")
return path
def string_replace(string, expr, replacement, count=0):
    """Replace regex *expr* matches in *string* with *replacement*.

    *count* limits the number of substitutions; 0 means replace all.
    """
    # Pass count by keyword; the positional form is deprecated in newer Pythons
    return _re.sub(expr, replacement, string, count=count)
def configure_file(input_file, output_file, **kwargs):
notice("Configuring '{0}' for output '{1}'", input_file, output_file)
content = read(input_file)
for name, value in kwargs.items():
content = content.replace("@{0}@".format(name), value)
write(output_file, content)
_shutil.copymode(input_file, output_file)
def make_dir(dir):
notice("Making directory '{0}'", dir)
return _make_dir(dir)
def _make_dir(dir):
if dir == "":
return dir
if not exists(dir):
_os.makedirs(dir)
return dir
# Returns the current working directory so you can change it back
def change_dir(dir):
notice("Changing directory to '{0}'", dir)
return _change_dir(dir)
def _change_dir(dir):
try:
cwd = current_dir()
except FileNotFoundError:
cwd = None
_os.chdir(dir)
return cwd
def list_dir(dir, *patterns):
assert is_dir(dir)
names = _os.listdir(dir)
if not patterns:
return sorted(names)
matched_names = set()
for pattern in patterns:
matched_names.update(_fnmatch.filter(names, pattern))
return sorted(matched_names)
class working_dir(object):
def __init__(self, dir):
self.dir = dir
self.prev_dir = None
def __enter__(self):
if self.dir is None or self.dir == ".":
return
if not exists(self.dir):
_make_dir(self.dir)
notice("Entering working directory '{0}'", absolute_path(self.dir))
self.prev_dir = _change_dir(self.dir)
return self.dir
def __exit__(self, exc_type, exc_value, traceback):
if self.dir is None or self.dir == ".":
return
notice("Returning to directory '{0}'", absolute_path(self.prev_dir))
_change_dir(self.prev_dir)
class temp_working_dir(working_dir):
def __init__(self):
super(temp_working_dir, self).__init__(make_temp_dir())
def __exit__(self, exc_type, exc_value, traceback):
super(temp_working_dir, self).__exit__(exc_type, exc_value, traceback)
_remove(self.dir)
class working_env(object):
def __init__(self, **env_vars):
self.env_vars = env_vars
self.prev_env_vars = dict()
def __enter__(self):
for name, value in self.env_vars.items():
if name in ENV:
self.prev_env_vars[name] = ENV[name]
ENV[name] = str(value)
def __exit__(self, exc_type, exc_value, traceback):
for name, value in self.env_vars.items():
if name in self.prev_env_vars:
ENV[name] = self.prev_env_vars[name]
else:
del ENV[name]
def call(command, *args, **kwargs):
proc = start_process(command, *args, **kwargs)
check_process(proc)
def call_for_exit_code(command, *args, **kwargs):
proc = start_process(command, *args, **kwargs)
return wait_for_process(proc)
def _call_for_output(stream_index, stream_kwarg, command, args, kwargs):
    """Run *command* capturing one stream and return it decoded.

    *stream_index* selects stdout (0) or stderr (1) from communicate();
    *stream_kwarg* is the matching Popen keyword set to PIPE. Raises
    CalledProcessError (with .output attached) on a nonzero exit.
    """
    kwargs[stream_kwarg] = _subprocess.PIPE

    proc = start_process(command, *args, **kwargs)
    output = proc.communicate()[stream_index].decode("utf-8")
    exit_code = proc.poll()

    if exit_code != 0:
        error = CalledProcessError(exit_code, proc.command_string)
        error.output = output

        raise error

    return output

def call_for_stdout(command, *args, **kwargs):
    """Run *command* and return its standard output as a string.

    Raises CalledProcessError (with .output set) on a nonzero exit.
    """
    return _call_for_output(0, "stdout", command, args, kwargs)

def call_for_stderr(command, *args, **kwargs):
    """Run *command* and return its standard error as a string.

    Raises CalledProcessError (with .output set) on a nonzero exit.
    """
    return _call_for_output(1, "stderr", command, args, kwargs)
def call_and_print_on_error(command, *args, **kwargs):
warn("Deprecated! Use call() with quiet=True instead")
kwargs["quiet"] = True
call(command, *args, **kwargs)
# Every _Process registers itself here so the SIGTERM handler can
# terminate any still-running children on exit
_child_processes = list()
class _Process(_subprocess.Popen):
    """Popen subclass carrying plano metadata (display name, the command
    string for error reporting, and the quiet-mode temp output file)."""
    def __init__(self, command, options, name, command_string, temp_output_file):
        super(_Process, self).__init__(command, **options)
        self.name = name
        self.command_string = command_string
        self.temp_output_file = temp_output_file  # set when quiet=True
        _child_processes.append(self)
    @property
    def exit_code(self):
        # Alias for Popen.returncode
        return self.returncode
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        stop_process(self)
    def __repr__(self):
        return "process {0} ({1})".format(self.pid, self.name)
def default_sigterm_handler(signum, frame):
for proc in _child_processes:
if proc.poll() is None:
proc.terminate()
exit(-(_signal.SIGTERM))
_signal.signal(_signal.SIGTERM, default_sigterm_handler)
def _command_string(command, args):
elems = ["\"{0}\"".format(x) if " " in x else x for x in command]
string = " ".join(elems)
string = string.format(*args)
return string
# Handle to libc, used by start_process to ask the kernel to kill
# children when the parent dies (prctl PDEATHSIG)
_libc = None
# NOTE(review): "linux2" is the Python 2 value of sys.platform; on
# Python 3 it is "linux", so this block never runs there and _libc
# stays None. Also _ctypes.util requires ctypes.util to have been
# imported; the bare except below would mask that AttributeError.
if _sys.platform == "linux2":
    try:
        _libc = _ctypes.CDLL(_ctypes.util.find_library("c"))
    except:
        _traceback.print_exc()
def start_process(command, *args, **kwargs):
    """Start *command* and return the running _Process handle.

    *command* is either a format string (formatted with *args*, then
    shlex-split) or an iterable of argument strings.

    Keyword options (all others pass through to subprocess.Popen):
      name   - Display name for logging; defaults to the executable
      output - Path or file object receiving both stdout and stderr
      quiet  - Capture output to a temp file; it is shown only on error
    """
    if _is_string(command):
        command = command.format(*args)
        command_args = _shlex.split(command)
        command_string = command
    elif isinstance(command, _collections.abc.Iterable):
        # collections.Iterable was removed in Python 3.10; use collections.abc
        assert len(args) == 0, args
        command_args = command
        command_string = _command_string(command, [])
    else:
        raise Exception()

    command_string = command_string.replace("\n", "\\n")

    notice("Calling '{0}'", command_string)

    # Bug fix: pop "name" so it is not passed through to Popen, which
    # has no such keyword
    name = kwargs.pop("name", command_args[0])

    kwargs["stdout"] = kwargs.get("stdout", _sys.stdout)
    kwargs["stderr"] = kwargs.get("stderr", _sys.stderr)

    if "output" in kwargs:
        out = kwargs.pop("output")

        kwargs["stdout"] = out
        kwargs["stderr"] = out

    temp_output_file = None

    if "quiet" in kwargs:
        if kwargs.pop("quiet") is True:
            temp_output_file = make_temp_file()
            temp_output = open(temp_output_file, "w")

            kwargs["stdout"] = temp_output
            kwargs["stderr"] = temp_output

    if "preexec_fn" not in kwargs:
        if _libc is not None:
            # Bug fix: preexec_fn must be a callable run in the child,
            # not the (parent-side) result of calling prctl immediately
            kwargs["preexec_fn"] = lambda: _libc.prctl(1, _signal.SIGKILL)

    if "shell" in kwargs and kwargs["shell"] is True:
        proc = _Process(command_string, kwargs, name, command_string, temp_output_file)
    else:
        proc = _Process(command_args, kwargs, name, command_string, temp_output_file)

    debug("{0} started", proc)

    return proc
# Exits without complaint if proc is null
def terminate_process(proc):
if proc is None:
return
notice("Terminating {0}", proc)
if proc.poll() is None:
proc.terminate()
else:
debug("{0} already exited", proc)
def stop_process(proc):
notice("Stopping {0}", proc)
if proc.poll() is not None:
if proc.returncode == 0:
debug("{0} already exited normally", proc)
elif proc.returncode == -(_signal.SIGTERM):
debug("{0} was already terminated", proc)
else:
debug("{0} already exited with code {1}", proc, proc.returncode)
return
proc.terminate()
return wait_for_process(proc)
def wait_for_process(proc):
    """Wait for *proc* to exit and return its exit code.

    For processes started with quiet=True, the captured output is
    printed only when the process failed (quiet promises "no output
    unless there is an error"); the temp file is then removed.
    """
    debug("Waiting for {0} to exit", proc)

    proc.wait()

    if proc.returncode == 0:
        debug("{0} exited normally", proc)
    elif proc.returncode == -(_signal.SIGTERM):
        debug("{0} exited after termination", proc)
    else:
        debug("{0} exited with code {1}", proc, proc.exit_code)

    if proc.temp_output_file is not None:
        # Bug fix: only surface captured output on error; it previously
        # printed on success too, defeating quiet mode. Also merged the
        # duplicated None check.
        if proc.returncode != 0:
            eprint(read(proc.temp_output_file), end="")

        _remove(proc.temp_output_file)

    return proc.returncode
def check_process(proc):
wait_for_process(proc)
if proc.returncode != 0:
raise CalledProcessError(proc.returncode, proc.command_string)
def exec_process(command, *args):
    """Replace the current process image by exec'ing *command*.

    *command* is either a format string (formatted with *args*, then
    shlex-split) or an iterable of argument strings. Does not return.
    """
    if _is_string(command):
        command = command.format(*args)
        command_args = _shlex.split(command)
        command_string = command
    elif isinstance(command, _collections.abc.Iterable):
        # collections.Iterable was removed in Python 3.10; use collections.abc
        assert len(args) == 0, args
        command_args = command
        command_string = _command_string(command, [])
    else:
        raise Exception()

    notice("Calling '{0}'", command_string)

    # Bug fix: execvp's second argument is the full argv, which by
    # convention includes the program name at index 0; passing [1:]
    # shifted every argument by one
    _os.execvp(command_args[0], command_args)
def make_archive(input_dir, output_dir, archive_stem):
assert is_dir(input_dir), input_dir
assert is_dir(output_dir), output_dir
assert _is_string(archive_stem), archive_stem
with temp_working_dir() as dir:
temp_input_dir = join(dir, archive_stem)
copy(input_dir, temp_input_dir)
make_dir(output_dir)
output_file = "{0}.tar.gz".format(join(output_dir, archive_stem))
output_file = absolute_path(output_file)
with working_dir(dir):
call("tar -czf {0} {1}", output_file, archive_stem)
return output_file
def extract_archive(archive_file, output_dir=None):
assert is_file(archive_file), archive_file
assert output_dir is None or is_dir(output_dir), output_dir
archive_file = absolute_path(archive_file)
with working_dir(output_dir):
call("tar -xf {0}", archive_file)
return output_dir
def rename_archive(archive_file, new_archive_stem):
assert is_file(archive_file), archive_file
assert _is_string(new_archive_stem), new_archive_stem
if name_stem(archive_file) == new_archive_stem:
return archive_file
with temp_working_dir() as dir:
extract_archive(archive_file, dir)
input_name = list_dir(dir)[0]
input_dir = join(dir, input_name)
output_file = make_archive(input_dir, dir, new_archive_stem)
output_name = file_name(output_file)
archive_dir = parent_dir(archive_file)
new_archive_file = join(archive_dir, output_name)
move(output_file, new_archive_file)
remove(archive_file)
return new_archive_file
def random_port(min=49152, max=65535):
    """Return a random port in [min, max]; the defaults span the
    IANA ephemeral (dynamic) port range."""
    return _random.randrange(min, max + 1)
def wait_for_port(port, host="", timeout=30):
    """Block until TCP *port* on *host* accepts connections.

    Polls every 0.1 seconds; fails (raises via fail()) after *timeout*
    seconds.
    """
    if _is_string(port):
        port = int(port)

    start = _time.time()

    while True:
        # Use a fresh socket per attempt; a socket whose connect failed
        # is not reliably reusable on all platforms
        sock = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM)
        sock.setsockopt(_socket.SOL_SOCKET, _socket.SO_REUSEADDR, 1)

        try:
            if sock.connect_ex((host, port)) == 0:
                return
        finally:
            sock.close()

        if _time.time() - start > timeout:
            fail("Timed out waiting for port {0} to open", port)

        sleep(0.1)
def plural(noun, count=0):
    """Return the plural form of *noun* unless *count* is exactly 1.

    None becomes the empty string. Nouns ending in "s" get an "es"
    suffix ("process" -> "processes"); everything else gets "s".
    """
    if noun is None:
        return ""

    if count == 1:
        return noun

    if noun.endswith("s"):
        # Bug fix: "{}ses" produced forms like "classses"; "es" is correct
        return "{0}es".format(noun)

    return "{0}s".format(noun)
# Modified copytree impl that allows for already existing destination
# dirs
def _copytree(src, dst, symlinks=False, ignore=None):
    """Recursively copy a directory tree using copy2().

    Unlike shutil.copytree, an already existing destination directory
    is allowed; its contents are merged.

    If exception(s) occur, an Error is raised with a list of reasons.

    If the optional symlinks flag is true, symbolic links in the
    source tree result in symbolic links in the destination tree; if
    it is false, the contents of the files pointed to by symbolic
    links are copied.

    The optional ignore argument is a callable. If given, it
    is called with the `src` parameter, which is the directory
    being visited by copytree(), and `names` which is the list of
    `src` contents, as returned by os.listdir():

        callable(src, names) -> ignored_names

    Since copytree() is called recursively, the callable will be
    called once for each directory that is copied. It returns a
    list of names relative to the `src` directory that should
    not be copied.
    """
    names = _os.listdir(src)

    if ignore is not None:
        ignored_names = ignore(src, names)
    else:
        ignored_names = set()

    if not exists(dst):
        _os.makedirs(dst)

    errors = []

    for name in names:
        if name in ignored_names:
            continue

        srcname = _os.path.join(src, name)
        dstname = _os.path.join(dst, name)

        try:
            if symlinks and _os.path.islink(srcname):
                linkto = _os.readlink(srcname)
                _os.symlink(linkto, dstname)
            elif _os.path.isdir(srcname):
                _copytree(srcname, dstname, symlinks, ignore)
            else:
                # Will raise a SpecialFileError for unsupported file types
                _shutil.copy2(srcname, dstname)
        # catch the Error from the recursive copytree so that we can
        # continue with other files
        except _shutil.Error as err:
            errors.extend(err.args[0])
        except EnvironmentError as why:
            errors.append((srcname, dstname, str(why)))

    try:
        _shutil.copystat(src, dst)
    except OSError as why:
        # Bug fix: Python 3's shutil has no WindowsError attribute, so
        # the old "_shutil.WindowsError is not None" check raised
        # AttributeError here; getattr with an empty tuple default makes
        # the isinstance test safely false on non-Windows platforms
        if isinstance(why, getattr(_shutil, "WindowsError", ())):
            # Copying file access times may fail on Windows
            pass
        else:
            errors.append((src, dst, str(why)))

    if errors:
        raise _shutil.Error(errors)
def _is_string(obj):
try:
return isinstance(obj, basestring)
except NameError:
return isinstance(obj, str)
| 25.700478 | 111 | 0.646982 |
ea8e37c93624e24f6f51490b6c62c5a90f3a56b0 | 4,245 | py | Python | APIs/APIs 8 - Hashing/app/main.py | Nikolas010101/Projects | 1338e8073dd85b5b9d2b107b51eff966eb4051e9 | [
"MIT"
] | null | null | null | APIs/APIs 8 - Hashing/app/main.py | Nikolas010101/Projects | 1338e8073dd85b5b9d2b107b51eff966eb4051e9 | [
"MIT"
] | null | null | null | APIs/APIs 8 - Hashing/app/main.py | Nikolas010101/Projects | 1338e8073dd85b5b9d2b107b51eff966eb4051e9 | [
"MIT"
] | null | null | null | from typing import Optional, List
import bcrypt
from fastapi import Body, FastAPI, Response, status, HTTPException, Depends
from fastapi.params import Body
from random import randrange
import psycopg2
from psycopg2.extras import RealDictCursor
import time
from . import models, schemas
from .database import engine, get_db
from sqlalchemy.orm import Session
from passlib.context import CryptContext
# Bug fix: the passlib keyword is "schemes", not "schemas"; the old
# spelling raised a TypeError when constructing the context
pwd_context = CryptContext(schemes = ["bcrypt"], deprecated = "auto")

models.Base.metadata.create_all(bind=engine)

app = FastAPI()

# Retry until the database accepts connections (e.g. while it starts up)
while True:
    try:
        # SECURITY: credentials are hard-coded here; move them to
        # environment variables or a config file for anything real
        conn = psycopg2.connect(host = 'localhost', database = 'fastapi', user = 'postgres', password = '29480783', cursor_factory=RealDictCursor)
        cursor = conn.cursor()
        print('Database connection was succesfull!')
        break
    except Exception as error:
        print("Connecting to database failed")
        print("Error: ", error)
        time.sleep(2)
#decorator passa a função como parâmetro para o método do objeto
@app.get('/')
def root():
return {'message':'Hello World'}
@app.get('/posts', response_model=List[schemas.Post])
def get_posts(db: Session = Depends(get_db)):
# cursor.execute("""SELECT * FROM posts""")
# posts = cursor.fetchall()
posts = db.query(models.Post).all()
return posts
@app.post('/posts', status_code=status.HTTP_201_CREATED, response_model = schemas.Post)
def create_posts(post: schemas.PostCreate, db: Session = Depends(get_db)):
# cursor.execute("""INSERT INTO posts (title, content, published) VALUES (%s, %s, %s) RETURNING *""",(post.title, post.content, post.published))
# #RETORNA O VALOR DA QUERY DO CURSOR
# new_post = cursor.fetchone()
# #COMITAR AS MUDANÇAS NA BASE DE DADOS
# conn.commit()
#podemos adicionar cada entrada manualmente, ou podemos dar unpack no modelo do pydantic
# new_post = models.Post(title = post.title, content = post.content, published = post.published)
new_post = models.Post(**post.dict())
db.add(new_post)
db.commit()
#refresh age como um RETURNING *
db.refresh(new_post)
return new_post
@app.get('/posts/{id}', response_model = schemas.Post)
def get_post(id: int, db: Session = Depends(get_db)):
# cursor.execute("""SELECT * FROM posts WHERE id = %s""", (str(id)))
# post = cursor.fetchone()
post = db.query(models.Post).filter(models.Post.id == id).first()
if not post:
raise HTTPException(status_code = status.HTTP_404_NOT_FOUND, detail = f'post with id: {id} was not found')
return post
@app.delete('/posts/{id}', status_code=status.HTTP_204_NO_CONTENT)
def delete_post(id: int, db: Session = Depends(get_db)):
# cursor.execute("""DELETE FROM posts WHERE id = %s RETURNING *""", (str(id)))
# deleted_post = cursor.fetchone()
# conn.commit()
post = db.query(models.Post).filter(models.Post.id == id)
if post.first() == None:
raise HTTPException(status_code = status.HTTP_404_NOT_FOUND, detail = f'post with id: {id} was not found')
post.delete(synchronize_session=False)
db.commit()
return Response(status_code=status.HTTP_204_NO_CONTENT)
@app.put('/posts/{id}', response_model = schemas.Post)
def update_post(id: int, post: schemas.PostCreate, db: Session = Depends(get_db)):
# cursor.execute("""UPDATE posts SET title = %s, content = %s, published = %s WHERE id = %s RETURNING *""", (post.title, post.content, post.published, str(id)))
# updated_post = cursor.fetchone()
# conn.commit()
post_query = db.query(models.Post).filter(models.Post.id == id)
updated_post = post_query.first()
if updated_post == None:
raise HTTPException(status_code = status.HTTP_404_NOT_FOUND, detail = f'post with id: {id} was not found')
post_query.update(post.dict(), synchronize_session=False)
db.commit()
return post_query.first()
@app.post('/users', status_code=status.HTTP_201_CREATED, response_model=schemas.UserOut)
def create_user(user: schemas.UserCreate, db: Session = Depends(get_db)):
    """Create a user, storing a bcrypt hash of the password.

    Returns the created user (UserOut excludes the password hash).
    """
    # Bug fix: hash the plaintext password before persisting; the
    # module-level pwd_context exists for exactly this purpose but was
    # never used
    user.password = pwd_context.hash(user.password)

    new_user = models.User(**user.dict())
    db.add(new_user)
    db.commit()
    # refresh acts like RETURNING *
    db.refresh(new_user)
    return new_user
209ab058269786d1bc289dd5ee3e9aba0db6d8a7 | 2,594 | py | Python | objectextensions/extension.py | immijimmi/objectextensions | 783d799263e37757116cb7d2785e889e48c77506 | [
"MIT"
] | null | null | null | objectextensions/extension.py | immijimmi/objectextensions | 783d799263e37757116cb7d2785e889e48c77506 | [
"MIT"
] | null | null | null | objectextensions/extension.py | immijimmi/objectextensions | 783d799263e37757116cb7d2785e889e48c77506 | [
"MIT"
] | null | null | null | from wrapt import decorator
from inspect import getfullargspec
from copy import deepcopy
from typing import Generator, Callable, Any, Union, Type
from .constants import ErrorMessages
class Extension:
    """Base class for extensions applied to an Extendable target class."""
    @staticmethod
    def can_extend(target_cls: Type["Extendable"]) -> bool:
        """
        Should return a bool indicating whether this Extension can be applied to the target class
        """
        raise NotImplementedError
    @staticmethod
    def extend(target_cls: Type["Extendable"]) -> None:
        """
        Any modification of the target class should take place in this function
        """
        pass
    @staticmethod
    def _wrap(target_cls: Type["Extendable"], method_name: str,
              gen_func: Callable[["Extendable", Any, Any], Generator[None, Any, None]]) -> None:
        """
        Used to wrap an existing method on the target class.
        Passes copies of the method parameters to the generator function provided.
        The generator function should yield once,
        with the yield statement receiving a copy of the result of executing the core method
        """
        method = getattr(target_cls, method_name)
        method_args = getfullargspec(method).args
        # Only instance methods can be wrapped: the hook receives 'self'
        if len(method_args) == 0 or method_args[0] != "self":
            ErrorMessages.wrap_static(method_name)
        @decorator # This will preserve the original method signature when wrapping the method
        def wrapper(func, self, args, kwargs):
            # Run the pre-hook up to its yield, on *copies* of the args
            # so the hook cannot mutate what the real method receives
            gen = gen_func(self, *try_copy(args), **try_copy(kwargs))
            next(gen)
            result = func(*args, **kwargs)
            # Resume the generator with a copy of the result (post-hook);
            # StopIteration just means the hook finished
            try:
                gen.send(try_copy(result))
            except StopIteration:
                pass
            return result
        setattr(target_cls, method_name, wrapper(method))
    @staticmethod
    def _set(target: Union[Type["Extendable"], "Extendable"], attribute_name: str, value: Any) -> None:
        """
        Used to safely add new attributes to an extendable class or instance. In contrast with assigning them directly,
        this method will raise an error if the attribute already exists (for example, if another extension added it)
        to ensure compatibility issues are flagged and can be dealt with easily
        """
        if hasattr(target, attribute_name):
            ErrorMessages.duplicate_attribute(attribute_name)
        setattr(target, attribute_name, value)
def try_copy(item: Any) -> Any:
    """
    A failsafe deepcopy wrapper

    Returns a deep copy of item when possible, otherwise the item
    itself (e.g. for objects that cannot be copied or pickled).
    """
    try:
        return deepcopy(item)
    except Exception:
        # Bug fix: a bare except also swallowed SystemExit and
        # KeyboardInterrupt; only copy failures should fall through
        return item
| 31.253012 | 119 | 0.646106 |
95f00be37bfc9cafbcbee9651eb5da002664fb14 | 2,600 | py | Python | scripts/plot_training_metrics.py | shahrukhqasim/HGCalML | 2808564b31c89d9b7eb882734f6aebc6f35e94f3 | [
"BSD-3-Clause"
] | null | null | null | scripts/plot_training_metrics.py | shahrukhqasim/HGCalML | 2808564b31c89d9b7eb882734f6aebc6f35e94f3 | [
"BSD-3-Clause"
] | null | null | null | scripts/plot_training_metrics.py | shahrukhqasim/HGCalML | 2808564b31c89d9b7eb882734f6aebc6f35e94f3 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
import sql_credentials
import experiment_database_reading_manager
import argparse
from training_metrics_plots import TrainingMetricPlots
# Command-line interface
# NOTE(review): --plot_all is parsed but never used below — confirm intent
parser = argparse.ArgumentParser(
    'Produce running metrics plot (loss, efficiency and more)')
parser.add_argument('experiment_name',
                    help='Experiment name on the server (normally exists in model_train_output/unique_id.txt) or database file path (check is_database_file argument)')
parser.add_argument('--is_database_file', help='If you want to plot from a database file instead of server \nThe database file is normally located in model_train_output/training_metrics.db', action='store_true')
parser.add_argument('--plot_all',
                    help='Plot only a few metrics for faster performance or all of them?',action='store_true')
parser.add_argument('--running_average',
                    help='N running average elements, 1 means no running average', default='10')
parser.add_argument('output',
                    help='HTML file where to produce output')
parser.add_argument('--ignore_cache',
                    help='''Normally this script caches data so it doesn't have to pull everything
                    again and again but this option will ignore the cache''',action='store_true')
parser.add_argument('--trackml',
                    help='''For trackml problem, will plot different metrics''',action='store_true')
args = parser.parse_args()
# Read metrics either from the remote SQL server or from a local
# database file, depending on --is_database_file
if not args.is_database_file:
    print("Gonna get data from the server, using experiment_name %s" % args.experiment_name)
    manager = experiment_database_reading_manager.ExperimentDatabaseReadingManager(mysql_credentials=sql_credentials.credentials)
else:
    manager = experiment_database_reading_manager.ExperimentDatabaseReadingManager(file=args.experiment_name)
# TrackML runs store different metrics under a different table name
if args.trackml:
    plotter = TrainingMetricPlots(manager, args.experiment_name, ignore_cache=args.ignore_cache, cache_path='training_metrics_plotter_trackml.cache',
                                  metrics=['beta_threshold', 'distance_threshold', 'loss', 'trackml_score', 'num_truth_particles', 'num_reco_tracks'],
                                  titles=['Beta threshold', 'Distance threshold', 'loss', 'trackml score', 'Num truth particles', 'Num reco tracks'],
                                  database_table_name='training_performance_metrics_trackml'
                                  )
else:
    plotter = TrainingMetricPlots(manager, args.experiment_name, ignore_cache=args.ignore_cache)
plotter.do_plot_to_html(args.output, average_over=int(args.running_average))
f279b1fbcca1a462e6822d1175a2f0f0c6071c79 | 1,402 | py | Python | app/app/urls.py | anhlt59/Blog-app | f91eeaddcd338b9e8d3cf53b99affea9df923eda | [
"MIT"
] | null | null | null | app/app/urls.py | anhlt59/Blog-app | f91eeaddcd338b9e8d3cf53b99affea9df923eda | [
"MIT"
] | null | null | null | app/app/urls.py | anhlt59/Blog-app | f91eeaddcd338b9e8d3cf53b99affea9df923eda | [
"MIT"
] | null | null | null | """app URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from rest_framework.documentation import include_docs_urls
from django.conf.urls.static import static
from django.conf import settings
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.conf.urls.i18n import i18n_patterns
from django.shortcuts import redirect
# Brand the Django admin interface.
admin.site.site_header = "Blog Admin"
admin.site.site_title = "Blog Admin Portal"
admin.site.index_title = "Welcome to Blog Researcher Portal"

# Language-aware URL routing: no prefix is added for the default language.
_localized_urls = i18n_patterns(
    path('admin/', admin.site.urls),
    path('', include('blog.urls')),
    prefix_default_language=False,
)

# Serve user-uploaded media in development; staticfiles_urlpatterns() is
# intentionally not appended here.
urlpatterns = _localized_urls + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
943d0d0b2b48850762f7eb43a40bde7eafee6e6e | 2,565 | py | Python | vklad/migrations/0001_initial.py | ghostforpy/bonds-docker | fda77225b85264cb4ba06b15ff63bc807858425a | [
"MIT"
] | 2 | 2020-09-08T12:51:56.000Z | 2021-08-18T15:27:52.000Z | vklad/migrations/0001_initial.py | ghostforpy/bonds-docker | fda77225b85264cb4ba06b15ff63bc807858425a | [
"MIT"
] | 1 | 2021-12-13T20:43:35.000Z | 2021-12-13T20:43:35.000Z | vklad/migrations/0001_initial.py | ghostforpy/bonds-docker | fda77225b85264cb4ba06b15ff63bc807858425a | [
"MIT"
] | null | null | null | # Generated by Django 3.0.5 on 2020-05-15 08:36
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the vklad app.

    Auto-generated by Django (3.0.5); creates the UserVklad deposit model
    plus its two history tables. Do not edit the operations by hand —
    regenerate with `makemigrations` instead.
    """

    initial = True

    dependencies = [
        # Needs the user model for the OneToOne/FK owner relations below.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # One deposit record per user (OneToOne owner).
        migrations.CreateModel(
            name='UserVklad',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('invest_cash', models.DecimalField(decimal_places=2, default=0, max_digits=10)),
                ('today_cash', models.DecimalField(decimal_places=2, default=0, max_digits=10)),
                ('ostatok', models.DecimalField(decimal_places=2, default=0, max_digits=10)),
                ('created', models.DateTimeField(auto_now_add=True, db_index=True)),
                ('percent_profit', models.DecimalField(decimal_places=2, default=0, max_digits=5)),
                ('year_percent_profit', models.DecimalField(decimal_places=2, default=0, max_digits=5)),
                ('owner', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Deposit/withdrawal events for a deposit (popolnenie=True means a top-up).
        migrations.CreateModel(
            name='VkladInvestHistory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateField()),
                ('cash', models.DecimalField(decimal_places=2, default=0, max_digits=10)),
                ('popolnenie', models.BooleanField(default=True)),
                ('vklad', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='vklad.UserVklad')),
            ],
            options={
                'ordering': ['-date', 'cash'],
            },
        ),
        # Profit-percentage snapshots over time, newest first.
        migrations.CreateModel(
            name='VkladHistory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateField()),
                ('percent_profit', models.DecimalField(decimal_places=2, default=0, max_digits=5)),
                ('year_percent_profit', models.DecimalField(decimal_places=2, default=0, max_digits=5)),
                ('vklad', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='vklad.UserVklad')),
            ],
            options={
                'ordering': ['-date'],
            },
        ),
    ]
| 45 | 122 | 0.596881 |
a34e5952d01cb043976d9ee3e15d1e59fafc07cd | 36,823 | py | Python | venv/lib/python2.7/site-packages/ansible/modules/web_infrastructure/acme_certificate.py | haind27/test01 | 7f86c0a33eb0874a6c3f5ff9a923fd0cfc8ef852 | [
"MIT"
] | null | null | null | venv/lib/python2.7/site-packages/ansible/modules/web_infrastructure/acme_certificate.py | haind27/test01 | 7f86c0a33eb0874a6c3f5ff9a923fd0cfc8ef852 | [
"MIT"
] | null | null | null | venv/lib/python2.7/site-packages/ansible/modules/web_infrastructure/acme_certificate.py | haind27/test01 | 7f86c0a33eb0874a6c3f5ff9a923fd0cfc8ef852 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016 Michael Gruener <michael.gruener@chaosmoon.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Module maturity/support metadata consumed by Ansible's tooling (ansible-doc).
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: acme_certificate
author: "Michael Gruener (@mgruener)"
version_added: "2.2"
short_description: Create SSL certificates with an ACME protocol endpoint
description:
- "Create and renew SSL certificates with a CA supporting the ACME protocol,
such as Let's Encrypt (U(https://letsencrypt.org)). For details see
U(https://letsencrypt.org). The current implementation supports the
C(http-01) and C(dns-01) challenges."
- "To use this module, it has to be executed twice. Either as two
different tasks in the same run or during two runs. Note that the output
of the first run needs to be recorded and passed to the second run as the
module argument C(data)."
- "Between these two tasks you have to fulfill the required steps for the
chosen challenge by whatever means necessary. For C(http-01) that means
creating the necessary challenge file on the destination webserver. For
C(dns-01) the necessary dns record has to be created.
It is I(not) the responsibility of this module to perform these steps."
- "For details on how to fulfill these challenges, you might have to read through
U(https://tools.ietf.org/html/draft-ietf-acme-acme-12#section-8).
Also, consider the examples provided for this module."
- "Although the defaults are chosen so that the module can be used with
the Let's Encrypt CA, the module can be used with any service using the ACME
v1 or v2 protocol."
- "At least one of C(dest) and C(fullchain_dest) must be specified."
- "Note: this module was called C(letsencrypt) before Ansible 2.6. The usage
did not change."
extends_documentation_fragment:
- acme
options:
account_email:
description:
- "The email address associated with this account."
- "It will be used for certificate expiration warnings."
agreement:
description:
- "URI to a terms of service document you agree to when using the
ACME v1 service at C(acme_directory)."
- Default is latest gathered from C(acme_directory) URL.
- This option will only be used when C(acme_version) is 1.
terms_agreed:
description:
- "Boolean indicating whether you agree to the terms of service document."
- "ACME servers can require this to be true."
- This option will only be used when C(acme_version) is not 1.
type: bool
default: 'no'
version_added: "2.5"
modify_account:
description:
- "Boolean indicating whether the module should create the account if
necessary, and update its contact data."
- "Set to C(no) if you want to use C(acme_account) to manage your
account instead, and to avoid accidental creation of a new account
using an old key if you changed the account key with C(acme_account)."
- "If set to C(no), C(terms_agreed) and C(account_email) are ignored."
type: bool
default: 'yes'
version_added: "2.6"
challenge:
description: The challenge to be performed.
choices: [ 'http-01', 'dns-01']
default: 'http-01'
csr:
description:
- "File containing the CSR for the new certificate."
- "Can be created with C(openssl req ...)."
- "The CSR may contain multiple Subject Alternate Names, but each one
will lead to an individual challenge that must be fulfilled for the
CSR to be signed."
- "I(Note): the private key used to create the CSR I(must not) be the the
account key. This is a bad idea from a security point of view, and
the CA should not accept the CSR. Let's Encrypt will return an error
in this case."
required: true
aliases: ['src']
data:
description:
- "The data to validate ongoing challenges. This must be specified for
the second run of the module only."
- "The value that must be used here will be provided by a previous use
of this module. See the examples for more details."
- "I(Note): the C(data) option was marked as C(no_log) up to
Ansible 2.5. From Ansible 2.6 on, it is no longer marked this way
as it causes error messages to be come unusable, and C(data) does
not contain any information which can be used without having
access to the account key or which are not public anyway."
dest:
description:
- "The destination file for the certificate."
- "Required if C(fullchain_dest) is not specified."
aliases: ['cert']
fullchain_dest:
description:
- "The destination file for the full chain (i.e. certificate followed
by chain of intermediate certificates)."
- "Required if C(dest) is not specified."
version_added: 2.5
aliases: ['fullchain']
chain_dest:
description:
- If specified, the intermediate certificate will be written to this file.
aliases: ['chain']
version_added: 2.5
remaining_days:
description:
- "The number of days the certificate must have left being valid.
If C(cert_days < remaining_days), then it will be renewed.
If the certificate is not renewed, module return values will not
include C(challenge_data)."
default: 10
deactivate_authzs:
description:
- "Deactivate authentication objects (authz) after issuing a certificate,
or when issuing the certificate failed."
- "Authentication objects are bound to an account key and remain valid
for a certain amount of time, and can be used to issue certificates
without having to re-authenticate the domain. This can be a security
concern."
type: bool
default: 'no'
version_added: 2.6
force:
description:
- Enforces the execution of the challenge and validation, even if an
existing certificate is still valid.
- This is especially helpful when having an updated CSR e.g. with
additional domains for which a new certificate is desired.
type: bool
default: 'no'
version_added: 2.6
'''
EXAMPLES = R'''
### Example with HTTP challenge ###
- name: Create a challenge for sample.com using a account key from a variable.
acme_certificate:
account_key_content: "{{ account_private_key }}"
csr: /etc/pki/cert/csr/sample.com.csr
dest: /etc/httpd/ssl/sample.com.crt
register: sample_com_challenge
# Alternative first step:
- name: Create a challenge for sample.com using a account key from hashi vault.
acme_certificate:
account_key_content: "{{ lookup('hashi_vault', 'secret=secret/account_private_key:value') }}"
csr: /etc/pki/cert/csr/sample.com.csr
fullchain_dest: /etc/httpd/ssl/sample.com-fullchain.crt
register: sample_com_challenge
# Alternative first step:
- name: Create a challenge for sample.com using a account key file.
acme_certificate:
account_key_src: /etc/pki/cert/private/account.key
csr: /etc/pki/cert/csr/sample.com.csr
dest: /etc/httpd/ssl/sample.com.crt
fullchain_dest: /etc/httpd/ssl/sample.com-fullchain.crt
register: sample_com_challenge
# perform the necessary steps to fulfill the challenge
# for example:
#
# - copy:
# dest: /var/www/html/{{ sample_com_challenge['challenge_data']['sample.com']['http-01']['resource'] }}
# content: "{{ sample_com_challenge['challenge_data']['sample.com']['http-01']['resource_value'] }}"
# when: sample_com_challenge is changed
- name: Let the challenge be validated and retrieve the cert and intermediate certificate
acme_certificate:
account_key_src: /etc/pki/cert/private/account.key
csr: /etc/pki/cert/csr/sample.com.csr
dest: /etc/httpd/ssl/sample.com.crt
fullchain_dest: /etc/httpd/ssl/sample.com-fullchain.crt
chain_dest: /etc/httpd/ssl/sample.com-intermediate.crt
data: "{{ sample_com_challenge }}"
### Example with DNS challenge against production ACME server ###
- name: Create a challenge for sample.com using a account key file.
acme_certificate:
account_key_src: /etc/pki/cert/private/account.key
account_email: myself@sample.com
src: /etc/pki/cert/csr/sample.com.csr
cert: /etc/httpd/ssl/sample.com.crt
challenge: dns-01
acme_directory: https://acme-v01.api.letsencrypt.org/directory
# Renew if the certificate is at least 30 days old
remaining_days: 60
register: sample_com_challenge
# perform the necessary steps to fulfill the challenge
# for example:
#
# - route53:
# zone: sample.com
# record: "{{ sample_com_challenge.challenge_data['sample.com']['dns-01'].record }}"
# type: TXT
# ttl: 60
# # Note: route53 requires TXT entries to be enclosed in quotes
# value: "{{ sample_com_challenge.challenge_data['sample.com']['dns-01'].resource_value }}"
# when: sample_com_challenge is changed
#
# Alternative way:
#
# - route53:
# zone: sample.com
# record: "{{ item.key }}"
# type: TXT
# ttl: 60
# # Note: item.value is a list of TXT entries, and route53
# # requires every entry to be enclosed in quotes
# value: "{{ item.value | map('regex_replace', '^(.*)$', '\'\\1\'' ) | list }}"
# with_dict: sample_com_challenge.challenge_data_dns
# when: sample_com_challenge is changed
- name: Let the challenge be validated and retrieve the cert and intermediate certificate
acme_certificate:
account_key_src: /etc/pki/cert/private/account.key
account_email: myself@sample.com
src: /etc/pki/cert/csr/sample.com.csr
cert: /etc/httpd/ssl/sample.com.crt
fullchain: /etc/httpd/ssl/sample.com-fullchain.crt
chain: /etc/httpd/ssl/sample.com-intermediate.crt
challenge: dns-01
acme_directory: https://acme-v01.api.letsencrypt.org/directory
remaining_days: 60
data: "{{ sample_com_challenge }}"
'''
RETURN = '''
cert_days:
description: the number of days the certificate remains valid.
returned: success
type: int
challenge_data:
description: per domain / challenge type challenge data
returned: changed
type: complex
contains:
resource:
description: the challenge resource that must be created for validation
returned: changed
type: string
sample: .well-known/acme-challenge/evaGxfADs6pSRb2LAv9IZf17Dt3juxGJ-PCt92wr-oA
resource_value:
description: the value the resource has to produce for the validation
returned: changed
type: string
sample: IlirfxKKXA...17Dt3juxGJ-PCt92wr-oA
record:
description: the full DNS record's name for the challenge
returned: changed and challenge is C(dns-01)
type: string
sample: _acme-challenge.example.com
version_added: "2.5"
challenge_data_dns:
description: list of TXT values per DNS record, in case challenge is C(dns-01)
returned: changed
type: dict
version_added: "2.5"
authorizations:
description: ACME authorization data.
returned: changed
type: complex
contains:
authorization:
description: ACME authorization object. See U(https://tools.ietf.org/html/draft-ietf-acme-acme-12#section-7.1.4)
returned: success
type: dict
order_uri:
description: ACME order URI.
returned: changed
type: string
version_added: "2.5"
finalization_uri:
description: ACME finalization URI.
returned: changed
type: string
version_added: "2.5"
account_uri:
description: ACME account URI.
returned: changed
type: string
version_added: "2.5"
'''
from ansible.module_utils.acme import (
ModuleFailException, fetch_url, write_file, nopad_b64, simple_get, ACMEAccount
)
import base64
import hashlib
import locale
import os
import re
import textwrap
import time
from datetime import datetime
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_text, to_bytes
def get_cert_days(module, cert_file):
    '''
    Return the number of days the certificate in cert_file remains valid,
    or -1 if the file was not found. If cert_file contains more than one
    certificate, only the first one will be considered.

    Raises ModuleFailException if the 'Not After' date cannot be found or
    parsed from the openssl output.
    '''
    # Local import: only needed for the UTC epoch conversion below.
    import calendar

    if not os.path.exists(cert_file):
        return -1

    openssl_bin = module.get_bin_path('openssl', True)
    openssl_cert_cmd = [openssl_bin, "x509", "-in", cert_file, "-noout", "-text"]
    dummy, out, dummy = module.run_command(openssl_cert_cmd, check_rc=True, encoding=None)
    try:
        not_after_str = re.search(r"\s+Not After\s*:\s+(.*)", out.decode('utf8')).group(1)
        # The 'Not After' timestamp printed by openssl is in GMT. The previous
        # implementation fed the parsed struct_time to time.mktime(), which
        # interprets it in the *local* timezone while the comparison below uses
        # datetime.utcnow() — skewing the result by the host's UTC offset.
        # calendar.timegm() treats the struct_time as UTC, matching utcnow().
        not_after = datetime.utcfromtimestamp(calendar.timegm(time.strptime(not_after_str, '%b %d %H:%M:%S %Y %Z')))
    except AttributeError:
        raise ModuleFailException("No 'Not after' date found in {0}".format(cert_file))
    except ValueError:
        raise ModuleFailException("Failed to parse 'Not after' date of {0}".format(cert_file))
    now = datetime.utcnow()
    return (not_after - now).days
class ACMEClient(object):
    '''
    ACME client class. Uses an ACME account object and a CSR to
    start and validate ACME challenges and download the respective
    certificates.

    Supports both the ACME v1 (draft-02) and v2 (draft-09/12) protocol
    flows; self.version selects between them throughout.
    '''

    def __init__(self, module):
        self.module = module
        self.version = module.params['acme_version']
        self.challenge = module.params['challenge']
        self.csr = module.params['csr']
        self.dest = module.params.get('dest')
        self.fullchain_dest = module.params.get('fullchain_dest')
        self.chain_dest = module.params.get('chain_dest')
        self.account = ACMEAccount(module)
        self.directory = self.account.directory
        # 'data' is the output of a previous (first-step) module run; for
        # ACME v2 it carries the order and finalization URIs.
        self.data = module.params['data']
        self.authorizations = None
        self.cert_days = -1
        self.order_uri = self.data.get('order_uri') if self.data else None
        self.finalize_uri = self.data.get('finalize_uri') if self.data else None

        # Make sure account exists
        modify_account = module.params['modify_account']
        if modify_account or self.version > 1:
            contact = []
            if module.params['account_email']:
                contact.append('mailto:' + module.params['account_email'])
            self.changed = self.account.init_account(
                contact,
                agreement=module.params.get('agreement'),
                terms_agreed=module.params.get('terms_agreed'),
                allow_creation=modify_account,
                update_contact=modify_account
            )
        else:
            # This happens if modify_account is False and the ACME v1
            # protocol is used. In this case, we do not call init_account()
            # to avoid accidental creation of an account. This is OK
            # since for ACME v1, the account URI is not needed to send a
            # signed ACME request.
            pass

        # Extract list of domains from CSR
        if not os.path.exists(self.csr):
            raise ModuleFailException("CSR %s not found" % (self.csr))

        self._openssl_bin = module.get_bin_path('openssl', True)
        self.domains = self._get_csr_domains()

    def _get_csr_domains(self):
        '''
        Parse the CSR and return the set of requested domains
        (the subject CN plus all DNS: Subject Alternative Names).
        '''
        openssl_csr_cmd = [self._openssl_bin, "req", "-in", self.csr, "-noout", "-text"]
        dummy, out, dummy = self.module.run_command(openssl_csr_cmd, check_rc=True)

        domains = set([])
        common_name = re.search(r"Subject:.*? CN\s?=\s?([^\s,;/]+)", to_text(out, errors='surrogate_or_strict'))
        if common_name is not None:
            domains.add(common_name.group(1))
        subject_alt_names = re.search(
            r"X509v3 Subject Alternative Name: (?:critical)?\n +([^\n]+)\n",
            to_text(out, errors='surrogate_or_strict'), re.MULTILINE | re.DOTALL)
        if subject_alt_names is not None:
            for san in subject_alt_names.group(1).split(", "):
                if san.startswith("DNS:"):
                    domains.add(san[4:])
        return domains

    def _add_or_update_auth(self, domain, auth):
        '''
        Add or update the given authorization in the global authorizations list.
        Return True if the auth was updated/added and False if no change was
        necessary.
        '''
        if self.authorizations.get(domain) == auth:
            return False
        self.authorizations[domain] = auth
        return True

    def _new_authz_v1(self, domain):
        '''
        Create a new authorization for the given domain.
        Return the authorization object of the new authorization
        https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-6.4

        Returns None (implicitly) when no account URI is available yet.
        '''
        if self.account.uri is None:
            return
        new_authz = {
            "resource": "new-authz",
            "identifier": {"type": "dns", "value": domain},
        }

        result, info = self.account.send_signed_request(self.directory['new-authz'], new_authz)
        if info['status'] not in [200, 201]:
            raise ModuleFailException("Error requesting challenges: CODE: {0} RESULT: {1}".format(info['status'], result))
        else:
            result['uri'] = info['location']
            return result

    def _get_challenge_data(self, auth, domain):
        '''
        Returns a dict with the data for all proposed (and supported) challenges
        of the given authorization.
        '''
        data = {}
        # no need to choose a specific challenge here as this module
        # is not responsible for fulfilling the challenges. Calculate
        # and return the required information for each challenge.
        for challenge in auth['challenges']:
            type = challenge['type']
            token = re.sub(r"[^A-Za-z0-9_\-]", "_", challenge['token'])
            keyauthorization = self.account.get_keyauthorization(token)

            if type == 'http-01':
                # https://tools.ietf.org/html/draft-ietf-acme-acme-09#section-8.3
                resource = '.well-known/acme-challenge/' + token
                data[type] = {'resource': resource, 'resource_value': keyauthorization}
            elif type == 'dns-01':
                # https://tools.ietf.org/html/draft-ietf-acme-acme-09#section-8.5
                resource = '_acme-challenge'
                value = nopad_b64(hashlib.sha256(to_bytes(keyauthorization)).digest())
                # For a wildcard domain, the TXT record lives at the base name.
                record = (resource + domain[1:]) if domain.startswith('*.') else (resource + '.' + domain)
                data[type] = {'resource': resource, 'resource_value': value, 'record': record}
            else:
                # Unsupported challenge types are simply skipped.
                continue
        return data

    def _fail_challenge(self, domain, auth, error):
        '''
        Aborts with a specific error for a challenge. 'error' is a format
        string with one placeholder for the domain.
        '''
        error_details = ''
        # multiple challenges could have failed at this point, gather error
        # details for all of them before failing
        for challenge in auth['challenges']:
            if challenge['status'] == 'invalid':
                error_details += ' CHALLENGE: {0}'.format(challenge['type'])
                if 'error' in challenge:
                    error_details += ' DETAILS: {0};'.format(challenge['error']['detail'])
                else:
                    error_details += ';'
        raise ModuleFailException("{0}: {1}".format(error.format(domain), error_details))

    def _validate_challenges(self, domain, auth):
        '''
        Validate the authorization provided in the auth dict. Returns True
        when the validation was successful and False when it was not.

        Triggers the configured challenge type, then polls the authorization
        URI until it reaches a terminal state.
        '''
        for challenge in auth['challenges']:
            if self.challenge != challenge['type']:
                continue

            uri = challenge['uri'] if self.version == 1 else challenge['url']

            challenge_response = {}
            if self.version == 1:
                # ACME v1 requires the key authorization in the payload;
                # v2 expects an empty object.
                token = re.sub(r"[^A-Za-z0-9_\-]", "_", challenge['token'])
                keyauthorization = self.account.get_keyauthorization(token)
                challenge_response["resource"] = "challenge"
                challenge_response["keyAuthorization"] = keyauthorization
            result, info = self.account.send_signed_request(uri, challenge_response)
            if info['status'] not in [200, 202]:
                raise ModuleFailException("Error validating challenge: CODE: {0} RESULT: {1}".format(info['status'], result))

        status = ''

        # Poll the authorization until the CA reports a terminal state.
        while status not in ['valid', 'invalid', 'revoked']:
            result = simple_get(self.module, auth['uri'])
            result['uri'] = auth['uri']
            if self._add_or_update_auth(domain, result):
                self.changed = True
            # draft-ietf-acme-acme-02
            # "status (required, string): ...
            # If this field is missing, then the default value is "pending"."
            if self.version == 1 and 'status' not in result:
                status = 'pending'
            else:
                status = result['status']
            time.sleep(2)

        if status == 'invalid':
            self._fail_challenge(domain, result, 'Authorization for {0} returned invalid')

        return status == 'valid'

    def _finalize_cert(self):
        '''
        Create a new certificate based on the csr.
        Return the certificate URL (ACME v2 order finalization).
        https://tools.ietf.org/html/draft-ietf-acme-acme-09#section-7.4
        '''
        openssl_csr_cmd = [self._openssl_bin, "req", "-in", self.csr, "-outform", "DER"]
        dummy, out, dummy = self.module.run_command(openssl_csr_cmd, check_rc=True)

        new_cert = {
            "csr": nopad_b64(to_bytes(out)),
        }
        result, info = self.account.send_signed_request(self.finalize_uri, new_cert)
        if info['status'] not in [200]:
            raise ModuleFailException("Error new cert: CODE: {0} RESULT: {1}".format(info['status'], result))

        order = info['location']

        # Poll the order until it leaves the processing state.
        status = result['status']
        while status not in ['valid', 'invalid']:
            time.sleep(2)
            result = simple_get(self.module, order)
            status = result['status']

        if status != 'valid':
            raise ModuleFailException("Error new cert: CODE: {0} STATUS: {1} RESULT: {2}".format(info['status'], status, result))

        return result['certificate']

    def _der_to_pem(self, der_cert):
        '''
        Convert the DER format certificate in der_cert to a PEM format
        certificate and return it.
        '''
        return """-----BEGIN CERTIFICATE-----\n{0}\n-----END CERTIFICATE-----\n""".format(
            "\n".join(textwrap.wrap(base64.b64encode(der_cert).decode('utf8'), 64)))

    def _download_cert(self, url):
        '''
        Download and parse the certificate chain.
        https://tools.ietf.org/html/draft-ietf-acme-acme-09#section-7.4.2

        Returns a dict with keys 'cert' (leaf PEM) and 'chain' (list of
        intermediate PEMs).
        '''
        resp, info = fetch_url(self.module, url, headers={'Accept': 'application/pem-certificate-chain'})
        try:
            content = resp.read()
        except AttributeError:
            content = info.get('body')

        if not content or not info['content-type'].startswith('application/pem-certificate-chain'):
            raise ModuleFailException("Cannot download certificate chain from {0}: {1} (headers: {2})".format(url, content, info))

        cert = None
        chain = []

        # Parse data: the first PEM block is the leaf certificate, the
        # remaining blocks form the intermediate chain.
        lines = content.decode('utf-8').splitlines(True)
        current = []
        for line in lines:
            if line.strip():
                current.append(line)
            if line.startswith('-----END CERTIFICATE-----'):
                if cert is None:
                    cert = ''.join(current)
                else:
                    chain.append(''.join(current))
                current = []

        # Process link-up headers if there was no chain in reply
        if not chain and 'link' in info:
            link = info['link']
            parsed_link = re.match(r'<(.+)>;rel="(\w+)"', link)
            if parsed_link and parsed_link.group(2) == "up":
                chain_link = parsed_link.group(1)
                chain_result, chain_info = fetch_url(self.module, chain_link, method='GET')
                if chain_info['status'] in [200, 201]:
                    chain.append(self._der_to_pem(chain_result.read()))

        # 'current' being non-empty means a PEM block was left unterminated.
        if cert is None or current:
            raise ModuleFailException("Failed to parse certificate chain download from {0}: {1} (headers: {2})".format(url, content, info))
        return {'cert': cert, 'chain': chain}

    def _new_cert_v1(self):
        '''
        Create a new certificate based on the CSR (ACME v1 protocol).
        Return the certificate object as dict
        https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-6.5
        '''
        openssl_csr_cmd = [self._openssl_bin, "req", "-in", self.csr, "-outform", "DER"]
        dummy, out, dummy = self.module.run_command(openssl_csr_cmd, check_rc=True)

        new_cert = {
            "resource": "new-cert",
            "csr": nopad_b64(to_bytes(out)),
        }
        result, info = self.account.send_signed_request(self.directory['new-cert'], new_cert)

        # The intermediate certificate is advertised via a rel="up" Link header.
        chain = []
        if 'link' in info:
            link = info['link']
            parsed_link = re.match(r'<(.+)>;rel="(\w+)"', link)
            if parsed_link and parsed_link.group(2) == "up":
                chain_link = parsed_link.group(1)
                chain_result, chain_info = fetch_url(self.module, chain_link, method='GET')
                if chain_info['status'] in [200, 201]:
                    chain = [self._der_to_pem(chain_result.read())]

        if info['status'] not in [200, 201]:
            raise ModuleFailException("Error new cert: CODE: {0} RESULT: {1}".format(info['status'], result))
        else:
            return {'cert': self._der_to_pem(result), 'uri': info['location'], 'chain': chain}

    def _new_order_v2(self):
        '''
        Start a new certificate order (ACME v2 protocol).
        https://tools.ietf.org/html/draft-ietf-acme-acme-09#section-7.4

        Populates self.authorizations, self.order_uri and self.finalize_uri.
        '''
        identifiers = []
        for domain in self.domains:
            identifiers.append({
                'type': 'dns',
                'value': domain,
            })
        new_order = {
            "identifiers": identifiers
        }
        result, info = self.account.send_signed_request(self.directory['newOrder'], new_order)

        if info['status'] not in [201]:
            raise ModuleFailException("Error new order: CODE: {0} RESULT: {1}".format(info['status'], result))

        for auth_uri in result['authorizations']:
            auth_data = simple_get(self.module, auth_uri)
            auth_data['uri'] = auth_uri
            domain = auth_data['identifier']['value']
            if auth_data.get('wildcard', False):
                # Wildcard authorizations carry the base domain; re-add the prefix.
                domain = '*.{0}'.format(domain)
            self.authorizations[domain] = auth_data

        self.order_uri = info['location']
        self.finalize_uri = result['finalize']

    def is_first_step(self):
        '''
        Return True if this is the first execution of this module, i.e. if a
        sufficient data object from a first run has not been provided.
        '''
        if (self.data is None) or ('authorizations' not in self.data):
            return True
        if self.finalize_uri is None and self.version != 1:
            # ACME v2 additionally requires the finalization URI from the first run.
            return True
        return False

    def start_challenges(self):
        '''
        Create new authorizations for all domains of the CSR,
        respectively start a new order for ACME v2.
        '''
        self.authorizations = {}
        if self.version == 1:
            for domain in self.domains:
                new_auth = self._new_authz_v1(domain)
                self._add_or_update_auth(domain, new_auth)
        else:
            self._new_order_v2()
        self.changed = True

    def get_challenges_data(self):
        '''
        Get challenge details for the chosen challenge type.
        Return a tuple of generic challenge details, and specialized DNS challenge details
        (TXT values grouped by DNS record name).
        '''
        # Get general challenge data
        data = {}
        for domain, auth in self.authorizations.items():
            data[domain] = self._get_challenge_data(self.authorizations[domain], domain)
        # Get DNS challenge data
        data_dns = {}
        if self.challenge == 'dns-01':
            for domain, challenges in data.items():
                if self.challenge in challenges:
                    values = data_dns.get(challenges[self.challenge]['record'])
                    if values is None:
                        values = []
                        data_dns[challenges[self.challenge]['record']] = values
                    values.append(challenges[self.challenge]['resource_value'])
        return data, data_dns

    def finish_challenges(self):
        '''
        Verify challenges for all domains of the CSR, using the authorization
        data recorded by the first module run.
        '''
        self.authorizations = {}
        for domain, auth in self.data['authorizations'].items():
            self.authorizations[domain] = auth
            if auth['status'] == 'pending':
                self._validate_challenges(domain, auth)

    def get_certificate(self):
        '''
        Request a new certificate and write it to the destination file.
        First verifies whether all authorizations are valid; if not, aborts
        with an error.
        '''
        for domain in self.domains:
            auth = self.authorizations.get(domain)
            if auth is None:
                raise ModuleFailException('Found no authorization information for "{0}"!'.format(domain))
            if 'status' not in auth:
                self._fail_challenge(domain, auth, 'Authorization for {0} returned no status')
            if auth['status'] != 'valid':
                self._fail_challenge(domain, auth, 'Authorization for {0} returned status ' + str(auth['status']))

        if self.version == 1:
            cert = self._new_cert_v1()
        else:
            cert_uri = self._finalize_cert()
            cert = self._download_cert(cert_uri)

        if cert['cert'] is not None:
            pem_cert = cert['cert']

            chain = [link for link in cert.get('chain', [])]

            # Only mark changed (and refresh cert_days) when a file actually changed.
            if self.dest and write_file(self.module, self.dest, pem_cert.encode('utf8')):
                self.cert_days = get_cert_days(self.module, self.dest)
                self.changed = True

            if self.fullchain_dest and write_file(self.module, self.fullchain_dest, (pem_cert + "\n".join(chain)).encode('utf8')):
                self.cert_days = get_cert_days(self.module, self.fullchain_dest)
                self.changed = True

            if self.chain_dest and write_file(self.module, self.chain_dest, ("\n".join(chain)).encode('utf8')):
                self.changed = True

    def deactivate_authzs(self):
        '''
        Deactivates all valid authz's. Does not raise exceptions.
        https://community.letsencrypt.org/t/authorization-deactivation/19860/2
        https://tools.ietf.org/html/draft-ietf-acme-acme-09#section-7.5.2
        '''
        authz_deactivate = {
            'status': 'deactivated'
        }
        if self.version == 1:
            authz_deactivate['resource'] = 'authz'
        if self.authorizations:
            for domain in self.domains:
                auth = self.authorizations.get(domain)
                if auth is None or auth.get('status') != 'valid':
                    continue
                try:
                    result, info = self.account.send_signed_request(auth['uri'], authz_deactivate)
                    if 200 <= info['status'] < 300 and result.get('status') == 'deactivated':
                        auth['status'] = 'deactivated'
                except Exception as e:
                    # Ignore errors on deactivating authzs
                    pass
                if auth.get('status') != 'deactivated':
                    self.module.warn(warning='Could not deactivate authz object {0}.'.format(auth['uri']))
def main():
    """Ansible entry point for the acme_certificate (ex 'letsencrypt') module.

    Obtains/renews a certificate via an ACME endpoint. The module is designed
    to be run twice: the first run starts the challenges, the second run
    (after the user has fulfilled them) finishes the challenges and downloads
    the certificate.
    """
    module = AnsibleModule(
        argument_spec=dict(
            account_key_src=dict(type='path', aliases=['account_key']),
            account_key_content=dict(type='str', no_log=True),
            modify_account=dict(required=False, type='bool', default=True),
            acme_directory=dict(required=False, default='https://acme-staging.api.letsencrypt.org/directory', type='str'),
            acme_version=dict(required=False, default=1, choices=[1, 2], type='int'),
            validate_certs=dict(required=False, default=True, type='bool'),
            account_email=dict(required=False, default=None, type='str'),
            agreement=dict(required=False, type='str'),
            terms_agreed=dict(required=False, default=False, type='bool'),
            challenge=dict(required=False, default='http-01', choices=['http-01', 'dns-01'], type='str'),
            csr=dict(required=True, aliases=['src'], type='path'),
            data=dict(required=False, default=None, type='dict'),
            dest=dict(aliases=['cert'], type='path'),
            fullchain_dest=dict(aliases=['fullchain'], type='path'),
            chain_dest=dict(required=False, default=None, aliases=['chain'], type='path'),
            remaining_days=dict(required=False, default=10, type='int'),
            deactivate_authzs=dict(required=False, default=False, type='bool'),
            force=dict(required=False, default=False, type='bool'),
        ),
        required_one_of=(
            ['account_key_src', 'account_key_content'],
            ['dest', 'fullchain_dest'],
        ),
        mutually_exclusive=(
            ['account_key_src', 'account_key_content'],
        ),
        supports_check_mode=True,
    )
    if module._name == 'letsencrypt':
        module.deprecate("The 'letsencrypt' module is being renamed 'acme_certificate'", version='2.10')
    # AnsibleModule() changes the locale, so change it back to C because we rely on time.strptime() when parsing certificate dates.
    module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
    locale.setlocale(locale.LC_ALL, 'C')
    if not module.params.get('validate_certs'):
        module.warn(warning='Disabling certificate validation for communications with ACME endpoint. ' +
                            'This should only be done for testing against a local ACME server for ' +
                            'development purposes, but *never* for production purposes.')
    try:
        # Check expiry of whichever certificate file the caller asked for.
        if module.params.get('dest'):
            cert_days = get_cert_days(module, module.params['dest'])
        else:
            cert_days = get_cert_days(module, module.params['fullchain_dest'])
        # Only (re)issue when forced or the cert is close enough to expiry.
        if module.params['force'] or cert_days < module.params['remaining_days']:
            # If checkmode is active, base the changed state solely on the status
            # of the certificate file as all other actions (accessing an account, checking
            # the authorization status...) would lead to potential changes of the current
            # state
            if module.check_mode:
                module.exit_json(changed=True, authorizations={}, challenge_data={}, cert_days=cert_days)
            else:
                client = ACMEClient(module)
                client.cert_days = cert_days
                if client.is_first_step():
                    # First run: start challenges / start new order
                    client.start_challenges()
                else:
                    # Second run: finish challenges, and get certificate
                    try:
                        client.finish_challenges()
                        client.get_certificate()
                    finally:
                        # Deactivation is best-effort and must run even if
                        # finishing the challenges failed.
                        if module.params['deactivate_authzs']:
                            client.deactivate_authzs()
                data, data_dns = client.get_challenges_data()
                module.exit_json(
                    changed=client.changed,
                    authorizations=client.authorizations,
                    finalize_uri=client.finalize_uri,
                    order_uri=client.order_uri,
                    account_uri=client.account.uri,
                    challenge_data=data,
                    challenge_data_dns=data_dns,
                    cert_days=client.cert_days
                )
        else:
            module.exit_json(changed=False, cert_days=cert_days)
    except ModuleFailException as e:
        # Converts the exception into a module.fail_json() call.
        e.do_fail(module)
if __name__ == '__main__':
    main()
| 41.560948 | 139 | 0.621405 |
aa6dedf3c3e590ed9b920fd1dfe1c8b621be16c3 | 927 | py | Python | BOJwithDongbinNa/1991/1991.py | jiyolla/StudyForCodingTestWithDongbinNa | c070829dd9c7b02b139e56511832c4a3b9f5982f | [
"MIT"
] | null | null | null | BOJwithDongbinNa/1991/1991.py | jiyolla/StudyForCodingTestWithDongbinNa | c070829dd9c7b02b139e56511832c4a3b9f5982f | [
"MIT"
] | null | null | null | BOJwithDongbinNa/1991/1991.py | jiyolla/StudyForCodingTestWithDongbinNa | c070829dd9c7b02b139e56511832c4a3b9f5982f | [
"MIT"
] | null | null | null | def solve():
    # Number of nodes in the binary tree.
    n = int(input())
    # child[name] = (left_name, right_name). A child name that was never
    # declared as a node (presumably the '.' placeholder used by this input
    # format for a missing child) is absent from the dict, which is what
    # terminates the recursion below.
    child = {}
    for _ in range(n):
        node, left, right = input().split()
        child[node] = (left, right)
    # Output is accumulated and joined once to avoid repeated printing.
    print_buf = []
    def preorder(node):
        # Node, then left subtree, then right subtree.
        print_buf.append(node)
        if child[node][0] in child:
            preorder(child[node][0])
        if child[node][1] in child:
            preorder(child[node][1])
    def inorder(node):
        # Left subtree, then node, then right subtree.
        if child[node][0] in child:
            inorder(child[node][0])
        print_buf.append(node)
        if child[node][1] in child:
            inorder(child[node][1])
    def postorder(node):
        # Left subtree, then right subtree, then node.
        if child[node][0] in child:
            postorder(child[node][0])
        if child[node][1] in child:
            postorder(child[node][1])
        print_buf.append(node)
    # Emit the three traversals, one per line, rooted at 'A'.
    preorder('A')
    print_buf.append('\n')
    inorder('A')
    print_buf.append('\n')
    postorder('A')
    print(''.join(print_buf))
solve()
| 23.175 | 43 | 0.528587 |
f8be4548bef7cc6a5e6fde200706178fc2ad8c1c | 15,118 | py | Python | py_zipkin/logging_helper.py | gugu/py_zipkin | f48ffeb8bb15608209b6eef449dac9a42246fe75 | [
"Apache-2.0"
] | null | null | null | py_zipkin/logging_helper.py | gugu/py_zipkin | f48ffeb8bb15608209b6eef449dac9a42246fe75 | [
"Apache-2.0"
] | null | null | null | py_zipkin/logging_helper.py | gugu/py_zipkin | f48ffeb8bb15608209b6eef449dac9a42246fe75 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import logging
import time
from collections import defaultdict
from logging import NullHandler
from py_zipkin import _encoding_helpers
from py_zipkin import thrift
from py_zipkin.exception import ZipkinError
from py_zipkin.transport import BaseTransportHandler
from py_zipkin.util import generate_random_64bit_string
null_handler = NullHandler()
zipkin_logger = logging.getLogger('py_zipkin.logger')
zipkin_logger.addHandler(null_handler)
zipkin_logger.setLevel(logging.DEBUG)
LOGGING_END_KEY = 'py_zipkin.logging_end'
class ZipkinLoggingContext(object):
    """A logging context specific to a Zipkin trace. If the trace is sampled,
    the logging context sends serialized Zipkin spans to a transport_handler.
    The logging context sends root "server" or "client" span, as well as all
    local child spans collected within this context.
    This class should only be used by the main `zipkin_span` entrypoint.
    """
    def __init__(
        self,
        zipkin_attrs,
        endpoint,
        log_handler,
        span_name,
        transport_handler,
        report_root_timestamp,
        binary_annotations=None,
        add_logging_annotation=False,
        client_context=False,
        max_span_batch_size=None,
        firehose_handler=None,
    ):
        """
        :param zipkin_attrs: trace/span/parent ids and the sampling decision.
        :param endpoint: endpoint describing the current service.
        :param log_handler: ZipkinLoggerHandler that accumulates child spans
            and extra annotations while this context is active.
        :param span_name: name for the root span emitted by this context.
        :param transport_handler: callable that ships encoded span batches.
        :param report_root_timestamp: if True, the root span carries its own
            timestamp and duration.
        :param binary_annotations: optional initial binary annotations dict.
        :param add_logging_annotation: if True, annotate when span logging
            finished (LOGGING_END_KEY).
        :param client_context: if True the root span uses client annotations
            (cs/cr) instead of server annotations (sr/ss).
        :param max_span_batch_size: max number of spans per transport call.
        :param firehose_handler: optional handler that receives spans even
            when the trace is not sampled.
        """
        self.zipkin_attrs = zipkin_attrs
        self.endpoint = endpoint
        self.log_handler = log_handler
        self.span_name = span_name
        self.transport_handler = transport_handler
        self.response_status_code = 0
        self.report_root_timestamp = report_root_timestamp
        self.binary_annotations_dict = binary_annotations or {}
        self.add_logging_annotation = add_logging_annotation
        self.client_context = client_context
        self.max_span_batch_size = max_span_batch_size
        self.firehose_handler = firehose_handler
        # Optional server-address endpoint for the root span; initialized to
        # None here and presumably assigned by the caller before log_spans().
        self.sa_endpoint = None
    def start(self):
        """Actions to be taken before request is handled.
        1) Attach `zipkin_logger` to :class:`ZipkinLoggerHandler` object.
        2) Record the start timestamp.
        """
        zipkin_logger.removeHandler(null_handler)
        zipkin_logger.addHandler(self.log_handler)
        self.start_timestamp = time.time()
        return self
    def stop(self):
        """Actions to be taken post request handling.
        1) Log the service annotations to scribe.
        2) Detach `zipkin_logger` handler.
        """
        self.log_spans()
        zipkin_logger.removeHandler(self.log_handler)
        zipkin_logger.addHandler(null_handler)
    def log_spans(self):
        """Main function to log all the annotations stored during the entire
        request. This is done if the request is sampled and the response was
        a success. It also logs the service (`ss` and `sr`) or the client
        ('cs' and 'cr') annotations.
        """
        # FIXME: Should have a single aggregate handler
        if self.firehose_handler:
            # FIXME: We need to allow different batching settings per handler
            self._log_spans_with_span_sender(
                ZipkinBatchSender(self.firehose_handler,
                                  self.max_span_batch_size)
            )
        # Unsampled traces only go to the firehose handler (if any).
        if not self.zipkin_attrs.is_sampled:
            return
        span_sender = ZipkinBatchSender(self.transport_handler,
                                        self.max_span_batch_size)
        self._log_spans_with_span_sender(span_sender)
    def _log_spans_with_span_sender(self, span_sender):
        """Encode and send every collected child span, then the root span."""
        with span_sender:
            end_timestamp = time.time()
            # Collect additional annotations from the logging handler
            annotations_by_span_id = defaultdict(dict)
            binary_annotations_by_span_id = defaultdict(dict)
            for msg in self.log_handler.extra_annotations:
                span_id = msg['parent_span_id'] or self.zipkin_attrs.span_id
                # This should check if these are non-None
                annotations_by_span_id[span_id].update(msg['annotations'])
                binary_annotations_by_span_id[span_id].update(
                    msg['binary_annotations']
                )
            # Collect, annotate, and log client spans from the logging handler
            for span in self.log_handler.client_spans:
                # The parent_span_id is either the parent ID set in the
                # logging handler or the current Zipkin context's span ID.
                parent_span_id = (
                    span['parent_span_id'] or
                    self.zipkin_attrs.span_id
                )
                # A new client span's span ID can be overridden
                span_id = span['span_id'] or generate_random_64bit_string()
                endpoint = _encoding_helpers.copy_endpoint_with_new_service_name(
                    self.endpoint, span['service_name']
                )
                # Collect annotations both logged with the new spans and
                # logged in separate log messages.
                annotations = span['annotations']
                annotations.update(annotations_by_span_id[span_id])
                binary_annotations = span['binary_annotations']
                binary_annotations.update(
                    binary_annotations_by_span_id[span_id])
                timestamp, duration = get_local_span_timestamp_and_duration(
                    annotations
                )
                span_sender.add_span(
                    span_id=span_id,
                    parent_span_id=parent_span_id,
                    trace_id=self.zipkin_attrs.trace_id,
                    span_name=span['span_name'],
                    annotations=annotations,
                    binary_annotations=binary_annotations,
                    timestamp_s=timestamp,
                    duration_s=duration,
                    endpoint=endpoint,
                    sa_endpoint=span.get('sa_endpoint'),
                )
            # Annotations addressed at the root span itself.
            extra_annotations = annotations_by_span_id[
                self.zipkin_attrs.span_id]
            extra_binary_annotations = binary_annotations_by_span_id[
                self.zipkin_attrs.span_id
            ]
            # Server spans are bracketed by sr/ss, client spans by cs/cr.
            k1, k2 = ('sr', 'ss')
            if self.client_context:
                k1, k2 = ('cs', 'cr')
            annotations = {k1: self.start_timestamp, k2: end_timestamp}
            annotations.update(extra_annotations)
            if self.add_logging_annotation:
                annotations[LOGGING_END_KEY] = time.time()
            self.binary_annotations_dict.update(extra_binary_annotations)
            if self.report_root_timestamp:
                timestamp = self.start_timestamp
                duration = end_timestamp - self.start_timestamp
            else:
                timestamp = duration = None
            # Finally emit the root span for this context.
            span_sender.add_span(
                span_id=self.zipkin_attrs.span_id,
                parent_span_id=self.zipkin_attrs.parent_span_id,
                trace_id=self.zipkin_attrs.trace_id,
                span_name=self.span_name,
                annotations=annotations,
                binary_annotations=self.binary_annotations_dict,
                timestamp_s=timestamp,
                duration_s=duration,
                endpoint=self.endpoint,
                sa_endpoint=self.sa_endpoint,
            )
def get_local_span_timestamp_and_duration(annotations):
    """Derive a span's start timestamp and duration from its annotations.

    Client spans are bracketed by 'cs'/'cr' and server spans by 'sr'/'ss';
    the client pair takes precedence when both are present. Returns
    ``(None, None)`` when neither pair is complete.
    """
    for start_key, end_key in (('cs', 'cr'), ('sr', 'ss')):
        if start_key in annotations and end_key in annotations:
            start = annotations[start_key]
            return start, annotations[end_key] - start
    return None, None
class ZipkinLoggerHandler(logging.StreamHandler, object):
    """Logger Handler to log span annotations or additional client spans to
    scribe. To connect to the handler, logger name must be
    'py_zipkin.logger'.
    :param zipkin_attrs: ZipkinAttrs namedtuple object
    """
    def __init__(self, zipkin_attrs):
        super(ZipkinLoggerHandler, self).__init__()
        # If parent_span_id is set, the application is in a logging context
        # where each additional client span logged has this span as its parent.
        # This is to allow logging of hierarchies of spans instead of just
        # single client spans. See the SpanContext class.
        self.parent_span_id = None
        self.zipkin_attrs = zipkin_attrs
        # Child spans accumulated via store_local_span() / emit().
        self.client_spans = []
        # Extra annotation dicts targeting the active parent span.
        self.extra_annotations = []
    def store_local_span(
        self,
        span_name,
        service_name,
        annotations,
        binary_annotations,
        sa_endpoint,
        span_id=None,
    ):
        """Convenience method for storing a local child span (a zipkin_span
        inside other zipkin_spans) to be logged when the outermost zipkin_span
        exits.

        :param span_name: name of the child span.
        :param service_name: service the child span belongs to.
        :param annotations: str -> timestamp annotations dict.
        :param binary_annotations: str -> str binary annotations dict.
        :param sa_endpoint: optional server-address endpoint for the span.
        :param span_id: optional explicit span id; if None one is generated
            at emission time.
        """
        self.client_spans.append({
            'span_name': span_name,
            'service_name': service_name,
            'parent_span_id': self.parent_span_id,
            'span_id': span_id,
            'annotations': annotations,
            'binary_annotations': binary_annotations,
            'sa_endpoint': sa_endpoint,
        })
    def emit(self, record):
        """Handle each record message. This function is called whenever
        zipkin_logger.debug() is called.
        :param record: object containing the `msg` object.
            Structure of record.msg should be the following:
            ::
                {
                    "annotations": {
                        "cs": ts1,
                        "cr": ts2,
                    },
                    "binary_annotations": {
                        "http.uri": "/foo/bar",
                    },
                    "name": "foo_span",
                    "service_name": "myService",
                }
            Keys:
            - annotations: str -> timestamp annotations
            - binary_annotations: str -> str binary annotations
            (One of either annotations or binary_annotations is required)
            - name: str of new span name; only used if service-name is also
              specified.
            - service_name: str of new client span's service name.
            If service_name is specified, this log msg is considered to
            represent a new client span. If service_name is omitted, this is
            considered additional annotation for the currently active
            "parent span" (either the server span or the parent client span
            inside a SpanContext).
        """
        # Nothing is collected for unsampled traces.
        if not self.zipkin_attrs.is_sampled:
            return
        span_name = record.msg.get('name', 'span')
        annotations = record.msg.get('annotations', {})
        binary_annotations = record.msg.get('binary_annotations', {})
        if not annotations and not binary_annotations:
            raise ZipkinError(
                "At least one of annotation/binary annotation has"
                " to be provided for {0} span".format(span_name)
            )
        service_name = record.msg.get('service_name', None)
        # Presence of service_name means this is to be a new local span.
        if service_name is not None:
            self.store_local_span(
                span_name=span_name,
                service_name=service_name,
                annotations=annotations,
                binary_annotations=binary_annotations,
                sa_endpoint=None,
            )
        else:
            # Annotation-only message: attach to the active parent span.
            self.extra_annotations.append({
                'annotations': annotations,
                'binary_annotations': binary_annotations,
                'parent_span_id': self.parent_span_id,
            })
class ZipkinBatchSender(object):
    """Collects thrift-encoded spans and flushes them to a transport handler
    in batches bounded by span count and (optionally) payload size.

    Intended for use as a context manager: the batch is flushed on clean
    exit; an exception inside the block is re-raised as ZipkinError.
    """
    # Default maximum number of spans per batch.
    MAX_PORTION_SIZE = 100
    def __init__(self, transport_handler, max_portion_size=None):
        self.transport_handler = transport_handler
        self.max_portion_size = max_portion_size or self.MAX_PORTION_SIZE
        # Only BaseTransportHandler subclasses expose a byte limit; plain
        # callables get no payload-size batching.
        if isinstance(self.transport_handler, BaseTransportHandler):
            self.max_payload_bytes = self.transport_handler.get_max_payload_bytes()
        else:
            self.max_payload_bytes = None
    def __enter__(self):
        self._reset_queue()
        return self
    def __exit__(self, _exc_type, _exc_value, _exc_traceback):
        # On error, wrap and re-raise instead of flushing a partial batch.
        if any((_exc_type, _exc_value, _exc_traceback)):
            error = '{0}: {1}'.format(_exc_type.__name__, _exc_value)
            raise ZipkinError(error)
        else:
            self.flush()
    def _reset_queue(self):
        """Start a fresh batch; size starts at the thrift list header size."""
        self.queue = []
        self.current_size = thrift.LIST_HEADER_SIZE
    def add_span(
        self,
        span_id,
        parent_span_id,
        trace_id,
        span_name,
        annotations,
        binary_annotations,
        timestamp_s,
        duration_s,
        endpoint,
        sa_endpoint,
    ):
        """Thrift-encode one span and enqueue it, flushing first if adding it
        would exceed the batch's span-count or payload-byte limit.
        """
        thrift_endpoint = thrift.create_endpoint(
            endpoint.port,
            endpoint.service_name,
            endpoint.ipv4,
            endpoint.ipv6,
        )
        thrift_annotations = thrift.annotation_list_builder(
            annotations,
            thrift_endpoint,
        )
        # Binary annotations can be set through debug messages or the
        # set_extra_binary_annotations registry setting.
        thrift_binary_annotations = thrift.binary_annotation_list_builder(
            binary_annotations,
            thrift_endpoint,
        )
        # Add sa binary annotation
        if sa_endpoint is not None:
            thrift_sa_endpoint = thrift.create_endpoint(
                sa_endpoint.port,
                sa_endpoint.service_name,
                sa_endpoint.ipv4,
                sa_endpoint.ipv6,
            )
            thrift_binary_annotations.append(thrift.create_binary_annotation(
                key=thrift.zipkin_core.SERVER_ADDR,
                value=thrift.SERVER_ADDR_VAL,
                annotation_type=thrift.zipkin_core.AnnotationType.BOOL,
                host=thrift_sa_endpoint,
            ))
        thrift_span = thrift.create_span(
            span_id,
            parent_span_id,
            trace_id,
            span_name,
            thrift_annotations,
            thrift_binary_annotations,
            timestamp_s,
            duration_s,
        )
        encoded_span = thrift.span_to_bytes(thrift_span)
        # If we've already reached the max batch size or the new span doesn't
        # fit in max_payload_bytes, send what we've collected until now and
        # start a new batch.
        is_over_size_limit = (
            self.max_payload_bytes is not None and
            self.current_size + len(encoded_span) > self.max_payload_bytes
        )
        is_over_portion_limit = len(self.queue) >= self.max_portion_size
        if is_over_size_limit or is_over_portion_limit:
            self.flush()
        self.queue.append(encoded_span)
        self.current_size += len(encoded_span)
    def flush(self):
        """Send the current batch (if any) through the transport handler and
        reset the queue."""
        if self.transport_handler and len(self.queue) > 0:
            message = thrift.encode_bytes_list(self.queue)
            self.transport_handler(message)
        self._reset_queue()
| 36.783455 | 83 | 0.611853 |
ab05fa79323c7f161ad27b944b78ebb40e546882 | 4,411 | py | Python | circuit-python/code.py | aws-samples/aws-serverless-pyportal-mars-weather-display | 156dddc4f971e40368a5e45ba7420b76e6fff276 | [
"MIT-0"
] | 7 | 2020-05-17T13:54:58.000Z | 2022-03-27T20:57:34.000Z | circuit-python/code.py | aws-samples/aws-serverless-pyportal-mars-weather-display | 156dddc4f971e40368a5e45ba7420b76e6fff276 | [
"MIT-0"
] | 2 | 2020-08-01T12:56:44.000Z | 2021-01-16T21:03:31.000Z | circuit-python/code.py | aws-samples/aws-serverless-pyportal-mars-weather-display | 156dddc4f971e40368a5e45ba7420b76e6fff276 | [
"MIT-0"
] | 1 | 2021-11-03T01:50:43.000Z | 2021-11-03T01:50:43.000Z | # Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import time
import board
import busio
from digitalio import DigitalInOut
import neopixel
from adafruit_esp32spi import adafruit_esp32spi
from adafruit_esp32spi import adafruit_esp32spi_wifimanager
import adafruit_pyportal
import displayio
from adafruit_display_text.label import Label
from adafruit_bitmap_font import bitmap_font
font = bitmap_font.load_font("/fonts/OstrichSans-Heavy-18.bdf")
# font terminalio.FONT
# import terminalio
pyportal = adafruit_pyportal.PyPortal()
display = board.DISPLAY
splash = displayio.Group(max_size=9)
bg_group = displayio.Group(max_size=1)
btm_view = displayio.Group(max_size=9, x=20, y=140)
top_view = displayio.Group(max_size=9, x=20, y=20)
splash.append(bg_group)
splash.append(btm_view)
splash.append(top_view)
### WiFi ###
# Get wifi details and more from a secrets.py file
try:
from secrets import secrets
except ImportError:
print("WiFi secrets are kept in secrets.py, please add them there!")
raise
interval_minutes = secrets['interval_minutes']
wifi = adafruit_esp32spi_wifimanager.ESPSPI_WiFiManager(pyportal._esp, secrets, None)
def callAPIEndpoint(mars_api_url):
    """Fetch the Mars weather JSON payload from the backend API.

    Sends an authenticated GET (API key taken from secrets.py) and returns
    the decoded JSON body; the HTTP response is closed before returning.
    """
    api_response = wifi.get(
        mars_api_url,
        headers={"x-api-key": secrets['mars_api_key']},
        timeout=30,
    )
    payload = api_response.json()
    print("JSON Response: ", payload)
    api_response.close()
    return payload
def downloadImage(url):
    """Download the background image to /sd/cache.bmp, retrying on failure.

    Retries up to five times on either a missing/unwritable SD card (OSError)
    or a truncated transfer (RuntimeError).
    NOTE(review): if all retries fail this falls through silently, so a stale
    /sd/cache.bmp (if present) would be displayed afterwards — confirm this
    is the intended behavior.
    """
    max_retries = 5
    for i in range(max_retries):
        try:
            pyportal.wget(url, '/sd/cache.bmp', chunk_size=4096)
        except OSError as error:
            print(error)
            print("""\n\nNo writable filesystem found. Insert an SD card.""")
            continue
        except RuntimeError as error:
            print(error)
            print("wget didn't write a complete file")
            continue
        # Successful download: stop retrying.
        break
def setText(group, text, value, x, y):
    """Append a label reading ``text + str(value)`` to *group* at (x, y)."""
    label = Label(
        font,
        text='{0}{1}'.format(text, value),
        color=0xa1251b,
        max_glyphs=200,
    )
    label.x, label.y = x, y
    group.append(label)
def clearGroup(group):
    """Remove every element from a displayio.Group (or any pop-able sequence).

    Bug fix: the previous implementation popped only ``len(group) - 1``
    items, which left one stale label behind on every screen refresh.
    Pop until the group is empty instead.
    """
    while len(group) > 0:
        group.pop()
def setInsight(insight):
    """Redraw both label groups from an InSight weather record.

    Clears the previous labels, then draws the bottom-left block (sol,
    temperature, wind, pressure) and the top-left block (timestamp, season).
    """
    clearGroup(btm_view)
    clearGroup(top_view)
    bottom_rows = (
        ('Sol: ', 'sol'),
        ('Avg Air Temp: ', 'av_at'),
        ('Avg Wind Speed: ', 'av_HWS'),
        ('Avg Pressure: ', 'av_PRE'),
    )
    for row, (caption, key) in enumerate(bottom_rows):
        setText(btm_view, caption, insight[key], 0, row * 20)
    setText(top_view, 'Last_UTC: ', insight['Last_UTC'], 0, 0)
    setText(top_view, 'Martian Season: ', insight['season'], 0, 20)
    return
def showDisplay(insight, displayTime=60.0):
    """Show the cached background plus the insight labels for *displayTime* seconds.

    The bitmap file is kept open for the whole display period (the wait loop
    sits inside the ``with`` block), presumably because OnDiskBitmap streams
    pixels from the open file — TODO confirm before restructuring.
    """
    print('Setting background')
    with open("/sd/cache.bmp", "rb") as bitmap_file:
        bitmap = displayio.OnDiskBitmap(bitmap_file)
        tile_grid = displayio.TileGrid(bitmap, pixel_shader=displayio.ColorConverter())
        # bg_group holds at most one tile grid; drop the previous background.
        if bg_group:
            bg_group.pop()
        bg_group.append(tile_grid)
        setInsight(insight)
        display.show(splash)
        # Busy-wait until the display period elapses.
        start = time.monotonic()
        while time.monotonic() - start < displayTime:
            pass
# Main loop: fetch the latest weather record, refresh the cached background
# image, then display everything for the configured interval before polling
# the API again.
while True:
    data = callAPIEndpoint(secrets['mars_api_url'])
    downloadImage(data['image_url'])
    showDisplay(data['insight'], displayTime=60*interval_minutes)
| 34.732283 | 88 | 0.687599 |
c0bcc4a462cfe4a02bbf8809fa40ecda2481f1cc | 3,738 | py | Python | tests/devices/test_renpho_rp_ap001s.py | pascaltippelt/tuya-local | 03f4a8135e085081b30e6e0abd3ba4d3e4ae491e | [
"MIT"
] | null | null | null | tests/devices/test_renpho_rp_ap001s.py | pascaltippelt/tuya-local | 03f4a8135e085081b30e6e0abd3ba4d3e4ae491e | [
"MIT"
] | null | null | null | tests/devices/test_renpho_rp_ap001s.py | pascaltippelt/tuya-local | 03f4a8135e085081b30e6e0abd3ba4d3e4ae491e | [
"MIT"
] | null | null | null | from homeassistant.components.fan import SUPPORT_PRESET_MODE
from homeassistant.components.light import COLOR_MODE_ONOFF
from homeassistant.components.lock import STATE_LOCKED, STATE_UNLOCKED
from homeassistant.const import STATE_UNAVAILABLE
from ..const import RENPHO_PURIFIER_PAYLOAD
from ..helpers import assert_device_properties_set
from .base_device_tests import (
BasicLightTests,
BasicLockTests,
BasicSwitchTests,
SwitchableTests,
TuyaDeviceTestCase,
)
SWITCH_DPS = "1"
PRESET_DPS = "4"
LOCK_DPS = "7"
LIGHT_DPS = "8"
TIMER_DPS = "19"
QUALITY_DPS = "22"
SLEEP_DPS = "101"
PREFILTER_DPS = "102"
CHARCOAL_DPS = "103"
ACTIVATED_DPS = "104"
HEPA_DPS = "105"
class TestRenphoPurifier(
    BasicLightTests,
    BasicLockTests,
    BasicSwitchTests,
    SwitchableTests,
    TuyaDeviceTestCase,
):
    """Device tests for the Renpho RP-AP001S air purifier config
    (renpho_rp_ap001s.yaml): fan presets, AQ indicator light, child lock,
    sleep switch, and extra state attributes."""
    __test__ = True
    def setUp(self):
        # Wire the shared test mixins to the entities declared in the config.
        self.setUpForConfig("renpho_rp_ap001s.yaml", RENPHO_PURIFIER_PAYLOAD)
        self.subject = self.entities.get("fan")
        self.setUpSwitchable(SWITCH_DPS, self.subject)
        self.setUpBasicLight(LIGHT_DPS, self.entities.get("light_aq_indicator"))
        self.setUpBasicLock(LOCK_DPS, self.entities.get("lock_child_lock"))
        self.setUpBasicSwitch(SLEEP_DPS, self.entities.get("switch_sleep"))
    def test_supported_features(self):
        # The fan entity only advertises preset-mode control.
        self.assertEqual(self.subject.supported_features, SUPPORT_PRESET_MODE)
    def test_preset_modes(self):
        self.assertCountEqual(
            self.subject.preset_modes,
            ["low", "mid", "high", "auto"],
        )
    def test_preset_mode(self):
        # The DPS string maps 1:1 onto the reported preset mode.
        self.dps[PRESET_DPS] = "low"
        self.assertEqual(self.subject.preset_mode, "low")
        self.dps[PRESET_DPS] = "mid"
        self.assertEqual(self.subject.preset_mode, "mid")
        self.dps[PRESET_DPS] = "high"
        self.assertEqual(self.subject.preset_mode, "high")
        self.dps[PRESET_DPS] = "auto"
        self.assertEqual(self.subject.preset_mode, "auto")
    async def test_set_preset_mode_to_low(self):
        async with assert_device_properties_set(
            self.subject._device,
            {PRESET_DPS: "low"},
        ):
            await self.subject.async_set_preset_mode("low")
    async def test_set_preset_mode_to_mid(self):
        async with assert_device_properties_set(
            self.subject._device,
            {PRESET_DPS: "mid"},
        ):
            await self.subject.async_set_preset_mode("mid")
    async def test_set_preset_mode_to_high(self):
        async with assert_device_properties_set(
            self.subject._device,
            {PRESET_DPS: "high"},
        ):
            await self.subject.async_set_preset_mode("high")
    async def test_set_preset_mode_to_auto(self):
        async with assert_device_properties_set(
            self.subject._device,
            {PRESET_DPS: "auto"},
        ):
            await self.subject.async_set_preset_mode("auto")
    def test_device_state_attributes(self):
        # Timer, air quality, and the four filter-life DPS values are exposed
        # as extra state attributes on the fan entity.
        self.dps[TIMER_DPS] = "19"
        self.dps[QUALITY_DPS] = "22"
        self.dps[PREFILTER_DPS] = 102
        self.dps[CHARCOAL_DPS] = 103
        self.dps[ACTIVATED_DPS] = 104
        self.dps[HEPA_DPS] = 105
        self.assertDictEqual(
            self.subject.device_state_attributes,
            {
                "timer": "19",
                "air_quality": "22",
                "prefilter_life": 102,
                "charcoal_filter_life": 103,
                "activated_charcoal_filter_life": 104,
                "hepa_filter_life": 105,
            },
        )
        # basicSwitch is the sleep switch set up in setUp(), presumably
        # exposed by BasicSwitchTests — this checks its icon override.
        self.assertEqual(self.basicSwitch.icon, "mdi:power-sleep")
| 32.789474 | 81 | 0.635099 |
9d1a43588ff6c5f584151404ce877f43b0b65bc2 | 8,278 | py | Python | thread_queue.py | timmartin19/thread-queue | 6e69144fe1bc7f4246ff25ea93eeeeaca0c7c268 | [
"MIT"
] | null | null | null | thread_queue.py | timmartin19/thread-queue | 6e69144fe1bc7f4246ff25ea93eeeeaca0c7c268 | [
"MIT"
] | null | null | null | thread_queue.py | timmartin19/thread-queue | 6e69144fe1bc7f4246ff25ea93eeeeaca0c7c268 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
from threading import Thread
try:
from queue import Queue, Empty
except ImportError:
from Queue import Queue, Empty
__author__ = 'Tim Martin'
__email__ = 'tim@timmartin.me'
__version__ = '0.2.2'
LOG = logging.getLogger(__name__)
t = Thread()
class QueueNotEmptyException(Exception):
    """
    Raised when the queue still holds work that was never completed,
    typically because the worker threads hit errors before it was drained.
    """
    def __init__(self, message, items, exceptions):
        # Raw queue contents (may contain the ``None`` stop sentinels).
        self._unattempted_tasks = items
        # Exceptions collected from the worker threads.
        self.thread_exceptions = exceptions
        super(QueueNotEmptyException, self).__init__(message)

    @property
    def unattempted_tasks(self):
        """Tasks no thread ever picked up (``None`` sentinels filtered out)."""
        return [item for item in self._unattempted_tasks if item is not None]

    @property
    def all_unprocessed_tasks(self):
        """Unattempted tasks plus tasks whose execution raised in a thread."""
        failed_tasks = [
            exc.task
            for exc in self.thread_exceptions
            if isinstance(exc, ThreadTaskException) and exc.task is not None
        ]
        return self.unattempted_tasks + failed_tasks
class ThreadTaskException(Exception):
    """
    Wraps an exception raised while a worker thread was executing a task,
    remembering both the original exception (exposed as ``__cause__``) and
    the task that triggered it.
    """
    def __init__(self, message, exc, task=None):
        super(ThreadTaskException, self).__init__(message)
        # Mimic ``raise ... from exc`` so tracebacks show the real cause.
        self.__cause__ = exc
        # The (args, kwargs) tuple that was being processed, if known.
        self.task = task
class ThreadQueue(object):
    """
    An object for safely processing a queue
    using a fixed number of threads

    Example:

    ..code-block:: python

        from thread_queue import ThreadQueue

        def worker(arg, keyword=5):
            print('arg = {0}, keyword = {1}'.format(arg, keyword))

        with ThreadQueue(worker) as tq:
            for i in range(10):
                tq.load(i, i*10)

        # Would print in no particular order (because it's threaded)
        # arg = 0, keyword = 0
        # arg = 1, keyword = 10
        # arg = 2, keyword = 20
        # ...
        # arg = 9, keyword = 90
    """
    def __init__(self, worker,
                 thread_count=10,
                 initialize_thread=None,
                 initialization_args=None,
                 initialization_kwargs=None,
                 cleanup_thread=None,
                 queue=None,
                 response_queue=None):
        """
        :param function worker: The function to call from the
            generated threads. This will take the same arguments
            as are added to the ``ThreadQueue.load`` method. If you
            call ``ThreadQueue(my_job).load(1, keyword=2)`` this
            function would be effectively equivalent to calling
            ``my_job(1, keyword=2)``. The one caveat is if
            ``initialize_thread`` is set. In that case the return
            value will be prepended to the arguments.
            ``ThreadQueue(my_job, initialize_thread=lambda: 'initial').load(1, keyword=2)``
            is equivalent to ``my_job('initial', 1, keyword=2)
        :param int thread_count: The number of threads to instantiate
        :param function initialize_thread: A function to call immediately
            after a thread has been initialized. The return value will
            be prepended to the args sent to worker
        :param tuple initialization_args: Arguments to pass to the ``initialize_thread``
            function
        :param dict initialization_kwargs: Keyword arguments to pass to
            ``initialize_thread``
        :param function cleanup_thread: Called when the thread is about
            to finish. It will always be called even in the event of an exception.
            If ``initialize_thread`` is set, then the return value of that function
            will be passed to ``cleanup_thread``
        :param Queue queue: Defaults to ``queue.Queue()``. An
            object that implements a ``Queue`` like interface.
            It must include at least ``get``, ``put``, and ``join``
            methods.
        :param Queue response_queue: Defaults to ``queue.Queue()``. Queue
            onto which each worker invocation's return value is pushed.
        """
        self.thread_count = thread_count
        self._queue = queue or Queue()
        # Bug fix: this previously read ``queue or Queue()``, which silently
        # ignored the ``response_queue`` argument and aliased responses onto
        # the task queue whenever a custom ``queue`` was supplied.
        self.response_queue = response_queue or Queue()
        self._exc_queue = None
        self.initialize_thread = initialize_thread
        self.worker = worker
        self.initialization_args = initialization_args or []
        self.initialization_kwargs = initialization_kwargs or {}
        self.cleanup_thread = cleanup_thread
        self._threads = []
    def __enter__(self):
        self.start()
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
    def start(self):
        """
        Initializes the threads that the queue will be using
        """
        LOG.debug('Starting ThreadQueue threads')
        self._exc_queue = Queue()
        for i in range(self.thread_count):
            worker_args = [self._queue, self.initialize_thread,
                           self.worker, self.initialization_args,
                           self.initialization_kwargs, self.cleanup_thread,
                           self._exc_queue, self.response_queue]
            thread = Thread(target=_do_work, args=worker_args)
            thread.start()
            self._threads.append(thread)
    def load(self, *args, **kwargs):
        """
        Loads a set of arguments to pass to the threads
        via the queue. The arguments will be passed to
        the ``worker`` function exactly as specified here unless
        ``initiliaze_thread`` is set. In which case, the return
        value from initialize_thread will be prepended to the arguments

        :param tuple args:
        :param dict kwargs:
        """
        self._queue.put(tuple([args, kwargs]))
    def close(self):
        """
        Waits for the queue to empty and then
        joins the threads

        :raises QueueNotEmptyException: if any tasks were left unprocessed
            or any worker thread raised an exception
        """
        # One ``None`` sentinel per thread tells each worker loop to exit.
        for i in range(self.thread_count):
            self._queue.put(None)
        for thread in self._threads:
            thread.join()
        unfinished_tasks = empty_queue(self._queue)
        thread_errors = empty_queue(self._exc_queue)
        if unfinished_tasks or thread_errors:
            raise QueueNotEmptyException('The ThreadQueue did not finish all tasks',
                                         unfinished_tasks, thread_errors)
        LOG.debug('Closed all ThreadQueue threads')
def empty_queue(queue):
    """Drain *queue* without blocking and return its items in FIFO order.

    :param Queue queue: the queue to drain
    :return: every item that was in the queue (empty list if none)
    :rtype: list
    """
    drained = []
    try:
        while True:
            drained.append(queue.get_nowait())
    except Empty:
        pass
    return drained
def _do_work(q, initialize_thread, worker, args, kwargs, cleanup_thread, exc_queue, response_queue):
    """Thread target: optionally initialize, run the worker loop, then clean up.

    Any exception — from initialization, the worker loop, or cleanup — is
    recorded on ``exc_queue`` so the parent ``ThreadQueue.close()`` can
    report it, and then re-raised to terminate the thread.
    """
    try:
        extra = None
        if initialize_thread:
            LOG.debug('Initializing thread')
            # The return value is prepended to every task's arguments.
            extra = initialize_thread(*args, **kwargs)
        else:
            LOG.debug('Skipping thread initialization')
        try:
            _worker_loop(q, worker, response_queue,
                         extra=extra, has_extra=initialize_thread is not None)
        finally:
            # Cleanup always runs, even if the worker loop raised.
            if cleanup_thread is not None:
                LOG.debug('Cleaning up thread')
                if initialize_thread:
                    cleanup_thread(extra)
                else:
                    cleanup_thread()
    except Exception as exc:
        LOG.warning('Exception in ThreadQueue thread', exc_info=True)
        exc_queue.put(exc)
        raise
def _worker_loop(queue, worker, response_queue, extra=None, has_extra=False):
    """Consume ``(args, kwargs)`` tasks from *queue* until a ``None`` sentinel.

    Each worker return value is pushed onto *response_queue*. When
    *has_extra* is true, *extra* (the thread-initialization result) is
    prepended to the positional arguments. A failure while processing a task
    is wrapped in :class:`ThreadTaskException` carrying the offending task.
    """
    while True:
        item = queue.get()
        try:
            if item is None:
                LOG.debug('Found break request from parent. Finishing work')
                break
            LOG.debug('Beginning task')
            if has_extra:
                resp = worker(extra, *item[0], **item[1])
            else:
                resp = worker(*item[0], **item[1])
            response_queue.put(resp)
            LOG.debug('Finished task')
            # NOTE(review): task_done() is skipped for sentinels and failed
            # tasks, so queue.join() would hang — harmless today since
            # ThreadQueue.close() joins threads instead; confirm before
            # relying on queue.join().
            queue.task_done()
        except Exception as exc:
            raise ThreadTaskException('An exception occurred while processing a task',
                                      exc, task=item)
211cc869ce2d1d99371136fa8f6ae9c143f40171 | 44,705 | py | Python | tensorflow/python/ops/summary_ops_v2.py | xykong1958/tensorflow | d8fe10aae9e6be9cd49ab7e68c1ca4989f0be42b | [
"Apache-2.0"
] | 3 | 2016-08-20T04:02:24.000Z | 2019-04-21T06:18:41.000Z | tensorflow/python/ops/summary_ops_v2.py | xiyihong/tensorflow | f90532431c3785166cff35ff427b652fe460f60b | [
"Apache-2.0"
] | 2 | 2018-03-23T00:03:36.000Z | 2018-03-28T23:50:51.000Z | tensorflow/python/ops/summary_ops_v2.py | xiyihong/tensorflow | f90532431c3785166cff35ff427b652fe460f60b | [
"Apache-2.0"
] | 1 | 2019-10-31T09:22:30.000Z | 2019-10-31T09:22:30.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations to emit summaries."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import functools
import getpass
import os
import re
import threading
import time
import six
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import profiler as _profiler
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import smart_cond
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_summary_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import summary_op_util
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import training_util
from tensorflow.python.util import deprecation
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
# Name for graph collection of summary writer init ops, which is only exposed
# as a legacy API for tf.contrib.summary in TF 1.x.
_SUMMARY_WRITER_INIT_COLLECTION_NAME = "_SUMMARY_WRITER_V2"
# Validation patterns for create_db_writer() arguments. Experiment and run
# names may not contain ASCII control characters or angle brackets; user
# names must be valid as both a DNS label and a Linux username (see the
# create_db_writer docstring).
_EXPERIMENT_NAME_PATTERNS = re.compile(r"^[^\x00-\x1F<>]{0,256}$")
_RUN_NAME_PATTERNS = re.compile(r"^[^\x00-\x1F<>]{0,512}$")
_USER_NAME_PATTERNS = re.compile(r"^[a-z]([-a-z0-9]{0,29}[a-z0-9])?$", re.I)
def _should_record_summaries_internal(default_state):
  """Returns a boolean Tensor deciding whether summaries should be recorded.

  The decision is the logical AND of two context conditions:
  `ctx.summary_recording` (usually set by the user) and
  `ctx.summary_recording_distribution_strategy` (controlled by
  DistributionStrategy / tf.distribute.ReplicaContext). Either may be a
  plain value or a zero-argument callable evaluated on demand.

  Args:
    default_state: bool substituted for `ctx.summary_recording` when the
      user has not set it; `summary_recording_distribution_strategy`
      defaults to True.
  """
  ctx = context.context()

  def _resolve(value):
    # Conditions may be supplied lazily as callables.
    return value() if callable(value) else value

  distributed_cond = _resolve(ctx.summary_recording_distribution_strategy)
  user_cond = _resolve(ctx.summary_recording)
  if user_cond is None:
    user_cond = default_state
  return math_ops.logical_and(distributed_cond, user_cond)
def _should_record_summaries_v2():
  """Returns boolean Tensor which is true if summaries should be recorded.

  If no recording status has been set, this defaults to True, unlike the
  public should_record_summaries(). Used by the V2 write() path.
  """
  return _should_record_summaries_internal(default_state=True)


def should_record_summaries():
  """Returns boolean Tensor which is true if summaries should be recorded."""
  # Public/legacy variant: an unset recording status means "do not record".
  return _should_record_summaries_internal(default_state=False)
@tf_export("summary.record_if", v1=[])
@tf_contextlib.contextmanager
def record_if(condition):
  """Sets summary recording on or off per the provided boolean value.

  The provided value can be a python boolean, a scalar boolean Tensor, or
  a callable providing such a value; a callable is invoked on-demand to
  determine whether summary writing will occur.

  Args:
    condition: can be True, False, a bool Tensor, or a callable providing such.

  Yields:
    A context manager that sets this value on enter and restores the
    previous value on exit.
  """
  ctx = context.context()
  previous = ctx.summary_recording
  try:
    ctx.summary_recording = condition
    yield
  finally:
    # Restore whatever condition was active before this context entered.
    ctx.summary_recording = previous
# TODO(apassos) consider how to handle local step here.
def record_summaries_every_n_global_steps(n, global_step=None):
  """Sets the should_record_summaries Tensor to true if global_step % n == 0."""
  if global_step is None:
    global_step = training_util.get_or_create_global_step()
  with ops.device("cpu:0"):

    def _on_nth_step():
      return math_ops.equal(global_step % n, 0)

    # In graph mode the condition must be a Tensor, so evaluate it eagerly
    # here; in eager mode pass the callable for on-demand evaluation.
    condition = _on_nth_step if context.executing_eagerly() else _on_nth_step()
    return record_if(condition)
# Convenience wrappers around record_if() with constant conditions.
def always_record_summaries():
  """Sets the should_record_summaries Tensor to always true."""
  return record_if(True)


def never_record_summaries():
  """Sets the should_record_summaries Tensor to always false."""
  return record_if(False)
@tf_export("summary.experimental.get_step", v1=[])
def get_step():
  """Returns the default summary step for the current thread.

  Returns:
    The step set by `tf.summary.experimental.set_step()` if one has been set,
    otherwise None.
  """
  # The step is stored per-thread on the eager context.
  return context.context().summary_step


@tf_export("summary.experimental.set_step", v1=[])
def set_step(step):
  """Sets the default summary step for the current thread.

  For convenience, this function sets a default value for the `step` parameter
  used in summary-writing functions elsewhere in the API so that it need not
  be explicitly passed in every such invocation. The value can be a constant
  or a variable, and can be retrieved via `tf.summary.experimental.get_step()`.

  Note: when using this with @tf.functions, the step value will be captured at
  the time the function is traced, so changes to the step outside the function
  will not be reflected inside the function unless using a `tf.Variable` step.

  Args:
    step: An `int64`-castable default step value, or None to unset.
  """
  context.context().summary_step = step
@tf_export("summary.SummaryWriter", v1=[])
@six.add_metaclass(abc.ABCMeta)
class SummaryWriter(object):
  """Interface representing a stateful summary writer object."""

  @abc.abstractmethod
  def set_as_default(self):
    """Enables this summary writer for the current thread."""
    raise NotImplementedError()

  @abc.abstractmethod
  @tf_contextlib.contextmanager
  def as_default(self):
    """Returns a context manager that enables summary writing."""
    raise NotImplementedError()

  # Note: init/flush/close are not abstract, so subclasses that have no
  # backing resource (e.g. NoopSummaryWriter) may simply override them.
  def init(self):
    """Initializes the summary writer."""
    raise NotImplementedError()

  def flush(self):
    """Flushes any buffered data."""
    raise NotImplementedError()

  def close(self):
    """Flushes and closes the summary writer."""
    raise NotImplementedError()
class ResourceSummaryWriter(SummaryWriter):
  """Implementation of SummaryWriter using a SummaryWriterInterface resource."""

  def __init__(self, shared_name, init_op_fn, name=None, v2=False):
    # Handle to the underlying summary-writer resource; the write ops emit
    # events through this handle.
    self._resource = gen_summary_ops.summary_writer(
        shared_name=shared_name, name=name)
    # TODO(nickfelt): cache other constructed ops in graph mode
    self._init_op_fn = init_op_fn
    self._init_op = init_op_fn(self._resource)
    # v2 enables the stricter TF2 lifecycle: once close() runs eagerly, the
    # writer refuses further use instead of silently re-initializing.
    self._v2 = v2
    self._closed = False
    if context.executing_eagerly():
      # Tie resource deletion to this Python object's lifetime.
      self._resource_deleter = resource_variable_ops.EagerResourceDeleter(
          handle=self._resource, handle_device="cpu:0")
    else:
      # In graph mode, expose the init op via a collection so that
      # summary_writer_initializer_op() / initialize() can run it.
      ops.add_to_collection(_SUMMARY_WRITER_INIT_COLLECTION_NAME, self._init_op)

  def set_as_default(self):
    """Enables this summary writer for the current thread."""
    if self._v2 and context.executing_eagerly() and self._closed:
      raise RuntimeError("SummaryWriter is already closed")
    context.context().summary_writer = self

  @tf_contextlib.contextmanager
  def as_default(self):
    """Returns a context manager that enables summary writing."""
    if self._v2 and context.executing_eagerly() and self._closed:
      raise RuntimeError("SummaryWriter is already closed")
    old = context.context().summary_writer
    try:
      context.context().summary_writer = self
      yield self
      # Flushes the summary writer in eager mode or in graph functions, but
      # not in legacy graph mode (you're on your own there).
      self.flush()
    finally:
      context.context().summary_writer = old

  def init(self):
    """Initializes the summary writer."""
    if self._v2:
      if context.executing_eagerly() and self._closed:
        raise RuntimeError("SummaryWriter is already closed")
      return self._init_op
    # Legacy behavior allows re-initializing the resource.
    return self._init_op_fn(self._resource)

  def flush(self):
    """Flushes any buffered data."""
    if self._v2 and context.executing_eagerly() and self._closed:
      # Flushing a closed v2 writer is a silent no-op, not an error.
      return
    return _flush_fn(writer=self)

  def close(self):
    """Flushes and closes the summary writer."""
    if self._v2 and context.executing_eagerly() and self._closed:
      return
    try:
      # Ensure pending events hit disk before the writer is torn down.
      with ops.control_dependencies([self.flush()]):
        with ops.device("cpu:0"):
          return gen_summary_ops.close_summary_writer(self._resource)
    finally:
      if self._v2 and context.executing_eagerly():
        self._closed = True
class NoopSummaryWriter(SummaryWriter):
  """A summary writer that does nothing, for create_noop_writer()."""

  def set_as_default(self):
    # Intentionally does not install anything as the thread default.
    pass

  @tf_contextlib.contextmanager
  def as_default(self):
    # Yields without changing the active writer; summaries inside the
    # context are simply dropped.
    yield

  def init(self):
    pass

  def flush(self):
    pass

  def close(self):
    pass
@tf_export(v1=["summary.initialize"])
def initialize(
    graph=None,  # pylint: disable=redefined-outer-name
    session=None):
  """Initializes summary writing for graph execution mode.

  This operation is a no-op when executing eagerly.

  This helper method provides a higher-level alternative to using
  `tf.contrib.summary.summary_writer_initializer_op` and
  `tf.contrib.summary.graph`.

  Most users will also want to call `tf.compat.v1.train.create_global_step`
  which can happen before or after this function is called.

  Args:
    graph: A `tf.Graph` or `tf.compat.v1.GraphDef` to output to the writer.
      This function will not write the default graph by default. When
      writing to an event log file, the associated step will be zero.
    session: So this method can call `tf.Session.run`. This defaults
      to `tf.compat.v1.get_default_session`.

  Raises:
    RuntimeError: If the current thread has no default
      `tf.contrib.summary.SummaryWriter`.
    ValueError: If session wasn't passed and no default session.
  """
  if context.executing_eagerly():
    return
  if context.context().summary_writer is None:
    raise RuntimeError("No default tf.contrib.summary.SummaryWriter found")
  if session is None:
    session = ops.get_default_session()
    if session is None:
      raise ValueError("session must be passed if no default session exists")
  # Run the collected init ops for every summary writer in this graph.
  session.run(summary_writer_initializer_op())
  if graph is not None:
    data = _serialize_graph(graph)
    # Feed the serialized graph through a placeholder rather than embedding
    # it as a (potentially huge) constant in the graph itself.
    x = array_ops.placeholder(dtypes.string)
    session.run(_graph(x, 0), feed_dict={x: data})
@tf_export("summary.create_file_writer", v1=[])
def create_file_writer_v2(logdir,
                          max_queue=None,
                          flush_millis=None,
                          filename_suffix=None,
                          name=None):
  """Creates a summary file writer for the given log directory.

  Args:
    logdir: a string specifying the directory in which to write an event file.
    max_queue: the largest number of summaries to keep in a queue; will
      flush once the queue gets bigger than this. Defaults to 10.
    flush_millis: the largest interval between flushes. Defaults to 120,000.
    filename_suffix: optional suffix for the event file name. Defaults to `.v2`.
    name: a name for the op that creates the writer.

  Returns:
    A SummaryWriter object.

  Raises:
    ValueError: if `logdir` is None, or (eagerly) if any argument is a graph
      tensor.
  """
  if logdir is None:
    raise ValueError("logdir cannot be None")
  # Captured before init_scope(), which would otherwise hide the fact that
  # this call originated inside a tf.function.
  inside_function = ops.inside_function()
  with ops.name_scope(name, "create_file_writer") as scope, ops.device("cpu:0"):
    # Run init inside an init_scope() to hoist it out of tf.functions.
    with ops.init_scope():
      if context.executing_eagerly():
        # Reject graph tensors, which cannot be consumed after hoisting.
        _check_create_file_writer_args(
            inside_function,
            logdir=logdir,
            max_queue=max_queue,
            flush_millis=flush_millis,
            filename_suffix=filename_suffix)
      logdir = ops.convert_to_tensor(logdir, dtype=dtypes.string)
      if max_queue is None:
        max_queue = constant_op.constant(10)
      if flush_millis is None:
        flush_millis = constant_op.constant(2 * 60 * 1000)
      if filename_suffix is None:
        filename_suffix = constant_op.constant(".v2")
      # Prepend the PID and a process-local UID to the filename suffix to avoid
      # filename collisions within the machine (the filename already contains
      # the hostname to avoid cross-machine collisions).
      unique_prefix = constant_op.constant(".%s.%s" % (os.getpid(), ops.uid()))
      filename_suffix = unique_prefix + filename_suffix
      # Use a unique shared_name to prevent resource sharing.
      if context.executing_eagerly():
        shared_name = context.shared_name()
      else:
        shared_name = ops.name_from_scope_name(scope)  # pylint: disable=protected-access
      return ResourceSummaryWriter(
          shared_name=shared_name,
          init_op_fn=functools.partial(
              gen_summary_ops.create_summary_file_writer,
              logdir=logdir,
              max_queue=max_queue,
              flush_millis=flush_millis,
              filename_suffix=filename_suffix),
          name=name,
          v2=True)
def create_file_writer(logdir,
                       max_queue=None,
                       flush_millis=None,
                       filename_suffix=None,
                       name=None):
  """Creates a summary file writer in the current context under the given name.

  Args:
    logdir: a string, or None. If a string, creates a summary file writer
      which writes to the directory named by the string. If None, returns
      a mock object which acts like a summary writer but does nothing,
      useful to use as a context manager.
    max_queue: the largest number of summaries to keep in a queue; will
      flush once the queue gets bigger than this. Defaults to 10.
    flush_millis: the largest interval between flushes. Defaults to 120,000.
    filename_suffix: optional suffix for the event file name. Defaults to `.v2`.
    name: Shared name for this SummaryWriter resource stored to default
      Graph. Defaults to the provided logdir prefixed with `logdir:`. Note: if
      a summary writer resource with this shared name already exists, the
      returned SummaryWriter wraps that resource and the other arguments have
      no effect.

  Returns:
    Either a summary writer or an empty object which can be used as a
    summary writer.
  """
  if logdir is None:
    # No destination: hand back an inert writer for use as a context manager.
    return NoopSummaryWriter()
  logdir = str(logdir)
  with ops.device("cpu:0"):
    # Fill in constant defaults for any unspecified tuning knobs.
    max_queue = constant_op.constant(10) if max_queue is None else max_queue
    flush_millis = (constant_op.constant(2 * 60 * 1000)
                    if flush_millis is None else flush_millis)
    filename_suffix = (constant_op.constant(".v2")
                       if filename_suffix is None else filename_suffix)
    shared_name = "logdir:" + logdir if name is None else name
    init_fn = functools.partial(
        gen_summary_ops.create_summary_file_writer,
        logdir=logdir,
        max_queue=max_queue,
        flush_millis=flush_millis,
        filename_suffix=filename_suffix)
    return ResourceSummaryWriter(shared_name=shared_name, init_op_fn=init_fn)
def create_db_writer(db_uri,
                     experiment_name=None,
                     run_name=None,
                     user_name=None,
                     name=None):
  """Creates a summary database writer in the current context.

  This can be used to write tensors from the execution graph directly
  to a database. Only SQLite is supported right now. This function
  will create the schema if it doesn't exist. Entries in the Users,
  Experiments, and Runs tables will be created automatically if they
  don't already exist.

  Args:
    db_uri: For example "file:/tmp/foo.sqlite".
    experiment_name: Defaults to YYYY-MM-DD in local time if None.
      Empty string means the Run will not be associated with an
      Experiment. Can't contain ASCII control characters or <>. Case
      sensitive.
    run_name: Defaults to HH:MM:SS in local time if None. Empty string
      means a Tag will not be associated with any Run. Can't contain
      ASCII control characters or <>. Case sensitive.
    user_name: Defaults to system username if None. Empty means the
      Experiment will not be associated with a User. Must be valid as
      both a DNS label and Linux username.
    name: Shared name for this SummaryWriter resource stored to default
      `tf.Graph`.

  Returns:
    A `tf.summary.SummaryWriter` instance.
  """
  with ops.device("cpu:0"):
    # Fill in local-time defaults for any identifiers the caller omitted.
    if experiment_name is None:
      experiment_name = time.strftime("%Y-%m-%d", time.localtime(time.time()))
    if run_name is None:
      run_name = time.strftime("%H:%M:%S", time.localtime(time.time()))
    if user_name is None:
      user_name = getpass.getuser()
    # Validate and convert each identifier to a string tensor.
    experiment_name = _cleanse_string(
        "experiment_name", _EXPERIMENT_NAME_PATTERNS, experiment_name)
    run_name = _cleanse_string("run_name", _RUN_NAME_PATTERNS, run_name)
    user_name = _cleanse_string("user_name", _USER_NAME_PATTERNS, user_name)
    init_fn = functools.partial(
        gen_summary_ops.create_summary_db_writer,
        db_uri=db_uri,
        experiment_name=experiment_name,
        run_name=run_name,
        user_name=user_name)
    return ResourceSummaryWriter(shared_name=name, init_op_fn=init_fn)
@tf_export("summary.create_noop_writer", v1=[])
def create_noop_writer():
  """Returns a summary writer that does nothing.

  This is useful as a placeholder in code that expects a context manager.
  """
  return NoopSummaryWriter()


def _cleanse_string(name, pattern, value):
  """Validates a string argument against *pattern* and returns it as a tensor.

  Non-string values skip validation and go straight to tensor conversion.

  Raises:
    ValueError: if *value* is a string that does not match *pattern*.
  """
  if isinstance(value, six.string_types) and pattern.search(value) is None:
    raise ValueError("%s (%s) must match %s" % (name, value, pattern.pattern))
  return ops.convert_to_tensor(value, dtypes.string)


def _nothing():
  """Convenient else branch for when summaries do not record."""
  return constant_op.constant(False)
def all_summary_ops():
  """Graph-mode only. Returns all summary ops.

  Please note this excludes `tf.summary.graph` ops.

  Returns:
    The summary ops collected in the default graph, or None when executing
    eagerly (eager summaries run immediately and are never collected).
  """
  if context.executing_eagerly():
    return None
  # pylint: disable=protected-access
  return ops.get_collection(ops.GraphKeys._SUMMARY_COLLECTION)
def summary_writer_initializer_op():
  """Graph-mode only. Returns the list of ops to create all summary writers.

  Returns:
    The initializer ops.

  Raises:
    RuntimeError: If in Eager mode.
  """
  if context.executing_eagerly():
    raise RuntimeError(
        "tf.contrib.summary.summary_writer_initializer_op is only "
        "supported in graph mode.")
  # The collection is populated by ResourceSummaryWriter.__init__ when
  # writers are constructed in graph mode.
  return ops.get_collection(_SUMMARY_WRITER_INIT_COLLECTION_NAME)
# Characters not legal in a TF name scope; summary_scope() strips them from
# user-supplied names before opening the scope (the tag keeps them).
_INVALID_SCOPE_CHARACTERS = re.compile(r"[^-_/.A-Za-z0-9]")
@tf_export("summary.experimental.summary_scope", v1=[])
@tf_contextlib.contextmanager
def summary_scope(name, default_name="summary", values=None):
  """Experimental context manager for use when defining a custom summary op.

  This behaves similarly to `tf.name_scope`, except that it returns a generated
  summary tag in addition to the scope name. The tag is structurally similar to
  the scope name - derived from the user-provided name, prefixed with enclosing
  name scopes if any - but we relax the constraint that it be uniquified, as
  well as the character set limitation (so the user-provided name can contain
  characters not legal for scope names; in the scope name these are removed).
  This makes the summary tag more predictable and consistent for the user.

  For example, to define a new summary op called `my_op`:

  ```python
  def my_op(name, my_value, step):
    with tf.summary.summary_scope(name, "MyOp", [my_value]) as (tag, scope):
      my_value = tf.convert_to_tensor(my_value)
      return tf.summary.write(tag, my_value, step=step)
  ```

  Args:
    name: string name for the summary.
    default_name: Optional; if provided, used as default name of the summary.
    values: Optional; passed as `values` parameter to name_scope.

  Yields:
    A tuple `(tag, scope)` as described above.
  """
  chosen_name = name or default_name
  enclosing_scope = ops.get_name_scope()
  if enclosing_scope:
    tag = enclosing_scope + "/" + chosen_name
  else:
    tag = chosen_name
  # Strip illegal characters from the scope name, and if that leaves nothing,
  # use None instead so we pick up the default name.
  sanitized_name = _INVALID_SCOPE_CHARACTERS.sub("", chosen_name) or None
  with ops.name_scope(sanitized_name, default_name, values) as scope:
    yield tag, scope
@tf_export("summary.write", v1=[])
def write(tag, tensor, step=None, metadata=None, name=None):
  """Writes a generic summary to the default SummaryWriter if one exists.

  This exists primarily to support the definition of type-specific summary ops
  like scalar() and image(), and is not intended for direct use unless defining
  a new type-specific summary op.

  Args:
    tag: string tag used to identify the summary (e.g. in TensorBoard), usually
      generated with `tf.summary.summary_scope`
    tensor: the Tensor holding the summary data to write
    step: Explicit `int64`-castable monotonic step value for this summary. If
      omitted, this defaults to `tf.summary.experimental.get_step()`, which must
      not be None.
    metadata: Optional SummaryMetadata, as a proto or serialized bytes
    name: Optional string name for this op.

  Returns:
    True on success, or false if no summary was written because no default
    summary writer was available.

  Raises:
    ValueError: if a default writer exists, but no step was provided and
      `tf.summary.experimental.get_step()` is None.
  """
  with ops.name_scope(name, "write_summary") as scope:
    if context.context().summary_writer is None:
      # No writer installed for this thread: report "not written".
      return constant_op.constant(False)
    if step is None:
      step = get_step()
      if step is None:
        raise ValueError("No step set via 'step' argument or "
                         "tf.summary.experimental.set_step()")
    # Normalize metadata to serialized bytes; accept a proto, bytes, or None.
    if metadata is None:
      serialized_metadata = b""
    elif hasattr(metadata, "SerializeToString"):
      serialized_metadata = metadata.SerializeToString()
    else:
      serialized_metadata = metadata

    def record():
      """Record the actual summary and return True."""
      # Note the identity to move the tensor to the CPU.
      with ops.device("cpu:0"):
        write_summary_op = gen_summary_ops.write_summary(
            context.context().summary_writer._resource,  # pylint: disable=protected-access
            step,
            array_ops.identity(tensor),
            tag,
            serialized_metadata,
            name=scope)
        with ops.control_dependencies([write_summary_op]):
          return constant_op.constant(True)

    # Only execute the write when recording is enabled for this context.
    return smart_cond.smart_cond(
        _should_record_summaries_v2(), record, _nothing, name="summary_cond")
@tf_export("summary.experimental.write_raw_pb", v1=[])
def write_raw_pb(tensor, step=None, name=None):
  """Writes a summary using raw `tf.compat.v1.Summary` protocol buffers.

  Experimental: this exists to support the usage of V1-style manual summary
  writing (via the construction of a `tf.compat.v1.Summary` protocol buffer)
  with the V2 summary writing API.

  Args:
    tensor: the string Tensor holding one or more serialized `Summary` protobufs
    step: Explicit `int64`-castable monotonic step value for this summary. If
      omitted, this defaults to `tf.summary.experimental.get_step()`, which must
      not be None.
    name: Optional string name for this op.

  Returns:
    True on success, or false if no summary was written because no default
    summary writer was available.

  Raises:
    ValueError: if a default writer exists, but no step was provided and
      `tf.summary.experimental.get_step()` is None.
  """
  with ops.name_scope(name, "write_raw_pb") as scope:
    if context.context().summary_writer is None:
      # No writer installed for this thread: report "not written".
      return constant_op.constant(False)
    if step is None:
      step = get_step()
      if step is None:
        raise ValueError("No step set via 'step' argument or "
                         "tf.summary.experimental.set_step()")

    def record():
      """Record the actual summary and return True."""
      # Note the identity to move the tensor to the CPU.
      with ops.device("cpu:0"):
        raw_summary_op = gen_summary_ops.write_raw_proto_summary(
            context.context().summary_writer._resource,  # pylint: disable=protected-access
            step,
            array_ops.identity(tensor),
            name=scope)
        with ops.control_dependencies([raw_summary_op]):
          return constant_op.constant(True)

    # Only execute the write when recording is enabled for this context.
    return smart_cond.smart_cond(
        _should_record_summaries_v2(), record, _nothing, name="summary_cond")
def summary_writer_function(name, tensor, function, family=None):
  """Helper function to write summaries.

  Shared driver for the legacy (tf.contrib.summary) type-specific summary
  ops: wraps *function* in recording-condition logic and, in graph mode,
  registers the resulting op in the summary collection.

  Args:
    name: name of the summary
    tensor: main tensor to form the summary
    function: function taking a tag and a scope which writes the summary
    family: optional, the summary's family

  Returns:
    The result of writing the summary.
  """
  name_scope = ops.get_name_scope()
  if name_scope:
    # Add a slash to allow reentering the name scope.
    name_scope += "/"

  def record():
    # Re-enter the caller's name scope so the summary tag reflects it.
    with ops.name_scope(name_scope), summary_op_util.summary_scope(
        name, family, values=[tensor]) as (tag, scope):
      with ops.control_dependencies([function(tag, scope)]):
        return constant_op.constant(True)

  if context.context().summary_writer is None:
    return control_flow_ops.no_op()
  with ops.device("cpu:0"):
    op = smart_cond.smart_cond(
        should_record_summaries(), record, _nothing, name="")
    if not context.executing_eagerly():
      # Collected so that all_summary_ops() can return it in graph mode.
      ops.add_to_collection(ops.GraphKeys._SUMMARY_COLLECTION, op)  # pylint: disable=protected-access
  return op
def generic(name, tensor, metadata=None, family=None, step=None):
  """Writes a tensor summary if possible."""

  def _record(tag, scope):
    """Builds the write op for this summary under *tag*."""
    # Normalize metadata to a serialized string tensor (proto, tensor, or
    # None are all accepted).
    if metadata is None:
      serialized_metadata = constant_op.constant("")
    elif hasattr(metadata, "SerializeToString"):
      serialized_metadata = constant_op.constant(metadata.SerializeToString())
    else:
      serialized_metadata = metadata
    writer_resource = context.context().summary_writer._resource  # pylint: disable=protected-access
    # Note the identity to move the tensor to the CPU.
    return gen_summary_ops.write_summary(
        writer_resource,
        _choose_step(step),
        array_ops.identity(tensor),
        tag,
        serialized_metadata,
        name=scope)

  return summary_writer_function(name, tensor, _record, family=family)
def scalar(name, tensor, family=None, step=None):
  """Writes a scalar summary if possible.

  Unlike `tf.contrib.summary.generic` this op may change the dtype
  depending on the writer, for both practical and efficiency concerns.

  Args:
    name: An arbitrary name for this summary.
    tensor: A `tf.Tensor` Must be one of the following types:
      `float32`, `float64`, `int32`, `int64`, `uint8`, `int16`,
      `int8`, `uint16`, `half`, `uint32`, `uint64`.
    family: Optional, the summary's family.
    step: The `int64` monotonic step variable, which defaults
      to `tf.compat.v1.train.get_global_step`.

  Returns:
    The created `tf.Operation` or a `tf.no_op` if summary writing has
    not been enabled for this context.
  """

  def _record(tag, scope):
    writer_resource = context.context().summary_writer._resource  # pylint: disable=protected-access
    # Note the identity to move the tensor to the CPU.
    return gen_summary_ops.write_scalar_summary(
        writer_resource,
        _choose_step(step),
        tag,
        array_ops.identity(tensor),
        name=scope)

  return summary_writer_function(name, tensor, _record, family=family)
def histogram(name, tensor, family=None, step=None):
  """Writes a histogram summary if possible."""

  def _record(tag, scope):
    writer_resource = context.context().summary_writer._resource  # pylint: disable=protected-access
    # Note the identity to move the tensor to the CPU.
    return gen_summary_ops.write_histogram_summary(
        writer_resource,
        _choose_step(step),
        tag,
        array_ops.identity(tensor),
        name=scope)

  return summary_writer_function(name, tensor, _record, family=family)
def image(name, tensor, bad_color=None, max_images=3, family=None, step=None):
  """Writes an image summary if possible."""

  def _record(tag, scope):
    # Default "bad pixel" color is opaque red (RGBA), built lazily only
    # when the caller did not supply one.
    if bad_color is None:
      bad_color_ = constant_op.constant([255, 0, 0, 255], dtype=dtypes.uint8)
    else:
      bad_color_ = bad_color
    writer_resource = context.context().summary_writer._resource  # pylint: disable=protected-access
    # Note the identity to move the tensor to the CPU.
    return gen_summary_ops.write_image_summary(
        writer_resource,
        _choose_step(step),
        tag,
        array_ops.identity(tensor),
        bad_color_,
        max_images,
        name=scope)

  return summary_writer_function(name, tensor, _record, family=family)
def audio(name, tensor, sample_rate, max_outputs, family=None, step=None):
  """Writes an audio summary if possible."""

  def _record(tag, scope):
    writer_resource = context.context().summary_writer._resource  # pylint: disable=protected-access
    # Note the identity to move the tensor to the CPU.
    return gen_summary_ops.write_audio_summary(
        writer_resource,
        _choose_step(step),
        tag,
        array_ops.identity(tensor),
        sample_rate=sample_rate,
        max_outputs=max_outputs,
        name=scope)

  return summary_writer_function(name, tensor, _record, family=family)
def graph(param, step=None, name=None):
  """Writes a TensorFlow graph to the summary interface.

  The graph summary is, strictly speaking, not a summary: conditions like
  `tf.summary.should_record_summaries` do not apply, and only a single graph
  can be associated with a particular run — if multiple graphs are written,
  TensorBoard considers only the last one.

  When not using eager execution mode, consider passing the `graph`
  parameter to `tf.compat.v1.summary.initialize` instead of calling this
  function directly.

  Args:
    param: A `tf.Tensor` containing a serialized graph proto. When eager
      execution is enabled, `tf.Graph`, `tf.compat.v1.GraphDef`, and string
      types are automatically coerced.
    step: The global step variable. This doesn't have useful semantics for
      graph summaries, but is required by the event log file structure;
      defaults to the global step.
    name: A name for the operation (optional).

  Returns:
    The created `tf.Operation` or a `tf.no_op` if summary writing has
    not been enabled for this context.

  Raises:
    TypeError: If `param` isn't already a `tf.Tensor` in graph mode.
  """
  if not context.executing_eagerly():
    # Graph mode cannot coerce arbitrary objects; a Tensor is required.
    if not isinstance(param, ops.Tensor):
      raise TypeError("graph() needs a tf.Tensor (e.g. tf.placeholder) in graph "
                      "mode, but was: %s" % type(param))
  writer = context.context().summary_writer
  if writer is None:
    return control_flow_ops.no_op()
  with ops.device("cpu:0"):
    if isinstance(param, (ops.Graph, graph_pb2.GraphDef)):
      serialized = ops.convert_to_tensor(_serialize_graph(param), dtypes.string)
    else:
      serialized = array_ops.identity(param)
    return gen_summary_ops.write_graph_summary(
        writer._resource, _choose_step(step), serialized, name=name)  # pylint: disable=protected-access


_graph = graph  # for functions with a graph parameter
def import_event(tensor, name=None):
  """Writes a `tf.compat.v1.Event` binary proto.

  This can be used to import existing event logs into a new summary writer
  sink. Note that this is lower level than the other summary functions and
  ignores the `tf.summary.should_record_summaries` setting.

  Args:
    tensor: A `tf.Tensor` of type `string` containing a serialized
      `tf.compat.v1.Event` proto.
    name: A name for the operation (optional).

  Returns:
    The created `tf.Operation`.
  """
  writer_resource = context.context().summary_writer._resource  # pylint: disable=protected-access
  return gen_summary_ops.import_event(writer_resource, tensor, name=name)
@tf_export("summary.flush", v1=[])
def flush(writer=None, name=None):
  """Forces summary writer to send any buffered data to storage.

  This operation blocks until that finishes.

  Args:
    writer: The `tf.summary.SummaryWriter` resource to flush. The thread
      default is used if this parameter is None; when no writer is available
      at all, a `tf.no_op` is returned instead.
    name: A name for the operation (optional).

  Returns:
    The created `tf.Operation`.
  """
  if writer is None:
    writer = context.context().summary_writer
    if writer is None:
      return control_flow_ops.no_op()
  if isinstance(writer, ResourceSummaryWriter):
    resource = writer._resource  # pylint: disable=protected-access
  else:
    # Assume we were passed a raw resource tensor.
    resource = writer
  with ops.device("cpu:0"):
    return gen_summary_ops.flush_summary_writer(resource, name=name)


_flush_fn = flush  # for within SummaryWriter.flush()
def eval_dir(model_dir, name=None):
  """Construct a logdir for an eval summary writer.

  Args:
    model_dir: base model directory.
    name: optional eval run name; when falsy the subdirectory is "eval",
      otherwise "eval_<name>".

  Returns:
    The joined path `model_dir/eval[_name]`.
  """
  subdir = "eval_" + name if name else "eval"
  return os.path.join(model_dir, subdir)
# Deprecated alias kept for backwards compatibility; emits both the standard
# deprecation warning (via the decorator) and an explicit log line.
@deprecation.deprecated(date=None,
                        instructions="Renamed to create_file_writer().")
def create_summary_file_writer(*args, **kwargs):
  """Please use `tf.contrib.summary.create_file_writer`."""
  logging.warning("Deprecation Warning: create_summary_file_writer was renamed "
                  "to create_file_writer")
  return create_file_writer(*args, **kwargs)
def _serialize_graph(arbitrary_graph):
  """Serializes a `tf.Graph` or a GraphDef-like proto to bytes."""
  if isinstance(arbitrary_graph, ops.Graph):
    # Include inferred shapes so TensorBoard can display them.
    graph_def = arbitrary_graph.as_graph_def(add_shapes=True)
    return graph_def.SerializeToString()
  return arbitrary_graph.SerializeToString()
def _choose_step(step):
  """Returns *step* as an int64 tensor, defaulting to the global step."""
  if step is None:
    return training_util.get_or_create_global_step()
  if isinstance(step, ops.Tensor):
    # Already a tensor: pass through untouched.
    return step
  return ops.convert_to_tensor(step, dtypes.int64)
def _check_create_file_writer_args(inside_function, **kwargs):
  """Helper to check the validity of arguments to a create_file_writer() call.

  Args:
    inside_function: whether the create_file_writer() call is in a tf.function
    **kwargs: the arguments to check, as kwargs to give them names.

  Raises:
    ValueError: if the arguments are graph tensors.
  """
  for arg_name, arg in kwargs.items():
    # Only symbolic (non-eager) tensors are problematic: they cannot be
    # consumed once the create call is hoisted out to eager context.
    is_graph_tensor = (tensor_util.is_tensor(arg)
                       and not isinstance(arg, ops.EagerTensor))
    if not is_graph_tensor:
      continue
    if inside_function:
      raise ValueError(
          "Invalid graph Tensor argument \"%s=%s\" to create_file_writer() "
          "inside an @tf.function. The create call will be lifted into the "
          "outer eager execution context, so it cannot consume graph tensors "
          "defined inside the function body." % (arg_name, arg))
    raise ValueError(
        "Invalid graph Tensor argument \"%s=%s\" to eagerly executed "
        "create_file_writer()." % (arg_name, arg))
def run_metadata(name, data, step=None):
  """Writes an entire RunMetadata proto as a summary.

  A RunMetadata can contain DeviceStats, partition graphs, and function
  graphs; refer to the proto definition for the meaning of each field.

  Args:
    name: A name for this summary; the TensorBoard tag is this name prefixed
      by any active name scopes.
    data: A RunMetadata proto to write.
    step: Explicit `int64`-castable monotonic step value for this summary.
      When omitted, defaults to `tf.summary.experimental.get_step()`, which
      must not be None.

  Returns:
    True on success, or False if no summary was written because no default
    summary writer was available.

  Raises:
    ValueError: if a default writer exists, but no step was provided and
      `tf.summary.experimental.get_step()` is None.
  """
  metadata = summary_pb2.SummaryMetadata()
  # Plugin name is hard coded; see go/tb-plugin-name-hardcode for the
  # rationale.
  metadata.plugin_data.plugin_name = "graph_run_metadata"
  metadata.plugin_data.content = b"1"  # version number = 1
  with summary_scope(name,
                     "graph_run_metadata_summary",
                     [data, step]) as (tag, _):
    serialized = constant_op.constant(data.SerializeToString(),
                                      dtype=dtypes.string)
    return write(tag=tag, tensor=serialized, step=step, metadata=metadata)
def run_metadata_graphs(name, data, step=None):
  """Writes only the graph portions of a RunMetadata proto as a summary.

  Args:
    name: A name for this summary; the TensorBoard tag is this name prefixed
      by any active name scopes.
    data: A RunMetadata proto to write.
    step: Explicit `int64`-castable monotonic step value for this summary.
      When omitted, defaults to `tf.summary.experimental.get_step()`, which
      must not be None.

  Returns:
    True on success, or False if no summary was written because no default
    summary writer was available.

  Raises:
    ValueError: if a default writer exists, but no step was provided and
      `tf.summary.experimental.get_step()` is None.
  """
  metadata = summary_pb2.SummaryMetadata()
  # Plugin name is hard coded; see go/tb-plugin-name-hardcode for the
  # rationale.
  metadata.plugin_data.plugin_name = "graph_run_metadata_graph"
  metadata.plugin_data.content = b"1"  # version number = 1
  # Keep only the graph fields; DeviceStats etc. are intentionally dropped.
  data = config_pb2.RunMetadata(
      function_graphs=data.function_graphs,
      partition_graphs=data.partition_graphs)
  with summary_scope(name,
                     "graph_run_metadata_graph_summary",
                     [data, step]) as (tag, _):
    serialized = constant_op.constant(data.SerializeToString(),
                                      dtype=dtypes.string)
    return write(tag=tag, tensor=serialized, step=step, metadata=metadata)
def keras_model(name, data, step=None):
  """Writes a Keras model, serialized as JSON, as a Summary.

  Writing the Keras model configuration allows the TensorBoard graph plugin
  to render a conceptual graph, as opposed to a graph of ops. If the model
  fails to serialize as JSON, the failure is logged and False is returned.

  Args:
    name: A name for this summary; the TensorBoard tag is this name prefixed
      by any active name scopes.
    data: A Keras Model to write.
    step: Explicit `int64`-castable monotonic step value for this summary.
      When omitted, defaults to `tf.summary.experimental.get_step()`, which
      must not be None.

  Returns:
    True on success, or False if no summary was written because no default
    summary writer was available (or serialization failed).

  Raises:
    ValueError: if a default writer exists, but no step was provided and
      `tf.summary.experimental.get_step()` is None.
  """
  metadata = summary_pb2.SummaryMetadata()
  # Plugin name is hard coded; see go/tb-plugin-name-hardcode for the
  # rationale.
  metadata.plugin_data.plugin_name = "graph_keras_model"
  metadata.plugin_data.content = b"1"  # version number = 1
  try:
    model_json = data.to_json()
  except Exception as exc:  # pylint: disable=broad-except
    # Summary writing must never break user model code; log and bail out.
    logging.warn("Model failed to serialize as JSON. Ignoring... %s" % exc)
    return False
  with summary_scope(name, "graph_keras_model", [data, step]) as (tag, _):
    tensor = constant_op.constant(model_json, dtype=dtypes.string)
    return write(tag=tag, tensor=tensor, step=step, metadata=metadata)
# Record of what the active trace is collecting; fields mirror trace_on() args.
_TraceContext = collections.namedtuple("TraceContext", ("graph", "profiler"))
# Guards reads/writes of _current_trace_context across threads.
_current_trace_context_lock = threading.Lock()
# The active _TraceContext, or None when no trace is running.
_current_trace_context = None
@tf_export("summary.trace_on", v1=[])
def trace_on(graph=True, profiler=False):  # pylint: disable=redefined-outer-name
  """Starts a trace to record computation graphs and profiling information.

  Must be invoked in eager mode. While enabled, the TensorFlow runtime
  collects information, across all threads of execution, that can later be
  exported and consumed by TensorBoard. Use `tf.summary.trace_export` to
  stop the trace and export, or `tf.summary.trace_off` to stop without
  exporting.

  Args:
    graph: If True (default), collect executed graphs, both from tf.function
      invocations and from legacy graph mode.
    profiler: If True, also enable the advanced profiler. Enabling the
      profiler implicitly enables graph collection; it may incur a high
      memory overhead. Default is False.
  """
  if ops.inside_function():
    logging.warn("Cannot enable trace inside a tf.function.")
    return
  if not context.context().executing_eagerly():
    logging.warn("Must enable trace in eager mode.")
    return

  global _current_trace_context
  with _current_trace_context_lock:
    if _current_trace_context:
      logging.warn("Trace already enabled")
      return

    if profiler:
      # Profiling collects graphs through run metadata, so plain graph
      # collection is redundant in this case.
      context.context().enable_run_metadata()
      _profiler.start()
    elif graph:
      context.context().enable_graph_collection()
    _current_trace_context = _TraceContext(graph=graph, profiler=profiler)
@tf_export("summary.trace_export", v1=[])
def trace_export(name, step=None, profiler_outdir=None):
  """Stops and exports the active trace as a Summary and/or profile file.

  Stops the trace and exports all metadata collected during the trace to the
  default SummaryWriter, if one has been set.

  Args:
    name: A name for the summary to be written.
    step: Explicit `int64`-castable monotonic step value for this summary.
      When omitted, defaults to `tf.summary.experimental.get_step()`, which
      must not be None.
    profiler_outdir: Output directory for the profiler. Required when the
      profiler was enabled at trace start; ignored otherwise.

  Raises:
    ValueError: if a default writer exists, but no step was provided and
      `tf.summary.experimental.get_step()` is None.
  """
  # TODO(stephanlee): See if we can remove profiler_outdir and infer it from
  # the SummaryWriter's logdir.
  global _current_trace_context
  if ops.inside_function():
    logging.warn("Cannot export trace inside a tf.function.")
    return
  if not context.context().executing_eagerly():
    logging.warn("Can only export trace while executing eagerly.")
    return

  with _current_trace_context_lock:
    if _current_trace_context is None:
      raise ValueError("Must enable trace before export.")
    graph, profiler = _current_trace_context  # pylint: disable=redefined-outer-name
    if profiler and profiler_outdir is None:
      raise ValueError("Required profiler_outdir is not specified")

  exported_meta = context.context().export_run_metadata()
  if profiler or not graph:
    run_metadata(name, exported_meta, step)
  else:
    run_metadata_graphs(name, exported_meta, step)
  if profiler:
    _profiler.save(profiler_outdir, _profiler.stop())
  trace_off()
@tf_export("summary.trace_off", v1=[])
def trace_off():
  """Stops the current trace and discards any collected information."""
  # Clear the shared trace record first (under the lock) so a new trace_on()
  # can succeed immediately afterwards.
  global _current_trace_context
  with _current_trace_context_lock:
    _current_trace_context = None
  # Disabling run_metadata disables graph collection as well.
  context.context().disable_run_metadata()
  # profiler only has start and stop. One needs to stop in order to export
  # and stopping when it is not running will raise an error.
  try:
    _profiler.stop()
  except _profiler.ProfilerNotRunningError:
    pass
| 36.58347 | 104 | 0.714394 |
71d7f95ef7fb17b904525b064c0600174e180e12 | 22,474 | py | Python | examples/full-screen/ansi-art-and-textarea.py | AnthonyDiGirolamo/python-prompt-toolkit | 4a66820d464b4f304ca3b647ff86809b20e887a4 | [
"BSD-3-Clause"
] | null | null | null | examples/full-screen/ansi-art-and-textarea.py | AnthonyDiGirolamo/python-prompt-toolkit | 4a66820d464b4f304ca3b647ff86809b20e887a4 | [
"BSD-3-Clause"
] | null | null | null | examples/full-screen/ansi-art-and-textarea.py | AnthonyDiGirolamo/python-prompt-toolkit | 4a66820d464b4f304ca3b647ff86809b20e887a4 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
from __future__ import unicode_literals
from prompt_toolkit.application import Application
from prompt_toolkit.formatted_text import ANSI, HTML
from prompt_toolkit.key_binding import KeyBindings
from prompt_toolkit.layout import (
FormattedTextControl,
HSplit,
Layout,
VSplit,
Window,
WindowAlign,
)
from prompt_toolkit.layout.dimension import D
from prompt_toolkit.widgets import Dialog, Label, TextArea
def main():
    """Run the demo: ANSI art on the left, an editable text area on the right."""
    bindings = KeyBindings()

    @bindings.add("c-c")
    def _(event):
        "Quit when control-c is pressed."
        event.app.exit()

    editor = TextArea(text="You can type here...")

    # Dialog body: a centered hint on top, then logo + editor side by side.
    body = HSplit(
        [
            Label(
                HTML("Press <reverse>control-c</reverse> to quit."),
                align=WindowAlign.CENTER,
            ),
            VSplit(
                [
                    Label(PROMPT_TOOLKIT_LOGO, align=WindowAlign.CENTER),
                    editor,
                ],
            ),
        ]
    )

    app = Application(
        layout=Layout(
            container=Dialog(
                title="ANSI Art demo - Art on the left, text area on the right",
                body=body,
                with_background=True,
            ),
            focused_element=editor,
        ),
        full_screen=True,
        mouse_support=True,
        key_bindings=bindings,
    )
    app.run()
PROMPT_TOOLKIT_LOGO = ANSI(
"""
\x1b[48;2;0;0;0m \x1b[m
\x1b[48;2;0;0;0m \x1b[48;2;0;249;0m\x1b[38;2;0;0;0m▀\x1b[48;2;0;209;0m▀\x1b[48;2;0;207;0m\x1b[38;2;6;34;6m▀\x1b[48;2;0;66;0m\x1b[38;2;30;171;30m▀\x1b[48;2;0;169;0m\x1b[38;2;51;35;51m▀\x1b[48;2;0;248;0m\x1b[38;2;49;194;49m▀\x1b[48;2;0;111;0m\x1b[38;2;25;57;25m▀\x1b[48;2;140;195;140m\x1b[38;2;3;17;3m▀\x1b[48;2;30;171;30m\x1b[38;2;0;0;0m▀\x1b[48;2;0;0;0m \x1b[m
\x1b[48;2;0;0;0m \x1b[48;2;77;127;78m\x1b[38;2;118;227;108m▀\x1b[48;2;216;1;13m\x1b[38;2;49;221;57m▀\x1b[48;2;26;142;76m\x1b[38;2;108;146;165m▀\x1b[48;2;26;142;90m\x1b[38;2;209;197;114m▀▀\x1b[38;2;209;146;114m▀\x1b[48;2;26;128;90m\x1b[38;2;158;197;114m▀\x1b[48;2;58;210;70m\x1b[38;2;223;152;89m▀\x1b[48;2;232;139;44m\x1b[38;2;97;121;146m▀\x1b[48;2;233;139;45m\x1b[38;2;140;188;183m▀\x1b[48;2;231;139;44m\x1b[38;2;40;168;8m▀\x1b[48;2;228;140;44m\x1b[38;2;37;169;7m▀\x1b[48;2;227;140;44m\x1b[38;2;36;169;7m▀\x1b[48;2;211;142;41m\x1b[38;2;23;171;5m▀\x1b[48;2;86;161;17m\x1b[38;2;2;174;1m▀\x1b[48;2;0;175;0m \x1b[48;2;0;254;0m\x1b[38;2;190;119;190m▀\x1b[48;2;92;39;23m\x1b[38;2;125;50;114m▀\x1b[48;2;43;246;41m\x1b[38;2;49;10;165m▀\x1b[48;2;12;128;90m\x1b[38;2;209;197;114m▀\x1b[48;2;26;128;90m▀▀▀▀\x1b[48;2;26;128;76m▀\x1b[48;2;26;128;90m\x1b[38;2;209;247;114m▀▀\x1b[38;2;209;197;114m▀\x1b[48;2;26;128;76m\x1b[38;2;209;247;114m▀\x1b[48;2;26;128;90m▀▀▀\x1b[48;2;26;128;76m▀\x1b[48;2;26;128;90m▀▀\x1b[48;2;12;128;76m▀\x1b[48;2;12;113;90m\x1b[38;2;209;247;64m▀\x1b[38;2;209;247;114m▀\x1b[48;2;12;128;90m▀\x1b[48;2;12;113;90m▀\x1b[48;2;12;113;76m\x1b[38;2;209;247;64m▀\x1b[48;2;12;128;90m▀\x1b[48;2;12;113;90m▀\x1b[48;2;12;113;76m\x1b[38;2;209;247;114m▀\x1b[48;2;12;113;90m\x1b[38;2;209;247;64m▀\x1b[48;2;26;128;90m\x1b[38;2;151;129;163m▀\x1b[48;2;115;120;103m\x1b[38;2;62;83;227m▀\x1b[48;2;138;14;25m\x1b[38;2;104;106;160m▀\x1b[48;2;0;0;57m\x1b[38;2;0;0;0m▀\x1b[m
\x1b[48;2;249;147;8m\x1b[38;2;172;69;38m▀\x1b[48;2;197;202;10m\x1b[38;2;82;192;58m▀\x1b[48;2;248;124;45m\x1b[38;2;251;131;47m▀\x1b[48;2;248;124;44m▀\x1b[48;2;248;124;45m▀▀\x1b[48;2;248;124;44m▀\x1b[48;2;248;124;45m▀\x1b[48;2;248;125;45m\x1b[38;2;251;130;47m▀\x1b[48;2;248;124;45m\x1b[38;2;252;130;47m▀\x1b[48;2;248;125;45m\x1b[38;2;252;131;47m▀\x1b[38;2;252;130;47m▀\x1b[38;2;252;131;47m▀▀\x1b[48;2;249;125;45m\x1b[38;2;255;130;48m▀\x1b[48;2;233;127;42m\x1b[38;2;190;141;35m▀\x1b[48;2;57;163;10m\x1b[38;2;13;172;3m▀\x1b[48;2;0;176;0m\x1b[38;2;0;175;0m▀\x1b[48;2;7;174;1m\x1b[38;2;35;169;7m▀\x1b[48;2;178;139;32m\x1b[38;2;220;136;41m▀\x1b[48;2;252;124;45m\x1b[38;2;253;131;47m▀\x1b[48;2;248;125;45m\x1b[38;2;251;131;47m▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀\x1b[48;2;248;125;44m▀\x1b[48;2;248;135;61m\x1b[38;2;251;132;48m▀\x1b[48;2;250;173;122m\x1b[38;2;251;133;50m▀\x1b[48;2;249;155;93m\x1b[38;2;251;132;49m▀\x1b[48;2;248;132;55m\x1b[38;2;251;132;48m▀\x1b[48;2;250;173;122m\x1b[38;2;251;134;51m▀\x1b[48;2;250;163;106m\x1b[38;2;251;134;50m▀\x1b[48;2;248;128;49m\x1b[38;2;251;132;47m▀\x1b[48;2;250;166;110m\x1b[38;2;251;135;52m▀\x1b[48;2;250;175;125m\x1b[38;2;251;136;54m▀\x1b[48;2;248;132;56m\x1b[38;2;251;132;48m▀\x1b[48;2;248;220;160m\x1b[38;2;105;247;172m▀\x1b[48;2;62;101;236m\x1b[38;2;11;207;160m▀\x1b[m
\x1b[48;2;138;181;197m\x1b[38;2;205;36;219m▀\x1b[48;2;177;211;200m\x1b[38;2;83;231;105m▀\x1b[48;2;242;113;40m\x1b[38;2;245;119;42m▀\x1b[48;2;243;113;41m▀\x1b[48;2;245;114;41m▀▀▀▀▀▀▀▀\x1b[38;2;245;119;43m▀▀▀\x1b[48;2;247;114;41m\x1b[38;2;246;119;43m▀\x1b[48;2;202;125;34m\x1b[38;2;143;141;25m▀\x1b[48;2;84;154;14m\x1b[38;2;97;152;17m▀\x1b[48;2;36;166;6m▀\x1b[48;2;139;140;23m\x1b[38;2;183;133;32m▀\x1b[48;2;248;114;41m\x1b[38;2;248;118;43m▀\x1b[48;2;245;115;41m\x1b[38;2;245;119;43m▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀\x1b[38;2;245;119;42m▀\x1b[48;2;246;117;44m\x1b[38;2;246;132;62m▀\x1b[48;2;246;123;54m\x1b[38;2;249;180;138m▀\x1b[48;2;246;120;49m\x1b[38;2;247;157;102m▀\x1b[48;2;246;116;42m\x1b[38;2;246;127;54m▀\x1b[48;2;246;121;50m\x1b[38;2;248;174;128m▀\x1b[48;2;246;120;48m\x1b[38;2;248;162;110m▀\x1b[48;2;246;116;41m\x1b[38;2;245;122;47m▀\x1b[48;2;246;118;46m\x1b[38;2;248;161;108m▀\x1b[48;2;244;118;47m\x1b[38;2;248;171;123m▀\x1b[48;2;243;115;42m\x1b[38;2;246;127;54m▀\x1b[48;2;179;52;29m\x1b[38;2;86;152;223m▀\x1b[48;2;141;225;95m\x1b[38;2;247;146;130m▀\x1b[m
\x1b[48;2;50;237;108m\x1b[38;2;94;70;153m▀\x1b[48;2;206;221;133m\x1b[38;2;64;240;39m▀\x1b[48;2;233;100;36m\x1b[38;2;240;107;38m▀\x1b[48;2;114;56;22m\x1b[38;2;230;104;37m▀\x1b[48;2;24;20;10m\x1b[38;2;193;90;33m▀\x1b[48;2;21;19;9m\x1b[38;2;186;87;32m▀▀▀▀▀▀▀\x1b[38;2;186;87;33m▀▀▀\x1b[48;2;22;18;10m\x1b[38;2;189;86;33m▀\x1b[48;2;18;36;8m\x1b[38;2;135;107;24m▀\x1b[48;2;3;153;2m\x1b[38;2;5;171;1m▀\x1b[48;2;0;177;0m \x1b[48;2;4;158;2m\x1b[38;2;69;147;12m▀\x1b[48;2;19;45;8m\x1b[38;2;185;89;32m▀\x1b[48;2;22;17;10m\x1b[38;2;186;87;33m▀\x1b[48;2;21;19;9m▀▀▀▀▀▀▀▀\x1b[48;2;21;19;10m▀▀\x1b[48;2;21;19;9m▀▀▀▀\x1b[48;2;21;19;10m▀▀▀\x1b[38;2;186;87;32m▀▀\x1b[48;2;21;19;9m\x1b[38;2;186;87;33m▀\x1b[48;2;21;19;10m\x1b[38;2;186;87;32m▀▀\x1b[48;2;21;19;9m\x1b[38;2;186;87;33m▀\x1b[48;2;22;19;10m\x1b[38;2;191;89;33m▀\x1b[48;2;95;49;20m\x1b[38;2;226;103;37m▀\x1b[48;2;227;99;36m\x1b[38;2;241;109;39m▀\x1b[48;2;80;140;154m\x1b[38;2;17;240;92m▀\x1b[48;2;221;58;175m\x1b[38;2;71;14;245m▀\x1b[m
\x1b[48;2;195;38;42m\x1b[38;2;5;126;86m▀\x1b[48;2;139;230;67m\x1b[38;2;253;201;228m▀\x1b[48;2;208;82;30m\x1b[38;2;213;89;32m▀\x1b[48;2;42;26;12m\x1b[38;2;44;27;12m▀\x1b[48;2;9;14;7m\x1b[38;2;8;13;7m▀\x1b[48;2;11;15;8m\x1b[38;2;10;14;7m▀▀▀▀▀▀▀▀▀▀▀\x1b[48;2;11;12;8m\x1b[38;2;10;17;7m▀\x1b[48;2;7;71;5m\x1b[38;2;4;120;3m▀\x1b[48;2;1;164;1m\x1b[38;2;0;178;0m▀\x1b[48;2;4;118;3m\x1b[38;2;0;177;0m▀\x1b[48;2;5;108;3m\x1b[38;2;4;116;3m▀\x1b[48;2;7;75;5m\x1b[38;2;10;23;7m▀\x1b[48;2;10;33;7m\x1b[38;2;10;12;7m▀\x1b[48;2;11;13;8m\x1b[38;2;10;14;7m▀\x1b[48;2;11;14;8m▀\x1b[48;2;11;15;8m▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀\x1b[48;2;10;14;7m\x1b[38;2;9;14;7m▀\x1b[48;2;30;21;10m\x1b[38;2;30;22;10m▀\x1b[48;2;195;79;29m\x1b[38;2;200;84;31m▀\x1b[48;2;205;228;23m\x1b[38;2;111;40;217m▀\x1b[48;2;9;217;69m\x1b[38;2;115;137;104m▀\x1b[m
\x1b[48;2;106;72;209m\x1b[38;2;151;183;253m▀\x1b[48;2;120;239;0m\x1b[38;2;25;2;162m▀\x1b[48;2;203;72;26m\x1b[38;2;206;77;28m▀\x1b[48;2;42;24;11m\x1b[38;2;42;25;11m▀\x1b[48;2;9;14;7m \x1b[48;2;11;15;8m \x1b[38;2;11;14;8m▀\x1b[48;2;11;13;8m\x1b[38;2;10;28;7m▀\x1b[48;2;9;36;6m\x1b[38;2;7;78;5m▀\x1b[48;2;2;153;1m\x1b[38;2;6;94;4m▀\x1b[48;2;0;178;0m\x1b[38;2;2;156;1m▀\x1b[48;2;0;175;0m\x1b[38;2;1;167;1m▀\x1b[48;2;0;177;0m\x1b[38;2;2;145;2m▀\x1b[48;2;2;147;2m\x1b[38;2;8;54;6m▀\x1b[48;2;9;38;6m\x1b[38;2;11;13;8m▀\x1b[48;2;11;13;8m\x1b[38;2;11;14;8m▀\x1b[48;2;11;15;8m \x1b[48;2;10;14;7m \x1b[48;2;29;20;10m\x1b[38;2;29;21;10m▀\x1b[48;2;190;69;25m\x1b[38;2;193;74;27m▀\x1b[48;2;136;91;148m\x1b[38;2;42;159;86m▀\x1b[48;2;89;85;149m\x1b[38;2;160;5;219m▀\x1b[m
\x1b[48;2;229;106;143m\x1b[38;2;40;239;187m▀\x1b[48;2;196;134;237m\x1b[38;2;6;11;95m▀\x1b[48;2;197;60;22m\x1b[38;2;201;67;24m▀\x1b[48;2;41;22;10m\x1b[38;2;41;23;11m▀\x1b[48;2;9;14;7m \x1b[48;2;11;15;8m \x1b[48;2;10;14;7m\x1b[38;2;11;15;8m▀▀\x1b[48;2;11;15;8m \x1b[38;2;11;14;8m▀\x1b[48;2;11;14;8m\x1b[38;2;11;16;7m▀\x1b[48;2;11;15;7m\x1b[38;2;7;79;5m▀\x1b[48;2;7;68;5m\x1b[38;2;1;164;1m▀\x1b[48;2;2;153;1m\x1b[38;2;0;176;0m▀\x1b[48;2;2;154;1m\x1b[38;2;0;175;0m▀\x1b[48;2;5;107;3m\x1b[38;2;1;171;1m▀\x1b[48;2;4;115;3m\x1b[38;2;5;105;3m▀\x1b[48;2;6;84;4m\x1b[38;2;11;18;7m▀\x1b[48;2;10;30;7m\x1b[38;2;11;13;8m▀\x1b[48;2;11;13;8m\x1b[38;2;11;15;8m▀\x1b[48;2;11;14;8m▀\x1b[48;2;11;15;8m \x1b[48;2;10;14;7m \x1b[48;2;29;19;9m\x1b[38;2;29;20;10m▀\x1b[48;2;185;58;22m\x1b[38;2;188;64;24m▀\x1b[48;2;68;241;49m\x1b[38;2;199;22;211m▀\x1b[48;2;133;139;8m\x1b[38;2;239;129;78m▀\x1b[m
\x1b[48;2;74;30;32m\x1b[38;2;163;185;76m▀\x1b[48;2;110;172;9m\x1b[38;2;177;1;123m▀\x1b[48;2;189;43;16m\x1b[38;2;193;52;19m▀\x1b[48;2;39;20;9m\x1b[38;2;40;21;10m▀\x1b[48;2;9;14;7m \x1b[48;2;11;15;8m \x1b[48;2;11;14;7m\x1b[38;2;11;15;8m▀\x1b[48;2;9;14;7m\x1b[38;2;11;14;8m▀\x1b[48;2;106;54;38m\x1b[38;2;31;24;15m▀\x1b[48;2;164;71;49m\x1b[38;2;24;20;12m▀\x1b[48;2;94;46;31m\x1b[38;2;8;14;7m▀\x1b[48;2;36;24;15m\x1b[38;2;9;14;7m▀\x1b[48;2;11;15;8m\x1b[38;2;11;14;7m▀\x1b[48;2;8;14;7m\x1b[38;2;11;15;8m▀\x1b[48;2;10;14;7m▀\x1b[48;2;11;15;8m \x1b[38;2;11;14;8m▀\x1b[48;2;11;14;8m\x1b[38;2;11;13;8m▀\x1b[48;2;11;13;8m\x1b[38;2;9;45;6m▀\x1b[48;2;10;19;7m\x1b[38;2;7;75;5m▀\x1b[48;2;6;83;4m\x1b[38;2;2;143;2m▀\x1b[48;2;2;156;1m\x1b[38;2;0;176;0m▀\x1b[48;2;0;177;0m\x1b[38;2;0;175;0m▀\x1b[38;2;3;134;2m▀\x1b[48;2;2;152;1m\x1b[38;2;9;46;6m▀\x1b[48;2;8;60;5m\x1b[38;2;11;13;8m▀\x1b[48;2;11;14;7m\x1b[38;2;11;14;8m▀\x1b[48;2;11;14;8m\x1b[38;2;11;15;8m▀\x1b[48;2;11;15;8m \x1b[48;2;10;14;7m \x1b[48;2;28;18;9m \x1b[48;2;177;43;16m\x1b[38;2;181;51;19m▀\x1b[48;2;93;35;236m\x1b[38;2;224;10;142m▀\x1b[48;2;72;51;52m\x1b[38;2;213;112;158m▀\x1b[m
\x1b[48;2;175;209;155m\x1b[38;2;7;131;221m▀\x1b[48;2;24;0;85m\x1b[38;2;44;86;152m▀\x1b[48;2;181;27;10m\x1b[38;2;185;35;13m▀\x1b[48;2;38;17;8m\x1b[38;2;39;18;9m▀\x1b[48;2;9;14;7m \x1b[48;2;11;15;8m \x1b[48;2;11;14;7m \x1b[48;2;9;14;7m \x1b[48;2;87;43;32m\x1b[38;2;114;54;39m▀\x1b[48;2;188;71;54m\x1b[38;2;211;82;59m▀\x1b[48;2;203;73;55m\x1b[38;2;204;80;57m▀\x1b[48;2;205;73;55m\x1b[38;2;178;71;51m▀\x1b[48;2;204;74;55m\x1b[38;2;119;52;37m▀\x1b[48;2;188;69;52m\x1b[38;2;54;29;19m▀\x1b[48;2;141;55;41m\x1b[38;2;16;17;9m▀\x1b[48;2;75;35;24m\x1b[38;2;8;14;7m▀\x1b[48;2;26;20;12m\x1b[38;2;10;14;7m▀\x1b[48;2;9;14;7m\x1b[38;2;11;14;7m▀\x1b[38;2;11;15;8m▀\x1b[48;2;11;14;7m▀\x1b[48;2;11;15;8m \x1b[38;2;11;14;8m▀\x1b[48;2;11;14;8m \x1b[48;2;11;13;8m\x1b[38;2;9;45;6m▀\x1b[48;2;10;23;7m\x1b[38;2;4;123;3m▀\x1b[48;2;7;75;5m\x1b[38;2;1;172;1m▀\x1b[48;2;6;84;4m\x1b[38;2;2;154;1m▀\x1b[48;2;4;114;3m\x1b[38;2;5;107;3m▀\x1b[48;2;5;103;4m\x1b[38;2;10;29;7m▀\x1b[48;2;10;23;7m\x1b[38;2;11;13;8m▀\x1b[48;2;11;14;8m\x1b[38;2;11;15;8m▀\x1b[48;2;11;15;8m \x1b[48;2;10;14;7m \x1b[48;2;27;16;8m\x1b[38;2;27;17;9m▀\x1b[48;2;170;27;10m\x1b[38;2;174;35;13m▀\x1b[48;2;118;117;199m\x1b[38;2;249;61;74m▀\x1b[48;2;10;219;61m\x1b[38;2;187;245;202m▀\x1b[m
\x1b[48;2;20;155;44m\x1b[38;2;86;54;110m▀\x1b[48;2;195;85;113m\x1b[38;2;214;171;227m▀\x1b[48;2;173;10;4m\x1b[38;2;177;19;7m▀\x1b[48;2;37;14;7m\x1b[38;2;37;16;8m▀\x1b[48;2;9;15;8m\x1b[38;2;9;14;7m▀\x1b[48;2;11;15;8m \x1b[38;2;11;14;7m▀\x1b[48;2;11;14;7m\x1b[38;2;15;17;9m▀\x1b[48;2;9;14;7m\x1b[38;2;50;29;20m▀\x1b[48;2;10;15;8m\x1b[38;2;112;47;36m▀\x1b[48;2;33;22;15m\x1b[38;2;170;61;48m▀\x1b[48;2;88;38;29m\x1b[38;2;197;66;53m▀\x1b[48;2;151;53;43m\x1b[38;2;201;67;53m▀\x1b[48;2;189;60;50m▀\x1b[48;2;198;60;51m\x1b[38;2;194;65;52m▀\x1b[38;2;160;56;44m▀\x1b[48;2;196;60;50m\x1b[38;2;99;40;30m▀\x1b[48;2;174;55;47m\x1b[38;2;41;24;16m▀\x1b[48;2;122;43;35m\x1b[38;2;12;15;8m▀\x1b[48;2;59;27;20m\x1b[38;2;8;14;7m▀\x1b[48;2;16;16;9m\x1b[38;2;10;14;7m▀\x1b[48;2;10;14;7m\x1b[38;2;11;15;8m▀\x1b[48;2;11;15;8m \x1b[38;2;11;14;8m▀\x1b[48;2;11;14;8m\x1b[38;2;11;12;8m▀\x1b[48;2;10;25;7m\x1b[38;2;7;79;5m▀\x1b[48;2;3;141;2m\x1b[38;2;1;174;1m▀\x1b[48;2;0;178;0m\x1b[38;2;1;169;1m▀\x1b[48;2;6;88;4m\x1b[38;2;8;56;6m▀\x1b[48;2;11;12;8m \x1b[48;2;11;14;8m\x1b[38;2;11;15;8m▀\x1b[48;2;11;15;8m \x1b[48;2;10;14;7m \x1b[48;2;26;15;8m\x1b[38;2;27;15;8m▀\x1b[48;2;162;12;5m\x1b[38;2;166;20;8m▀\x1b[48;2;143;168;130m\x1b[38;2;18;142;37m▀\x1b[48;2;240;96;105m\x1b[38;2;125;158;211m▀\x1b[m
\x1b[48;2;54;0;0m\x1b[38;2;187;22;0m▀\x1b[48;2;204;0;0m\x1b[38;2;128;208;0m▀\x1b[48;2;162;1;1m\x1b[38;2;168;3;1m▀\x1b[48;2;35;13;7m\x1b[38;2;36;13;7m▀\x1b[48;2;9;15;8m \x1b[48;2;11;15;8m \x1b[38;2;11;14;7m▀\x1b[38;2;9;14;7m▀\x1b[38;2;8;14;7m▀\x1b[48;2;10;14;7m\x1b[38;2;21;18;11m▀\x1b[48;2;7;13;6m\x1b[38;2;65;30;23m▀\x1b[48;2;12;16;9m\x1b[38;2;129;45;38m▀\x1b[48;2;57;29;23m\x1b[38;2;176;53;47m▀\x1b[48;2;148;49;44m\x1b[38;2;191;53;48m▀\x1b[48;2;187;52;48m\x1b[38;2;192;53;48m▀\x1b[48;2;186;51;47m\x1b[38;2;194;54;49m▀\x1b[48;2;182;52;47m\x1b[38;2;178;52;46m▀\x1b[48;2;59;27;21m\x1b[38;2;53;26;19m▀\x1b[48;2;8;14;7m \x1b[48;2;11;15;8m \x1b[48;2;11;14;8m\x1b[38;2;11;15;8m▀\x1b[48;2;11;12;8m\x1b[38;2;11;14;8m▀\x1b[48;2;10;30;7m\x1b[38;2;10;23;7m▀\x1b[48;2;5;110;3m\x1b[38;2;3;138;2m▀\x1b[48;2;2;149;2m\x1b[38;2;0;181;0m▀\x1b[48;2;6;92;4m\x1b[38;2;5;100;4m▀\x1b[48;2;11;13;8m \x1b[48;2;11;14;8m \x1b[48;2;11;15;8m \x1b[48;2;10;15;8m \x1b[48;2;25;14;7m\x1b[38;2;26;14;7m▀\x1b[48;2;152;2;1m\x1b[38;2;158;5;2m▀\x1b[48;2;6;0;0m\x1b[38;2;44;193;0m▀\x1b[48;2;108;0;0m\x1b[38;2;64;70;0m▀\x1b[m
\x1b[48;2;44;0;0m\x1b[38;2;177;0;0m▀\x1b[48;2;147;0;0m\x1b[38;2;71;0;0m▀\x1b[48;2;148;1;1m\x1b[38;2;155;1;1m▀\x1b[48;2;33;13;7m\x1b[38;2;34;13;7m▀\x1b[48;2;9;15;8m \x1b[48;2;11;15;8m \x1b[48;2;11;14;7m\x1b[38;2;11;15;8m▀\x1b[48;2;10;14;7m▀\x1b[48;2;9;14;7m▀\x1b[48;2;13;16;9m\x1b[38;2;11;14;7m▀\x1b[48;2;42;24;17m\x1b[38;2;9;14;7m▀\x1b[48;2;97;38;32m\x1b[38;2;10;15;8m▀\x1b[48;2;149;49;44m\x1b[38;2;30;21;14m▀\x1b[48;2;174;52;48m\x1b[38;2;79;34;28m▀\x1b[48;2;178;52;48m\x1b[38;2;136;45;40m▀\x1b[38;2;172;51;47m▀\x1b[48;2;173;52;48m\x1b[38;2;181;52;48m▀\x1b[48;2;147;47;42m\x1b[38;2;183;52;48m▀\x1b[48;2;94;35;30m\x1b[38;2;177;52;48m▀\x1b[48;2;25;19;12m\x1b[38;2;56;27;20m▀\x1b[48;2;10;14;7m\x1b[38;2;8;14;7m▀\x1b[48;2;11;12;8m\x1b[38;2;11;15;8m▀\x1b[48;2;10;23;7m\x1b[38;2;11;14;8m▀\x1b[48;2;7;76;5m\x1b[38;2;11;13;8m▀\x1b[48;2;2;152;1m\x1b[38;2;9;45;6m▀\x1b[48;2;0;177;0m\x1b[38;2;5;106;3m▀\x1b[48;2;0;178;0m\x1b[38;2;4;123;3m▀\x1b[48;2;1;168;1m\x1b[38;2;5;104;3m▀\x1b[48;2;8;53;6m\x1b[38;2;9;47;6m▀\x1b[48;2;11;12;8m\x1b[38;2;11;13;8m▀\x1b[48;2;11;15;8m \x1b[48;2;10;15;8m \x1b[48;2;24;14;7m\x1b[38;2;25;14;7m▀\x1b[48;2;140;2;1m\x1b[38;2;146;2;1m▀\x1b[48;2;219;0;0m\x1b[38;2;225;0;0m▀\x1b[48;2;126;0;0m\x1b[38;2;117;0;0m▀\x1b[m
\x1b[48;2;34;0;0m\x1b[38;2;167;0;0m▀\x1b[48;2;89;0;0m\x1b[38;2;14;0;0m▀\x1b[48;2;134;1;1m\x1b[38;2;141;1;1m▀\x1b[48;2;31;13;7m\x1b[38;2;32;13;7m▀\x1b[48;2;10;15;8m \x1b[48;2;11;15;8m \x1b[48;2;11;14;7m\x1b[38;2;11;15;8m▀\x1b[48;2;10;14;7m\x1b[38;2;11;14;7m▀\x1b[48;2;53;29;22m\x1b[38;2;10;14;7m▀\x1b[48;2;127;46;41m\x1b[38;2;20;18;11m▀\x1b[48;2;158;51;47m\x1b[38;2;57;28;22m▀\x1b[48;2;166;52;48m\x1b[38;2;113;42;36m▀\x1b[48;2;167;52;48m\x1b[38;2;156;50;46m▀\x1b[48;2;164;52;48m\x1b[38;2;171;52;48m▀\x1b[48;2;146;48;44m\x1b[38;2;172;52;48m▀\x1b[48;2;102;38;33m▀\x1b[48;2;50;26;19m\x1b[38;2;161;51;46m▀\x1b[48;2;17;17;10m\x1b[38;2;126;44;38m▀\x1b[48;2;8;14;7m\x1b[38;2;71;31;25m▀\x1b[48;2;10;14;7m\x1b[38;2;27;19;13m▀\x1b[48;2;11;13;8m\x1b[38;2;10;14;7m▀\x1b[48;2;9;40;6m\x1b[38;2;10;13;7m▀\x1b[48;2;4;119;3m\x1b[38;2;11;20;7m▀\x1b[48;2;1;168;1m\x1b[38;2;8;63;5m▀\x1b[48;2;0;177;0m\x1b[38;2;3;130;2m▀\x1b[48;2;0;175;0m\x1b[38;2;1;171;1m▀\x1b[48;2;1;174;1m\x1b[38;2;0;176;0m▀\x1b[48;2;1;175;1m\x1b[38;2;1;174;1m▀\x1b[48;2;0;177;0m\x1b[38;2;0;176;0m▀\x1b[48;2;3;134;2m\x1b[38;2;2;158;1m▀\x1b[48;2;10;21;7m\x1b[38;2;9;38;6m▀\x1b[48;2;11;14;8m\x1b[38;2;11;13;8m▀\x1b[48;2;11;15;8m \x1b[48;2;10;15;8m \x1b[48;2;23;14;7m \x1b[48;2;127;2;1m\x1b[38;2;133;2;1m▀\x1b[48;2;176;0;0m\x1b[38;2;213;0;0m▀\x1b[48;2;109;0;0m\x1b[38;2;100;0;0m▀\x1b[m
\x1b[48;2;24;0;0m\x1b[38;2;157;0;0m▀\x1b[48;2;32;0;0m\x1b[38;2;165;0;0m▀\x1b[48;2;121;1;1m\x1b[38;2;128;1;1m▀\x1b[48;2;28;13;7m\x1b[38;2;30;13;7m▀\x1b[48;2;10;15;8m \x1b[48;2;11;15;8m \x1b[48;2;11;14;7m \x1b[48;2;9;15;7m \x1b[48;2;88;41;34m\x1b[38;2;91;41;34m▀\x1b[48;2;145;51;47m\x1b[38;2;163;53;49m▀\x1b[48;2;107;42;36m\x1b[38;2;161;52;48m▀\x1b[48;2;58;29;22m\x1b[38;2;155;51;47m▀\x1b[48;2;21;18;11m\x1b[38;2;128;45;40m▀\x1b[48;2;9;14;7m\x1b[38;2;79;33;27m▀\x1b[38;2;33;21;15m▀\x1b[48;2;11;14;7m\x1b[38;2;12;15;8m▀\x1b[48;2;11;15;8m\x1b[38;2;9;14;7m▀\x1b[38;2;10;14;7m▀ \x1b[48;2;11;12;8m\x1b[38;2;11;14;8m▀\x1b[48;2;8;54;6m\x1b[38;2;10;28;7m▀\x1b[48;2;6;93;4m\x1b[38;2;4;125;3m▀\x1b[48;2;2;152;1m\x1b[38;2;0;175;0m▀\x1b[48;2;0;176;0m▀\x1b[48;2;0;175;0m\x1b[38;2;1;174;1m▀\x1b[48;2;0;177;0m\x1b[38;2;1;175;1m▀\x1b[48;2;0;175;0m▀▀\x1b[48;2;1;162;1m\x1b[38;2;0;176;0m▀\x1b[48;2;9;47;6m\x1b[38;2;6;95;4m▀\x1b[48;2;11;13;8m \x1b[48;2;11;15;8m\x1b[38;2;11;14;8m▀ \x1b[48;2;10;15;8m \x1b[48;2;21;13;7m\x1b[38;2;22;13;7m▀\x1b[48;2;114;2;1m\x1b[38;2;121;2;1m▀\x1b[48;2;164;0;0m\x1b[38;2;170;0;0m▀\x1b[48;2;127;0;0m\x1b[38;2;118;0;0m▀\x1b[m
\x1b[48;2;14;0;0m\x1b[38;2;147;0;0m▀\x1b[48;2;183;0;0m\x1b[38;2;108;0;0m▀\x1b[48;2;107;1;1m\x1b[38;2;114;1;1m▀\x1b[48;2;26;13;7m\x1b[38;2;27;13;7m▀\x1b[48;2;10;15;8m \x1b[48;2;11;15;8m \x1b[38;2;11;14;7m▀ \x1b[48;2;10;14;7m\x1b[38;2;43;27;20m▀\x1b[48;2;9;14;7m\x1b[38;2;42;25;18m▀\x1b[48;2;11;14;7m\x1b[38;2;14;16;9m▀\x1b[48;2;11;15;8m\x1b[38;2;9;14;7m▀\x1b[38;2;10;14;7m▀\x1b[38;2;11;14;7m▀ \x1b[48;2;11;12;8m \x1b[48;2;9;49;6m\x1b[38;2;8;64;5m▀\x1b[48;2;1;166;1m\x1b[38;2;1;159;1m▀\x1b[48;2;0;175;0m\x1b[38;2;1;171;1m▀ \x1b[48;2;1;159;1m\x1b[38;2;1;167;1m▀\x1b[48;2;7;79;5m\x1b[38;2;4;122;3m▀\x1b[48;2;2;144;2m\x1b[38;2;2;158;1m▀\x1b[48;2;0;158;1m\x1b[38;2;0;177;0m▀\x1b[48;2;7;44;6m\x1b[38;2;4;112;3m▀\x1b[48;2;9;12;7m\x1b[38;2;11;17;7m▀\x1b[48;2;9;14;7m\x1b[38;2;11;14;8m▀\x1b[38;2;11;15;8m▀▀▀▀▀▀▀▀▀▀▀\x1b[48;2;11;14;7m▀\x1b[48;2;11;15;8m \x1b[48;2;10;15;8m \x1b[48;2;20;13;7m\x1b[38;2;21;13;7m▀\x1b[48;2;102;2;1m\x1b[38;2;108;2;1m▀\x1b[48;2;121;0;0m\x1b[38;2;127;0;0m▀\x1b[48;2;146;0;0m\x1b[38;2;136;0;0m▀\x1b[m
\x1b[48;2;3;0;0m\x1b[38;2;137;0;0m▀\x1b[48;2;173;0;0m\x1b[38;2;50;0;0m▀\x1b[48;2;93;1;1m\x1b[38;2;100;1;1m▀\x1b[48;2;24;13;7m\x1b[38;2;25;13;7m▀\x1b[48;2;10;15;8m \x1b[48;2;11;15;8m \x1b[48;2;11;14;7m\x1b[38;2;11;15;8m▀▀\x1b[48;2;17;14;7m\x1b[38;2;11;14;8m▀\x1b[48;2;49;12;7m\x1b[38;2;9;24;7m▀\x1b[48;2;62;54;4m\x1b[38;2;8;133;2m▀\x1b[48;2;7;159;1m\x1b[38;2;2;176;0m▀\x1b[48;2;0;175;0m \x1b[48;2;1;172;1m\x1b[38;2;0;175;0m▀\x1b[48;2;1;159;1m\x1b[38;2;0;173;1m▀\x1b[48;2;46;122;19m\x1b[38;2;1;176;0m▀\x1b[48;2;122;63;45m\x1b[38;2;45;111;18m▀\x1b[48;2;135;52;49m\x1b[38;2;75;36;31m▀\x1b[48;2;135;53;49m\x1b[38;2;74;36;30m▀▀▀▀▀▀▀▀▀▀▀\x1b[48;2;136;53;49m\x1b[38;2;75;37;31m▀\x1b[48;2;119;49;45m\x1b[38;2;66;34;28m▀\x1b[48;2;25;20;13m\x1b[38;2;18;18;11m▀\x1b[48;2;10;14;7m \x1b[48;2;11;15;8m \x1b[48;2;10;15;8m \x1b[48;2;19;13;7m \x1b[48;2;89;2;1m\x1b[38;2;95;2;1m▀\x1b[48;2;77;0;0m\x1b[38;2;83;0;0m▀\x1b[48;2;128;0;0m\x1b[38;2;119;0;0m▀\x1b[m
\x1b[48;2;60;0;0m\x1b[38;2;126;0;0m▀\x1b[48;2;182;0;0m\x1b[38;2;249;0;0m▀\x1b[48;2;83;1;1m\x1b[38;2;87;1;1m▀\x1b[48;2;22;13;7m\x1b[38;2;23;13;7m▀\x1b[48;2;10;15;8m \x1b[48;2;11;15;8m \x1b[48;2;11;14;7m\x1b[38;2;16;14;7m▀\x1b[48;2;14;14;7m\x1b[38;2;42;13;7m▀\x1b[48;2;58;13;6m\x1b[38;2;95;11;5m▀\x1b[48;2;34;13;7m\x1b[38;2;100;11;5m▀\x1b[48;2;9;14;7m\x1b[38;2;21;17;7m▀\x1b[48;2;11;12;8m\x1b[38;2;8;55;6m▀\x1b[38;2;7;75;5m▀\x1b[38;2;8;65;5m▀\x1b[48;2;11;13;8m\x1b[38;2;9;41;6m▀\x1b[48;2;12;15;8m\x1b[38;2;60;37;28m▀\x1b[38;2;90;42;37m▀\x1b[38;2;88;42;36m▀▀▀▀▀▀▀▀▀▀▀▀\x1b[38;2;89;42;37m▀\x1b[38;2;78;39;33m▀\x1b[48;2;11;15;8m\x1b[38;2;20;18;11m▀\x1b[48;2;11;14;7m\x1b[38;2;10;14;7m▀\x1b[48;2;11;15;8m \x1b[48;2;10;15;8m \x1b[48;2;18;13;7m \x1b[48;2;78;2;1m\x1b[38;2;83;2;1m▀\x1b[48;2;196;0;0m\x1b[38;2;40;0;0m▀\x1b[48;2;217;0;0m\x1b[38;2;137;0;0m▀\x1b[m
\x1b[48;2;227;0;0m\x1b[38;2;16;0;0m▀\x1b[48;2;116;0;0m\x1b[38;2;21;0;0m▀\x1b[48;2;79;1;1m\x1b[38;2;81;1;1m▀\x1b[48;2;22;13;7m \x1b[48;2;10;15;8m \x1b[48;2;11;15;8m \x1b[38;2;10;15;8m▀\x1b[48;2;10;15;8m\x1b[38;2;21;14;7m▀\x1b[48;2;11;15;8m\x1b[38;2;14;14;7m▀\x1b[38;2;11;14;7m▀ ▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀ \x1b[48;2;10;15;8m \x1b[48;2;17;13;7m\x1b[38;2;18;13;7m▀\x1b[48;2;75;2;1m\x1b[38;2;76;2;1m▀\x1b[48;2;97;0;0m\x1b[38;2;34;0;0m▀\x1b[48;2;76;0;0m\x1b[38;2;147;0;0m▀\x1b[m
\x1b[48;2;161;0;0m\x1b[38;2;183;0;0m▀\x1b[48;2;49;0;0m\x1b[38;2;211;0;0m▀\x1b[48;2;75;1;1m\x1b[38;2;77;1;1m▀\x1b[48;2;21;13;7m \x1b[48;2;10;15;8m \x1b[48;2;11;15;8m \x1b[48;2;10;15;8m \x1b[48;2;17;13;7m \x1b[48;2;71;2;1m\x1b[38;2;73;2;1m▀\x1b[48;2;253;0;0m\x1b[38;2;159;0;0m▀\x1b[48;2;191;0;0m\x1b[38;2;5;0;0m▀\x1b[m
\x1b[48;2;110;161;100m\x1b[38;2;116;0;0m▀\x1b[48;2;9;205;205m\x1b[38;2;192;0;0m▀\x1b[48;2;78;0;0m\x1b[38;2;77;1;0m▀\x1b[48;2;66;3;1m\x1b[38;2;30;11;6m▀\x1b[48;2;42;8;4m\x1b[38;2;9;15;8m▀\x1b[48;2;39;8;4m\x1b[38;2;10;15;8m▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀\x1b[48;2;40;8;4m▀\x1b[48;2;39;8;4m▀▀▀▀▀▀▀\x1b[48;2;40;8;4m▀▀▀\x1b[48;2;39;8;4m▀\x1b[48;2;40;8;4m▀\x1b[48;2;39;8;4m▀\x1b[48;2;41;8;4m\x1b[38;2;9;15;8m▀\x1b[48;2;62;4;2m\x1b[38;2;24;13;7m▀\x1b[48;2;78;0;0m\x1b[38;2;74;1;1m▀\x1b[48;2;221;222;0m\x1b[38;2;59;0;0m▀\x1b[48;2;67;199;133m\x1b[38;2;85;0;0m▀\x1b[m
\x1b[48;2;0;0;0m\x1b[38;2;143;233;149m▀\x1b[48;2;108;184;254m\x1b[38;2;213;6;76m▀\x1b[48;2;197;183;82m\x1b[38;2;76;0;0m▀\x1b[48;2;154;157;0m▀\x1b[48;2;96;0;0m▀\x1b[48;2;253;0;0m▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀\x1b[48;2;226;0;0m▀\x1b[48;2;255;127;255m▀\x1b[48;2;84;36;66m\x1b[38;2;64;247;251m▀\x1b[48;2;0;0;0m\x1b[38;2;18;76;210m▀\x1b[m
\x1b[48;2;0;0;0m \x1b[m
\x1b[48;2;0;0;0m \x1b[m
"""
)
if __name__ == "__main__":
    # Launch the demo only when run as a script, not on import.
    main()
| 244.282609 | 1,457 | 0.6346 |
1913133823fd64e554b3941719bf51c7f8af7d75 | 2,441 | py | Python | tests/links_tests/update_tests/test_cgcnn_update.py | pfnet/chainerchem | efe323aa21f63a815130d673781e7cca1ccb72d2 | [
"MIT"
] | 184 | 2019-11-27T12:59:01.000Z | 2022-03-29T19:18:54.000Z | tests/links_tests/update_tests/test_cgcnn_update.py | pfnet/chainerchem | efe323aa21f63a815130d673781e7cca1ccb72d2 | [
"MIT"
] | 21 | 2019-12-08T01:53:33.000Z | 2020-10-23T01:19:56.000Z | tests/links_tests/update_tests/test_cgcnn_update.py | pfnet/chainerchem | efe323aa21f63a815130d673781e7cca1ccb72d2 | [
"MIT"
] | 45 | 2019-11-28T09:59:54.000Z | 2022-02-07T02:42:46.000Z | import numpy
import pytest
from chainer import cuda
from chainer_chemistry.links.update.cgcnn_update import CGCNNUpdate
# node_size_list means the first molecule has three nodes,
# and the second molecule has five nodes
node_size_list = [3, 5]
max_num_nbr = 6  # max neighbors sampled per node
node_feature_dim = 10  # length of each node (atom) feature vector
edge_feature_dim = 15  # length of each neighbor (edge) feature vector
out_dim = node_feature_dim  # CGCNNUpdate preserves the node feature dimension
batch_size = 2  # number of molecules; must equal len(node_size_list)
@pytest.fixture
def update():
    """CGCNNUpdate link under test, sized to the node feature dimension."""
    return CGCNNUpdate(n_site_features=node_feature_dim)
@pytest.fixture
def data():
    """Build random CGCNN inputs for a two-molecule batch.

    Returns a tuple of (atom features, neighbor features, neighbor indices,
    output gradient). Neighbor indices are restricted so that each node only
    references nodes within its own molecule.
    """
    if len(node_size_list) != batch_size:
        raise ValueError("Invalid fixture data for CGCNN")
    numpy.random.seed(0)  # deterministic fixture values

    total_node_size = sum(node_size_list)
    atom_feat = numpy.random.rand(
        total_node_size, node_feature_dim).astype(numpy.float32)
    nbr_feat = numpy.random.rand(
        total_node_size, max_num_nbr, edge_feature_dim).astype(numpy.float32)

    # One row of neighbor indices per node, drawn from its molecule's range.
    nbr_rows = []
    offset = 0
    for n_nodes in node_size_list:
        for _ in range(n_nodes):
            nbr_rows.append(
                numpy.random.randint(offset, offset + n_nodes, max_num_nbr))
        offset += n_nodes
    nbr_idx = numpy.array(nbr_rows, dtype=numpy.int32)

    y_grad = numpy.random.uniform(
        -1, 1, (batch_size, out_dim)).astype(numpy.float32)
    return atom_feat, nbr_feat, nbr_idx, y_grad
def check_forward(update, data):
    """Run the link on *data* and verify the output array shape."""
    output = update(*data)
    result = cuda.to_cpu(output.data)
    expected_shape = (sum(node_size_list), out_dim)
    assert result.shape == expected_shape
def test_forward_cpu(update, data):
    """Forward pass on CPU; the trailing gradient entry is not needed."""
    check_forward(update, tuple(data[:-1]))
@pytest.mark.gpu
def test_forward_gpu(update, data):
    """Forward pass on GPU: move the link and the inputs, then check shapes."""
    update.to_gpu()
    gpu_inputs = tuple(cuda.to_gpu(array) for array in data[:-1])
    check_forward(update, gpu_inputs)
# def test_backward_cpu(update, data):
# input_data, y_grad = data[0:-1], data[-1]
# gradient_check.check_backward(update, tuple(input_data), y_grad,
# atol=5e-1, rtol=1e-1)
# @pytest.mark.gpu
# def test_backward_gpu(update, data):
# atom_data, adj_data, y_grad = [cuda.to_gpu(d) for d in data]
# update.to_gpu()
# gradient_check.check_backward(update, (atom_data, adj_data), y_grad,
# atol=5e-1, rtol=1e-1)
# Allow running this test module directly (outside a pytest invocation).
if __name__ == '__main__':
    pytest.main([__file__, '-v'])
| 29.059524 | 78 | 0.649324 |
8ca5674b7dd82166d8c953e450e3d08f4ccb3297 | 314 | py | Python | kite_runner/signals.py | thinhnguyenuit/kite_runner | 17f23f872318a78e573b0ff6bed04f783f082de1 | [
"MIT"
] | null | null | null | kite_runner/signals.py | thinhnguyenuit/kite_runner | 17f23f872318a78e573b0ff6bed04f783f082de1 | [
"MIT"
] | 1 | 2021-11-10T16:29:38.000Z | 2021-11-10T16:29:38.000Z | kite_runner/signals.py | thinhnguyenuit/kite_runner | 17f23f872318a78e573b0ff6bed04f783f082de1 | [
"MIT"
] | null | null | null | from django.db.models.signals import post_save
from django.dispatch import receiver
from kite_runner.models import Profile, User
@receiver(post_save, sender=User)
def create_user_profile(sender, instance=None, created=False, **kwargs):
    """Create the matching Profile when a User row is saved for the first time."""
    if not (instance and created):
        return
    Profile.objects.create(user=instance)
| 28.545455 | 72 | 0.786624 |
eac63180cc630987e16fe35de4ac342c044e1679 | 221 | py | Python | contrib/wallettools/walletchangepass.py | siriusnlz/test123 | 7f579d28094439d9be5d4d23301b21c19e636588 | [
"MIT"
] | null | null | null | contrib/wallettools/walletchangepass.py | siriusnlz/test123 | 7f579d28094439d9be5d4d23301b21c19e636588 | [
"MIT"
] | null | null | null | contrib/wallettools/walletchangepass.py | siriusnlz/test123 | 7f579d28094439d9be5d4d23301b21c19e636588 | [
"MIT"
] | null | null | null | from jsonrpc import ServiceProxy
# Interactively change the wallet passphrase via the local JSON-RPC daemon.
# NOTE(review): raw_input implies Python 2; prompt text and call order are
# preserved byte-for-byte.
rpc = ServiceProxy("http://127.0.0.1:22057")
old_passphrase = raw_input("Enter old wallet passphrase: ")
new_passphrase = raw_input("Enter new wallet passphrase: ")
rpc.walletpassphrasechange(old_passphrase, new_passphrase)
| 36.833333 | 49 | 0.769231 |
4604333b6e8eadfa7c35aec2acacece0f69cefd8 | 19,105 | py | Python | cisco-ios-xr/ydk/models/cisco_ios_xr/_meta/_Cisco_IOS_XR_ncs1k_macsec_ea_oper.py | tkamata-test/ydk-py | b637e7853a8edbbd31fbc05afa3aa4110b31c5f9 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | cisco-ios-xr/ydk/models/cisco_ios_xr/_meta/_Cisco_IOS_XR_ncs1k_macsec_ea_oper.py | tkamata-test/ydk-py | b637e7853a8edbbd31fbc05afa3aa4110b31c5f9 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | cisco-ios-xr/ydk/models/cisco_ios_xr/_meta/_Cisco_IOS_XR_ncs1k_macsec_ea_oper.py | tkamata-test/ydk-py | b637e7853a8edbbd31fbc05afa3aa4110b31c5f9 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
# ---------------------------------------------------------------------------
# Meta-info table for Cisco-IOS-XR-ncs1k-macsec-ea-oper (YDK-generated data).
# The Encrypt and Decrypt secure-channel subtrees are structurally identical,
# so their entries are produced once by the private helpers below; the
# resulting _meta_table keys and contents are unchanged from the expanded
# form (including the original YANG description typos, which are data).
# ---------------------------------------------------------------------------

# Constants shared by every entry.
_BUNDLE_NAME = 'Cisco-IOS-XR-ncs1k-macsec-ea-oper'
_PY_MODULE = 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs1k_macsec_ea_oper'
_CTRLR_PATH = 'Ncs1KMacsecOper.Ncs1KMacsecCtrlrNames.Ncs1KMacsecCtrlrName'
_STATUS_PATH = _CTRLR_PATH + '.Ncs1KStatusInfo'


def _leaf(name, ptype, prange, pattern, description, pyname, is_key=False):
    """Build a plain ATTRIBUTE leaf member (the most common member shape)."""
    return _MetaInfoClassMember(name, ATTRIBUTE, ptype, None, None,
                                prange, pattern,
                                description,
                                pyname,
                                _BUNDLE_NAME, is_key)


def _active_association_meta(direction):
    """meta-info entry for <direction>ScStatus.ActiveAssociation."""
    class_path = _STATUS_PATH + '.%sScStatus.ActiveAssociation' % direction
    members = [
        _leaf('association-number', 'int', [('0', '255')], [],
              ' Assocition Number', 'association_number'),
        _leaf('device-association-number', 'int', [('0', '255')], [],
              ' Devive Association Number', 'device_association_number'),
        _leaf('key-crc', 'int', [('0', '4294967295')], [],
              ' 32bit CRC of Programmed Key', 'key_crc'),
        _leaf('programmed-time', 'str', [(0, 30)], [],
              ' Key Programmed Time', 'programmed_time'),
        _leaf('short-secure-channel-id', 'int', [('0', '4294967295')], [],
              ' Short Secure Channel Id', 'short_secure_channel_id'),
        _MetaInfoClassMember('xpn-salt', REFERENCE_LEAFLIST, 'int', None, None,
                             [('0', '4294967295')], [],
                             ' XPN Salt',
                             'xpn_salt',
                             _BUNDLE_NAME, False),
    ]
    return {
        'meta_info': _MetaInfoClass(class_path,
                                    False,
                                    members,
                                    _BUNDLE_NAME,
                                    'active-association',
                                    _yang_ns._namespaces[_BUNDLE_NAME],
                                    _PY_MODULE),
    }


def _sc_status_meta(direction, yang_name):
    """meta-info entry for <direction>ScStatus (encrypt/decrypt channel)."""
    class_path = _STATUS_PATH + '.%sScStatus' % direction
    members = [
        _MetaInfoClassMember('active-association', REFERENCE_LIST,
                             'ActiveAssociation', _PY_MODULE,
                             class_path + '.ActiveAssociation',
                             [], [],
                             ' Active Associations',
                             'active_association',
                             _BUNDLE_NAME, False),
        _MetaInfoClassMember('cipher-suite', REFERENCE_ENUM_CLASS,
                             'Ncs1KCipherSuitEnum', _PY_MODULE,
                             'Ncs1KCipherSuitEnum',
                             [], [],
                             ' Cipher Suite',
                             'cipher_suite',
                             _BUNDLE_NAME, False),
        _leaf('confidentiality-offset', 'int', [('0', '4294967295')], [],
              ' Confidentiality offset', 'confidentiality_offset'),
        _leaf('initial-packet-number', 'int',
              [('0', '18446744073709551615')], [],
              ' Initial Packet Number', 'initial_packet_number'),
        _leaf('max-packet-number', 'int',
              [('0', '18446744073709551615')], [],
              ' Maximum Packet Number', 'max_packet_number'),
        _leaf('protection-enabled', 'bool', [], [],
              ' Protection Enabled', 'protection_enabled'),
        _leaf('recent-packet-number', 'int',
              [('0', '18446744073709551615')], [],
              ' Recent Packet Number', 'recent_packet_number'),
        _leaf('secure-channel-id', 'int',
              [('0', '18446744073709551615')], [],
              ' Secure Channel Id', 'secure_channel_id'),
        _leaf('secure-tag-length', 'int', [('0', '4294967295')], [],
              ' Secure Tag Length', 'secure_tag_length'),
    ]
    return {
        'meta_info': _MetaInfoClass(class_path,
                                    False,
                                    members,
                                    _BUNDLE_NAME,
                                    yang_name,
                                    _yang_ns._namespaces[_BUNDLE_NAME],
                                    _PY_MODULE),
    }


_meta_table = {
    'Ncs1KCipherSuitEnum': _MetaInfoEnum('Ncs1KCipherSuitEnum', _PY_MODULE,
        {
            'gcm-aes-256': 'GCM_AES_256',
            'gcm-aes-128': 'GCM_AES_128',
            'gcm-aes-xpn-256': 'GCM_AES_XPN_256',
        }, _BUNDLE_NAME, _yang_ns._namespaces[_BUNDLE_NAME]),
}

# Encrypt/Decrypt subtrees share one definition; insertion order matches the
# previously expanded table (Encrypt pair first, then Decrypt pair).
for _direction, _yang in (('Encrypt', 'encrypt-sc-status'),
                          ('Decrypt', 'decrypt-sc-status')):
    _meta_table[_STATUS_PATH + '.%sScStatus.ActiveAssociation' % _direction] = \
        _active_association_meta(_direction)
    _meta_table[_STATUS_PATH + '.%sScStatus' % _direction] = \
        _sc_status_meta(_direction, _yang)
del _direction, _yang

_meta_table[_STATUS_PATH] = {
    'meta_info': _MetaInfoClass(_STATUS_PATH,
        False,
        [
            _MetaInfoClassMember('decrypt-sc-status', REFERENCE_CLASS,
                                 'DecryptScStatus', _PY_MODULE,
                                 _STATUS_PATH + '.DecryptScStatus',
                                 [], [],
                                 ' Decrypt Secure Channel Status',
                                 'decrypt_sc_status',
                                 _BUNDLE_NAME, False),
            _MetaInfoClassMember('encrypt-sc-status', REFERENCE_CLASS,
                                 'EncryptScStatus', _PY_MODULE,
                                 _STATUS_PATH + '.EncryptScStatus',
                                 [], [],
                                 ' Encrypt Secure Channel Status',
                                 'encrypt_sc_status',
                                 _BUNDLE_NAME, False),
            _leaf('must-secure', 'bool', [], [],
                  ' Must Secure', 'must_secure'),
            _leaf('replay-window-size', 'int', [('0', '4294967295')], [],
                  ' Replay Window Size', 'replay_window_size'),
        ],
        _BUNDLE_NAME,
        'ncs1k-status-info',
        _yang_ns._namespaces[_BUNDLE_NAME],
        _PY_MODULE),
}

_meta_table[_CTRLR_PATH] = {
    'meta_info': _MetaInfoClass(_CTRLR_PATH,
        False,
        [
            # 'name' is the list key (controller/interface name pattern).
            _leaf('name', 'str', [],
                  ['(([a-zA-Z0-9_]*\\d+/){3,4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3,4}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
                  ' Port name', 'name', is_key=True),
            _MetaInfoClassMember('ncs1k-status-info', REFERENCE_CLASS,
                                 'Ncs1KStatusInfo', _PY_MODULE,
                                 _STATUS_PATH,
                                 [], [],
                                 ' controller data',
                                 'ncs1k_status_info',
                                 _BUNDLE_NAME, False),
        ],
        _BUNDLE_NAME,
        'ncs1k-macsec-ctrlr-name',
        _yang_ns._namespaces[_BUNDLE_NAME],
        _PY_MODULE),
}

_meta_table['Ncs1KMacsecOper.Ncs1KMacsecCtrlrNames'] = {
    'meta_info': _MetaInfoClass('Ncs1KMacsecOper.Ncs1KMacsecCtrlrNames',
        False,
        [
            _MetaInfoClassMember('ncs1k-macsec-ctrlr-name', REFERENCE_LIST,
                                 'Ncs1KMacsecCtrlrName', _PY_MODULE,
                                 _CTRLR_PATH,
                                 [], [],
                                 ' Interface name',
                                 'ncs1k_macsec_ctrlr_name',
                                 _BUNDLE_NAME, False),
        ],
        _BUNDLE_NAME,
        'ncs1k-macsec-ctrlr-names',
        _yang_ns._namespaces[_BUNDLE_NAME],
        _PY_MODULE),
}

_meta_table['Ncs1KMacsecOper'] = {
    'meta_info': _MetaInfoClass('Ncs1KMacsecOper',
        False,
        [
            _MetaInfoClassMember('ncs1k-macsec-ctrlr-names', REFERENCE_CLASS,
                                 'Ncs1KMacsecCtrlrNames', _PY_MODULE,
                                 'Ncs1KMacsecOper.Ncs1KMacsecCtrlrNames',
                                 [], [],
                                 ' All Macsec operational data',
                                 'ncs1k_macsec_ctrlr_names',
                                 _BUNDLE_NAME, False),
        ],
        _BUNDLE_NAME,
        'ncs1k-macsec-oper',
        _yang_ns._namespaces[_BUNDLE_NAME],
        _PY_MODULE),
}
# Wire up child -> parent meta-info links so the YDK runtime can walk the
# YANG containment tree from any node up to the root Ncs1KMacsecOper entry.
_meta_table['Ncs1KMacsecOper.Ncs1KMacsecCtrlrNames.Ncs1KMacsecCtrlrName.Ncs1KStatusInfo.EncryptScStatus.ActiveAssociation']['meta_info'].parent =_meta_table['Ncs1KMacsecOper.Ncs1KMacsecCtrlrNames.Ncs1KMacsecCtrlrName.Ncs1KStatusInfo.EncryptScStatus']['meta_info']
_meta_table['Ncs1KMacsecOper.Ncs1KMacsecCtrlrNames.Ncs1KMacsecCtrlrName.Ncs1KStatusInfo.DecryptScStatus.ActiveAssociation']['meta_info'].parent =_meta_table['Ncs1KMacsecOper.Ncs1KMacsecCtrlrNames.Ncs1KMacsecCtrlrName.Ncs1KStatusInfo.DecryptScStatus']['meta_info']
_meta_table['Ncs1KMacsecOper.Ncs1KMacsecCtrlrNames.Ncs1KMacsecCtrlrName.Ncs1KStatusInfo.EncryptScStatus']['meta_info'].parent =_meta_table['Ncs1KMacsecOper.Ncs1KMacsecCtrlrNames.Ncs1KMacsecCtrlrName.Ncs1KStatusInfo']['meta_info']
_meta_table['Ncs1KMacsecOper.Ncs1KMacsecCtrlrNames.Ncs1KMacsecCtrlrName.Ncs1KStatusInfo.DecryptScStatus']['meta_info'].parent =_meta_table['Ncs1KMacsecOper.Ncs1KMacsecCtrlrNames.Ncs1KMacsecCtrlrName.Ncs1KStatusInfo']['meta_info']
_meta_table['Ncs1KMacsecOper.Ncs1KMacsecCtrlrNames.Ncs1KMacsecCtrlrName.Ncs1KStatusInfo']['meta_info'].parent =_meta_table['Ncs1KMacsecOper.Ncs1KMacsecCtrlrNames.Ncs1KMacsecCtrlrName']['meta_info']
_meta_table['Ncs1KMacsecOper.Ncs1KMacsecCtrlrNames.Ncs1KMacsecCtrlrName']['meta_info'].parent =_meta_table['Ncs1KMacsecOper.Ncs1KMacsecCtrlrNames']['meta_info']
_meta_table['Ncs1KMacsecOper.Ncs1KMacsecCtrlrNames']['meta_info'].parent =_meta_table['Ncs1KMacsecOper']['meta_info']
| 55.057637 | 266 | 0.55865 |
9371a20331f510d162e06e85427e248c67874d46 | 12,937 | py | Python | mmseg/models/backbones/stdcnet.py | evgeniya-egupova/mmsegmentation | 3857f19321ad6af41c8a6af364898ee050225f4c | [
"Apache-2.0"
] | null | null | null | mmseg/models/backbones/stdcnet.py | evgeniya-egupova/mmsegmentation | 3857f19321ad6af41c8a6af364898ee050225f4c | [
"Apache-2.0"
] | null | null | null | mmseg/models/backbones/stdcnet.py | evgeniya-egupova/mmsegmentation | 3857f19321ad6af41c8a6af364898ee050225f4c | [
"Apache-2.0"
] | null | null | null | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.runner import load_checkpoint
from mmcv.utils.parrots_wrapper import _BatchNorm
from mmseg.utils import get_root_logger
from ..builder import BACKBONES
class ConvX(nn.Module):
    """Conv2d (bias-free) -> BatchNorm2d -> ReLU with 'same'-style padding."""

    def __init__(self, in_planes, out_planes, kernel=3, stride=1):
        super().__init__()
        # padding = kernel // 2 keeps the spatial size when stride == 1.
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel,
                              stride=stride, padding=kernel // 2, bias=False)
        self.bn = nn.BatchNorm2d(out_planes)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        return self.relu(self.bn(self.conv(x)))
class CatBottleneck(nn.Module):
    """STDC "Cat" bottleneck: a cascade of ConvX stages concatenated channel-wise.

    Stage widths halve progressively (out/2, out/4, ...) so the concatenation
    of the first stage's features with every later stage's output has exactly
    ``out_planes`` channels.  With ``stride == 2`` the stage-1 features are
    downsampled by a depthwise stride-2 conv before stage 2, and by average
    pooling on the skip branch.
    """

    def __init__(self, in_planes, out_planes, block_num=3, stride=1):
        super().__init__()
        # BUG FIX: the assert message used to be `print(...)`, which prints to
        # stdout on failure and attaches None to the AssertionError; a plain
        # string message is the intended behavior.
        assert block_num > 1, "block number should be larger than 1."
        self.conv_list = nn.ModuleList()
        self.stride = stride
        if stride == 2:
            # Depthwise stride-2 conv + BN that downsamples stage-1 features.
            self.avd_layer = nn.Sequential(
                nn.Conv2d(out_planes // 2, out_planes // 2,
                          kernel_size=3, stride=2, padding=1,
                          groups=out_planes // 2, bias=False),
                nn.BatchNorm2d(out_planes // 2),
            )
            # Skip branch: keep stage-1 features, downsampled by pooling.
            self.skip = nn.AvgPool2d(kernel_size=3, stride=2, padding=1)
            stride = 1  # remaining stages run at stride 1
        for idx in range(block_num):
            if idx == 0:
                self.conv_list.append(ConvX(in_planes, out_planes // 2, kernel=1))
            elif idx == 1 and block_num == 2:
                self.conv_list.append(ConvX(out_planes // 2, out_planes // 2, stride=stride))
            elif idx == 1 and block_num > 2:
                self.conv_list.append(ConvX(out_planes // 2, out_planes // 4, stride=stride))
            elif idx < block_num - 1:
                # Halve the width at every intermediate stage.  Exact integer
                # 2 ** idx replaces int(math.pow(2, idx)) — same values, no
                # float round-trip.
                self.conv_list.append(
                    ConvX(out_planes // (2 ** idx), out_planes // (2 ** (idx + 1))))
            else:
                # Last stage keeps its width so the channel total is out_planes.
                self.conv_list.append(
                    ConvX(out_planes // (2 ** idx), out_planes // (2 ** idx)))

    def forward(self, x):
        """Run every stage and concatenate all stage outputs along channels."""
        out_list = []
        out1 = self.conv_list[0](x)
        for idx, conv in enumerate(self.conv_list[1:]):
            if idx == 0:
                if self.stride == 2:
                    # Downsample the stage-1 features before stage 2.
                    out = conv(self.avd_layer(out1))
                else:
                    out = conv(out1)
            else:
                out = conv(out)
            out_list.append(out)
        if self.stride == 2:
            # Match the skip branch's spatial size to the downsampled stages.
            out1 = self.skip(out1)
        out_list.insert(0, out1)
        return torch.cat(out_list, dim=1)
# STDC2Net
class STDCNet1446(nn.Module):
    """STDC2 backbone (1446 config): stem + CatBottleneck stages.

    Exposes feature maps at output strides 2/4/8/16/32 via the ``x2``..``x32``
    stage slices.  The ``gap``/``fc``/``bn``/``relu`` classification head
    modules are kept for checkpoint compatibility.
    """

    def __init__(self, base=64, layers=(4, 5, 3), block_num=4):
        super().__init__()
        self.features = self._make_layers(base, layers, block_num, CatBottleneck)
        self.gap = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Linear(max(1024, base * 16), max(1024, base * 16), bias=False)
        self.bn = nn.BatchNorm1d(max(1024, base * 16))
        self.relu = nn.ReLU(inplace=True)
        # Stage slices: entries 0 and 1 are the stem convs, the rest are
        # CatBottlenecks grouped by output stride (4 + 5 + 3 blocks).
        self.x2 = nn.Sequential(self.features[:1])
        self.x4 = nn.Sequential(self.features[1:2])
        self.x8 = nn.Sequential(self.features[2:6])
        self.x16 = nn.Sequential(self.features[6:11])
        self.x32 = nn.Sequential(self.features[11:])

    @staticmethod
    def _make_layers(base, layers, block_num, block):
        """Build the stem (two stride-2 ConvX) plus the configured stages."""
        stages = [ConvX(3, base // 2, 3, 2), ConvX(base // 2, base, 3, 2)]
        for i, layer in enumerate(layers):
            for j in range(layer):
                if i == 0 and j == 0:
                    stages.append(block(base, base * 4, block_num, 2))
                elif j == 0:
                    # First block of each later stage downsamples (stride 2).
                    stages.append(block(base * (2 ** (i + 1)),
                                        base * (2 ** (i + 2)), block_num, 2))
                else:
                    stages.append(block(base * (2 ** (i + 2)),
                                        base * (2 ** (i + 2)), block_num, 1))
        return nn.Sequential(*stages)

    def forward(self, x):
        feats = []
        out = x
        for stage in (self.x2, self.x4, self.x8, self.x16, self.x32):
            out = stage(out)
            feats.append(out)
        return tuple(feats)
# STDC1Net
class STDCNet813(nn.Module):
    """STDC1 backbone (813 config): stem + CatBottleneck stages.

    Exposes feature maps at output strides 2/4/8/16/32 via the ``x2``..``x32``
    stage slices.  The ``gap``/``fc``/``bn``/``relu`` classification head
    modules are kept for checkpoint compatibility.
    """

    def __init__(self, base=64, layers=(2, 2, 2), block_num=4):
        super().__init__()
        self.features = self._make_layers(base, layers, block_num, CatBottleneck)
        self.gap = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Linear(max(1024, base * 16), max(1024, base * 16), bias=False)
        self.bn = nn.BatchNorm1d(max(1024, base * 16))
        self.relu = nn.ReLU(inplace=True)
        # Stage slices: entries 0 and 1 are the stem convs, the rest are
        # CatBottlenecks grouped by output stride (2 + 2 + 2 blocks).
        self.x2 = nn.Sequential(self.features[:1])
        self.x4 = nn.Sequential(self.features[1:2])
        self.x8 = nn.Sequential(self.features[2:4])
        self.x16 = nn.Sequential(self.features[4:6])
        self.x32 = nn.Sequential(self.features[6:])

    @staticmethod
    def _make_layers(base, layers, block_num, block):
        """Build the stem (two stride-2 ConvX) plus the configured stages."""
        stages = [ConvX(3, base // 2, 3, 2), ConvX(base // 2, base, 3, 2)]
        for i, layer in enumerate(layers):
            for j in range(layer):
                if i == 0 and j == 0:
                    stages.append(block(base, base * 4, block_num, 2))
                elif j == 0:
                    # First block of each later stage downsamples (stride 2).
                    stages.append(block(base * (2 ** (i + 1)),
                                        base * (2 ** (i + 2)), block_num, 2))
                else:
                    stages.append(block(base * (2 ** (i + 2)),
                                        base * (2 ** (i + 2)), block_num, 1))
        return nn.Sequential(*stages)

    def forward(self, x):
        feats = []
        out = x
        for stage in (self.x2, self.x4, self.x8, self.x16, self.x32):
            out = stage(out)
            feats.append(out)
        return tuple(feats)
class ConvBNReLU(nn.Module):
    """Conv2d (bias-free) -> BatchNorm2d -> ReLU with Kaiming-initialised conv."""

    def __init__(self, in_chan, out_chan, ks=3, stride=1, padding=1):
        super().__init__()
        self.conv = nn.Conv2d(in_chan, out_chan, kernel_size=ks,
                              stride=stride, padding=padding, bias=False)
        self.bn = nn.BatchNorm2d(out_chan)
        self.relu = nn.ReLU()
        self.init_weight()

    def forward(self, x):
        return self.relu(self.bn(self.conv(x)))

    def init_weight(self):
        """Kaiming-normal init for every child conv; zero any conv bias."""
        for child in self.children():
            if isinstance(child, nn.Conv2d):
                nn.init.kaiming_normal_(child.weight, a=1)
                if child.bias is not None:
                    nn.init.constant_(child.bias, 0)
class AttentionRefinementModule(nn.Module):
    """ConvBNReLU followed by a global channel-attention gate (BiSeNet ARM)."""

    def __init__(self, in_chan, out_chan):
        super().__init__()
        self.conv = ConvBNReLU(in_chan, out_chan, ks=3, stride=1, padding=1)
        self.conv_atten = nn.Conv2d(out_chan, out_chan, kernel_size=1, bias=False)
        self.bn_atten = nn.BatchNorm2d(out_chan)
        self.sigmoid_atten = nn.Sigmoid()
        self.init_weight()

    def forward(self, x):
        feat = self.conv(x)
        # Global average pool -> 1x1 conv -> BN -> sigmoid: per-channel gates.
        gate = F.avg_pool2d(feat, feat.size()[2:])
        gate = self.sigmoid_atten(self.bn_atten(self.conv_atten(gate)))
        return torch.mul(feat, gate)

    def init_weight(self):
        """Kaiming-normal init for every child conv; zero any conv bias."""
        for child in self.children():
            if isinstance(child, nn.Conv2d):
                nn.init.kaiming_normal_(child.weight, a=1)
                if child.bias is not None:
                    nn.init.constant_(child.bias, 0)
class ContextPath(nn.Module):
    """STDC backbone plus the BiSeNet-style context path (ARMs + global pool).

    Returns the backbone feature maps at strides 2/4/8/16 together with the
    attention-refined stride-8 and stride-16 context features.

    The two backbone branches were exact copy-pastes differing only in the
    backbone class; they are deduplicated via a small registry.
    """

    # Backbone registry; both variants share the same head configuration
    # (stride-16 width 512, stride-32 width 1024).
    _BACKBONES = {'STDCNet1446': STDCNet1446, 'STDCNet813': STDCNet813}

    def __init__(self, backbone='CatNetSmall'):
        super().__init__()
        self.backbone_name = backbone
        if backbone not in self._BACKBONES:
            raise ValueError(f'backbone \'{backbone}\' is not in backbone list')
        self.backbone = self._BACKBONES[backbone]()
        self.arm16 = AttentionRefinementModule(512, 128)
        inplanes = 1024  # channel width of the backbone's stride-32 output
        self.arm32 = AttentionRefinementModule(inplanes, 128)
        self.conv_head32 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1)
        self.conv_head16 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1)
        self.conv_avg = ConvBNReLU(inplanes, 128, ks=1, stride=1, padding=0)

    def forward(self, x):
        feat2, feat4, feat8, feat16, feat32 = self.backbone(x)
        H8, W8 = feat8.size()[2:]
        H16, W16 = feat16.size()[2:]
        H32, W32 = feat32.size()[2:]

        # Global context: image-level average pooling, projected to 128 ch,
        # broadcast back to the stride-32 resolution.
        avg = F.avg_pool2d(feat32, feat32.size()[2:])
        avg = self.conv_avg(avg)
        avg_up = F.interpolate(avg, (H32, W32), mode='nearest')

        feat32_arm = self.arm32(feat32)
        feat32_sum = feat32_arm + avg_up
        feat32_up = F.interpolate(feat32_sum, (H16, W16), mode='nearest')
        feat32_up = self.conv_head32(feat32_up)

        feat16_arm = self.arm16(feat16)
        feat16_sum = feat16_arm + feat32_up
        feat16_up = F.interpolate(feat16_sum, (H8, W8), mode='nearest')
        feat16_up = self.conv_head16(feat16_up)

        return feat2, feat4, feat8, feat16, feat16_up, feat32_up  # x8, x16
class FeatureFusionModule(nn.Module):
    """Fuse spatial (fsp) and context (fcp) features with channel attention."""

    def __init__(self, in_chan, out_chan):
        super().__init__()
        self.convblk = ConvBNReLU(in_chan, out_chan, ks=1, stride=1, padding=0)
        # Squeeze-excitation style bottleneck: out_chan -> out_chan/4 -> out_chan.
        self.conv1 = nn.Conv2d(out_chan, out_chan // 4, kernel_size=1,
                               stride=1, padding=0, bias=False)
        self.conv2 = nn.Conv2d(out_chan // 4, out_chan, kernel_size=1,
                               stride=1, padding=0, bias=False)
        self.relu = nn.ReLU(inplace=True)
        self.sigmoid = nn.Sigmoid()

    def forward(self, fsp, fcp):
        fused = self.convblk(torch.cat([fsp, fcp], dim=1))
        # Per-channel attention from the globally pooled fused features.
        gate = F.avg_pool2d(fused, fused.size()[2:])
        gate = self.sigmoid(self.conv2(self.relu(self.conv1(gate))))
        # Residual-style combination: gated features plus the fused features.
        return torch.mul(fused, gate) + fused
@BACKBONES.register_module()
class STDCNet(nn.Module):
    """mmseg backbone wrapper: STDC context path + feature fusion.

    ``extra['backbone']`` selects 'STDCNet1446' or 'STDCNet813'.  The forward
    pass yields [context stride-16, context stride-8, fused stride-8] maps.
    """

    def __init__(self,
                 extra,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 norm_eval=False):
        super().__init__()
        self.extra = extra
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.norm_eval = norm_eval
        self.cp = ContextPath(self.extra['backbone'])
        # Fusion input: stride-8 backbone features (256 ch) + context (128 ch).
        fuse_in_channels = 256 + 128
        self.ffm = FeatureFusionModule(fuse_in_channels, 256)

    def init_weights(self, pretrained=None):
        """Initialize the weights in backbone.

        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Defaults to None.
        """
        if isinstance(pretrained, str):
            load_checkpoint(self, pretrained, strict=False,
                            logger=get_root_logger())
        elif pretrained is None:
            for module in self.modules():
                if isinstance(module, nn.Conv2d):
                    nn.init.kaiming_normal_(module.weight, mode='fan_out')
                    if module.bias is not None:
                        nn.init.constant_(module.bias, 0)
                elif isinstance(module, nn.BatchNorm2d):
                    nn.init.constant_(module.weight, 1)
                    nn.init.constant_(module.bias, 0)
                elif isinstance(module, nn.Linear):
                    nn.init.normal_(module.weight, std=0.001)
                    if module.bias is not None:
                        nn.init.constant_(module.bias, 0)
        else:
            raise TypeError('pretrained must be a str or None')

    def forward(self, x):
        _, _, feat_res8, _, feat_cp8, feat_cp16 = self.cp(x)
        feat_fuse = self.ffm(feat_res8, feat_cp8)
        return [feat_cp16, feat_cp8, feat_fuse]

    def train(self, mode=True):
        """Convert the model into training mode, keeping BN layers in eval
        mode when ``norm_eval`` is set."""
        super().train(mode)
        if mode and self.norm_eval:
            for module in self.modules():
                if isinstance(module, _BatchNorm):
                    module.eval()
3b12ec8b7aa4c7692628b379b3500204ebad21cc | 186 | py | Python | MascotasPasto/views.py | Esteban-Rosas/Frameworks-7a-2020B | 768d354ee8d966c93465ff9bb2d7da27ad3e1baa | [
"MIT"
] | null | null | null | MascotasPasto/views.py | Esteban-Rosas/Frameworks-7a-2020B | 768d354ee8d966c93465ff9bb2d7da27ad3e1baa | [
"MIT"
] | null | null | null | MascotasPasto/views.py | Esteban-Rosas/Frameworks-7a-2020B | 768d354ee8d966c93465ff9bb2d7da27ad3e1baa | [
"MIT"
] | null | null | null | from django.http import HttpResponse
def MascotasPasto(request):
    """Plain-text landing view for the MascotasPasto app."""
    response = HttpResponse("MascotasPasto")
    return response
def salida(request):
    """Plain-text farewell view."""
    return HttpResponse("Hasta Luego de Mascotas Pasto")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.