| hexsha (string, 40) | size (int64, 3 to 1.03M) | ext (10 classes) | lang (1 class) | max_stars_repo_path (string, 3 to 972) | max_stars_repo_name (string, 6 to 130) | max_stars_repo_head_hexsha (string, 40 to 78) | max_stars_repo_licenses (list, 1 to 10) | max_stars_count (int64, 1 to 191k, nullable) | max_stars_repo_stars_event_min_datetime (string, 24, nullable) | max_stars_repo_stars_event_max_datetime (string, 24, nullable) | max_issues_repo_path (string, 3 to 972) | max_issues_repo_name (string, 6 to 130) | max_issues_repo_head_hexsha (string, 40 to 78) | max_issues_repo_licenses (list, 1 to 10) | max_issues_count (int64, 1 to 116k, nullable) | max_issues_repo_issues_event_min_datetime (string, 24, nullable) | max_issues_repo_issues_event_max_datetime (string, 24, nullable) | max_forks_repo_path (string, 3 to 972) | max_forks_repo_name (string, 6 to 130) | max_forks_repo_head_hexsha (string, 40 to 78) | max_forks_repo_licenses (list, 1 to 10) | max_forks_count (int64, 1 to 105k, nullable) | max_forks_repo_forks_event_min_datetime (string, 24, nullable) | max_forks_repo_forks_event_max_datetime (string, 24, nullable) | content (string, 3 to 1.03M) | avg_line_length (float64, 1.13 to 941k) | max_line_length (int64, 2 to 941k) | alphanum_fraction (float64, 0 to 1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 0204e01e045e7b7fbab8d194c09dac65e63c87ef | 786 | py | Python | Algorithms/pca.py | cody343960591/Evaluation-of-Data-Compression | d4aa87c83068b6e7e8622735d2ba1ae74543587d | ["MIT"] | 1 | 2019-02-22T09:29:07.000Z | 2019-02-22T09:29:07.000Z | Algorithms/pca.py | cody343960591/Evaluation-of-Data-Compression | d4aa87c83068b6e7e8622735d2ba1ae74543587d | ["MIT"] | null | null | null | Algorithms/pca.py | cody343960591/Evaluation-of-Data-Compression | d4aa87c83068b6e7e8622735d2ba1ae74543587d | ["MIT"] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
# Created on Feb-24-19 22:06
# pca.py
# @author: Cody Xu
'''
import pandas as pd
def pca(df, num, **kw):
max_value = df.max()[0]
min_value = df.min()[0]
window_size = int(kw['algorithm_param']['PCA']['window_size'])
dl_out = []
loc = 0
while loc < len(df):
x = [i for i in df[0][loc:loc + window_size]]
window_max = max(x)
window_min = min(x)
        if window_max - window_min < (max_value - min_value) * 0.1 * 2:
            # flatten low-variance windows to their midpoint
            x = [(window_max + window_min) / 2] * window_size
        new_data = pd.DataFrame(x)
        dl_out.append(new_data)
        loc += window_size
    df_out = pd.concat(dl_out)
    df_out.to_csv('result.csv', header=False, index=False)
| 26.2 | 71 | 0.557252 |
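A minimal usage sketch for the `pca` row above: the function reads a single-column DataFrame plus `kw['algorithm_param']['PCA']['window_size']`, so a call presumably looks like this (toy values; the import path is an assumption, and `num` is read off the signature even though the body never uses it):

```python
# Hedged usage sketch for Algorithms/pca.py above, not part of the dataset row.
import pandas as pd
from pca import pca  # assumes pca.py is on the import path

df = pd.DataFrame([1.0, 1.1, 0.9, 1.0, 5.0, 9.0, 2.0, 1.0])  # toy one-column signal
pca(df, 0, algorithm_param={'PCA': {'window_size': 4}})  # `num` is unused inside
# Windows whose max-min spread is under 20% of the global range are flattened
# to their midpoint; the output is written to result.csv.
```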
| 76730965df72ab821113fc30b8bf29e7ba77e841 | 4,462 | py | Python | function/handler.py | locational/fn-prevalence-predictor | d4df1dbf647c4fdfdc4f50b28f216e938c74c598 | ["MIT"] | null | null | null | function/handler.py | locational/fn-prevalence-predictor | d4df1dbf647c4fdfdc4f50b28f216e938c74c598 | ["MIT"] | 1 | 2021-05-07T07:18:03.000Z | 2021-05-07T07:18:03.000Z | function/handler.py | locational/fn-prevalence-predictor | d4df1dbf647c4fdfdc4f50b28f216e938c74c598 | ["MIT"] | null | null | null |
import json
import sys
import uuid
import numpy as np
import pandas as pd
import geopandas as gp
import requests
import disarm_gears
def run_function(params: dict):
#
# 1. Handle input
#
# Set random seed
np.random.seed(1000)
layer_names = params.get('layer_names')
exceedance_threshold = params.get('exceedance_threshold')
point_data = params.get('point_data')
# Make a GeoPandas DataFrame
gdf = gp.GeoDataFrame.from_features(point_data['features'])
# Use some ~~unlikely~~ IMPOSSIBLE to collide column name
id_column_name = f'ID{uuid.uuid4().int}_hard_to_collide_id'
if id_column_name in gdf.columns:
id_column_name = f'ID_seriously_{uuid.uuid4().int}_hard_to_collide_id'
gdf[id_column_name] = list(range(len(gdf)))
# TODO: Fix this hack, use GeoPandas DataFrame throughout (except for pandas2ri.DataFrame)
input_data = pd.DataFrame(gdf)
input_data = input_data.drop('geometry', axis = 1)
input_data['lat'] = gdf.geometry.y
input_data['lng'] = gdf.geometry.x
#
# 2. Process
#
# Drop NA coordinates
    # TODO: Check if GeoPandas allows creating a GeoDataFrame when some geometries are empty; if not, this belongs in params checking
#input_data.dropna(axis=0, subset=['lng', 'lat']) # TODO: this does nothing: should be catching return
# Find covariates
if layer_names is not None:
# Call fn-covariate-extractor
open_faas_link = 'http://faas.srv.disarm.io/function/fn-covariate-extractor'
just_id_and_geom = gdf.filter(['geometry', id_column_name])
req_options = {
'points': json.loads(just_id_and_geom.to_json()), # TODO: Need to to-and-from JSON here?
'layer_names': layer_names
}
covs_response = requests.post(open_faas_link, json=req_options)
# TODO define how to handle NA entries in the covariates
covs_response_json = covs_response.json()
if covs_response_json['type'] == 'error':
msg = "Problem with remote function call: " + covs_response_json['result']
raise Exception(msg)
# Merge output into input_data
# covs_data = disarm_gears.util.geojson_decoder_1(covs_response.json()['result'])
covs_result = covs_response.json()['result']
covs_gdf = gp.GeoDataFrame.from_features(covs_result['features'])
covs_data = pd.DataFrame(covs_gdf[[col for col in covs_gdf.columns if col != covs_gdf._geometry_column_name]])
covs_data = covs_data.drop('id', axis = 1) # TODO: Get fn-cov-extr to not return an `id` col
input_data = pd.merge(input_data, covs_data, how='left', left_on=[id_column_name], right_on=[id_column_name])
# Define and fit mgcv model
# TODO: Fix formula to use GeoPandas `geometry` column (e.g. `geometry.x`?)
gam_formula = "cbind(n_positive, n_trials - n_positive) ~ te(lng, lat, bs='gp', m=c(2), k=-1)"
if layer_names is not None:
gam_formula = [gam_formula] + [f'{i}' for i in layer_names]
gam_formula = '+'.join(gam_formula)
train_data = input_data.dropna(axis=0)
gam = disarm_gears.r_plugins.mgcv_fit(gam_formula, family='binomial', data=train_data)
# Make predictions/simulations
gam_pred = disarm_gears.r_plugins.mgcv_predict(gam, data=input_data, response_type='response')
link_sims = disarm_gears.r_plugins.mgcv_posterior_samples(gam, data=input_data, n_samples=200,
response_type='link')
# Credible interval
bci = np.percentile(link_sims, q=[2.5, 97.5], axis=0)
bci = 1. / (1. + np.exp(-bci))
# Exceedance probability
ex_prob = None
ex_uncert = None
if exceedance_threshold is not None:
link_threshold = np.log(exceedance_threshold / (1 - exceedance_threshold))
ex_prob = (link_sims > link_threshold).mean(axis=0)
ex_uncert = 0.5 - abs(ex_prob - 0.5)
#
# 3. Package output
#
input_data['prediction'] = gam_pred
input_data['bci_width'] = bci[1] - bci[0]
input_data['exceedance_probability'] = ex_prob
input_data['exceedance_uncertainty'] = ex_uncert
output_gdf = gp.GeoDataFrame(input_data, geometry=gp.points_from_xy(input_data.lng, input_data.lat))
slimmer_gdf = output_gdf.drop(['lat', 'lng', id_column_name], axis=1)
# return response.get('point_data')
return json.loads(slimmer_gdf.to_json())
| 39.140351 | 149 | 0.676154 |
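The handler's `.get()` calls pin down the payload shape `run_function` expects; a minimal sketch with invented values (the GeoJSON properties `n_trials`/`n_positive` are the ones required by the mgcv formula in the source):

```python
# Hedged payload sketch for run_function() above; coordinates and counts are invented.
params = {
    'layer_names': ['elevation'],   # or None to skip the covariate-extractor call
    'exceedance_threshold': 0.5,    # or None to skip exceedance outputs
    'point_data': {
        'type': 'FeatureCollection',
        'features': [{
            'type': 'Feature',
            'geometry': {'type': 'Point', 'coordinates': [32.58, -25.92]},
            'properties': {'n_trials': 30, 'n_positive': 4},
        }],
    },
}
result = run_function(params)  # GeoJSON dict with prediction/bci_width per point
```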
| e91791965c7dc979ff4bab7a75213b5a0c7c486b | 940 | py | Python | dbonconflict.py | InNolanFun/PyqueryLianjia | ad4ffddc0755b978e30b1b3501cc068d5950e7c3 | ["Apache-2.0"] | null | null | null | dbonconflict.py | InNolanFun/PyqueryLianjia | ad4ffddc0755b978e30b1b3501cc068d5950e7c3 | ["Apache-2.0"] | null | null | null | dbonconflict.py | InNolanFun/PyqueryLianjia | ad4ffddc0755b978e30b1b3501cc068d5950e7c3 | ["Apache-2.0"] | null | null | null |
import localdb
def main():
tablename = 'testtable'
localdb.createdb(tablename)
ls = list()
for i in range(10):
detail_dict = dict()
        # unit price
        detail_dict['unit_price'] = 'unit_price{}'.format(i)
        # total price
        detail_dict['all_price'] = 'all_price{}'.format(i)
        # title
        detail_dict["title"] = 'title{}'.format(i)
        # rooms
        detail_dict['rome'] = 'rome{}'.format(i)
        # floor area
        detail_dict['area'] = 'area{}'.format(i)
        # year built
        detail_dict['buildyear'] = 'buildyear{}'.format(i)
        # residential community
        detail_dict['community'] = 'community{}'.format(i)
        # district
        detail_dict['location'] = 'location{}'.format(i)
        # URL
detail_dict["url"] = 'url{}'.format(i)
ls.append(detail_dict)
localdb.insertdatatodb(ls, tablename)
for i in localdb.searchdata(tablename):
print(i)
if __name__ == "__main__":
main()
| 26.111111 | 60 | 0.541489 |
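`localdb` itself is not in this row; below is a guessed sqlite3 sketch of the interface the script exercises, with `INSERT OR REPLACE` standing in for the "on conflict" behaviour the filename hints at. The table layout, database file name, and the `url` primary key are all assumptions:

```python
# Hypothetical localdb sketch; only createdb/insertdatatodb/searchdata are
# grounded in the calls above, everything else is a guess.
import sqlite3

_DB = 'lianjia.db'  # assumed database file
_COLS = ('unit_price', 'all_price', 'title', 'rome', 'area',
         'buildyear', 'community', 'location', 'url')

def createdb(tablename):
    with sqlite3.connect(_DB) as con:
        cols = ', '.join(f'{c} TEXT' for c in _COLS)
        con.execute(f'CREATE TABLE IF NOT EXISTS {tablename} ({cols}, PRIMARY KEY (url))')

def insertdatatodb(rows, tablename):
    with sqlite3.connect(_DB) as con:
        placeholders = ', '.join('?' for _ in _COLS)
        con.executemany(
            f'INSERT OR REPLACE INTO {tablename} ({", ".join(_COLS)}) VALUES ({placeholders})',
            [tuple(row[c] for c in _COLS) for row in rows])

def searchdata(tablename):
    with sqlite3.connect(_DB) as con:
        return con.execute(f'SELECT * FROM {tablename}').fetchall()
```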
| 22076fc0725199e85404a230dff0917b885eb9fd | 1,111 | py | Python | FlaskApp/app/views.py | matthewzhou/TopicMatch | d6bdcb26d25129c730487710657cefb02a0c6d02 | ["MIT"] | 5 | 2018-07-23T19:12:27.000Z | 2020-05-06T16:28:45.000Z | FlaskApp/app/views.py | matthewzhou/TopicMatch | d6bdcb26d25129c730487710657cefb02a0c6d02 | ["MIT"] | null | null | null | FlaskApp/app/views.py | matthewzhou/TopicMatch | d6bdcb26d25129c730487710657cefb02a0c6d02 | ["MIT"] | 1 | 2020-05-06T10:37:04.000Z | 2020-05-06T10:37:04.000Z |
from app import app
from flask import Flask, jsonify, render_template, request, redirect
import time, math
from kafka import KafkaConsumer, KafkaClient
@app.route('/_timeseries')
def timeseries():
"""Retrieve time series for currKey"""
cumulative = []
hashtags = []
count = 0
    consumer = KafkaConsumer(bootstrap_servers='ec2-34-225-221-200.compute-1.amazonaws.com', auto_offset_reset='latest')
consumer.subscribe(['streaming-outputs1'])
for msg in consumer:
cumulative.append(int(msg[6].decode('utf-8')))
break
consumer.subscribe(['hashtags'])
for msg in consumer:
count += 1
hashtags.append(msg[6].decode('utf-8'))
if count == 4:
break
consumer.close()
    return jsonify(cumulative=cumulative, hashtags=hashtags)
# returns slide deck as redirect for easy access
@app.route('/deck')
def deck():
return redirect("https://docs.google.com/presentation/d/1wEBlqWDfi3yLH2jh9gWsPNJ0S1LYAgoQQjFCy6CG-iY/edit?usp=sharing")
@app.route('/')
@app.route('/index.html')
def index():
return render_template('index.html')
| 30.861111 | 120 | 0.689469 |
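One operational wrinkle in the view above: `for msg in consumer` blocks forever on an idle topic, stalling the Flask request. kafka-python's `consumer_timeout_ms` option bounds the wait; a sketch (broker address copied from the source, timeout value invented):

```python
# Hedged variant of the polling loop in timeseries() above.
from kafka import KafkaConsumer

consumer = KafkaConsumer(
    bootstrap_servers='ec2-34-225-221-200.compute-1.amazonaws.com',
    auto_offset_reset='latest',
    consumer_timeout_ms=5000)  # stop iterating after 5s of silence
consumer.subscribe(['hashtags'])
hashtags = []
for msg in consumer:
    hashtags.append(msg.value.decode('utf-8'))  # msg.value is the named form of msg[6]
    if len(hashtags) == 4:
        break
consumer.close()
```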
| 7455936e5beaf01bd772e5c627d6a1366e8a8594 | 1,949 | py | Python | utils/scripts/OOOlevelGen/src/sprites/SpikeyBuddy.py | fullscreennl/monkeyswipe | c56192e202674dd5ab18023f6cf14cf51e95fbd0 | ["MIT"] | null | null | null | utils/scripts/OOOlevelGen/src/sprites/SpikeyBuddy.py | fullscreennl/monkeyswipe | c56192e202674dd5ab18023f6cf14cf51e95fbd0 | ["MIT"] | null | null | null | utils/scripts/OOOlevelGen/src/sprites/SpikeyBuddy.py | fullscreennl/monkeyswipe | c56192e202674dd5ab18023f6cf14cf51e95fbd0 | ["MIT"] | null | null | null |
import PhysicsMixin
import ID
BODIES = """
<dict>
<key>body</key>
<dict>
<key>x</key>
<integer>%(x)s</integer>
<key>y</key>
<integer>%(y)s</integer>
<key>width</key>
<integer>%(width)s</integer>
<key>height</key>
<integer>%(height)s</integer>
<key>firstFrame</key>
<string>spikey_buddy0001.png</string>
<key>sheet_id</key>
<integer>5</integer>
<key>id</key>
<integer>%(__objID__)s</integer>
<key>name</key>
<string>%(name)s</string>
<key>static</key>
<false/>
<key>classname</key>
<string>%(classname)s</string>
</dict>
<key>shapes</key>
<array>
<dict>
<key>x</key>
<integer>0</integer>
<key>y</key>
<integer>0</integer>
<key>width</key>
<integer>%(width)s</integer>
<key>height</key>
<integer>%(height)s</integer>
<key>type</key>
<string>circ</string>
<key>friction</key>
<real>%(f)s</real>
<key>density</key>
<integer>%(d)s</integer>
<key>restitution</key>
<real>%(r)s</real>
</dict>
</array>
</dict>
"""
JOINTS = """"""
CONTACTS = """
<dict>
<key>sprite1</key>
<string>Enemy</string>
<key>sprite2</key>
<string>Spikey</string>
<key>eventName</key>
<string>onDestroy</string>
</dict>
"""
class SpikeyBuddySprite(PhysicsMixin.PhysicsMixin):
def __init__(self,**kwargs):
self.params = kwargs
self.params['name'] = "Spikey"
self.process(kwargs)
self.addDefault('classname','SpikeySprite')
self.params['__objID__'] = ID.next()
    def render(self):
        return (BODIES % self.params, JOINTS % self.params, CONTACTS % self.params)

if __name__ == "__main__":
    print(SpikeyBuddySprite(x=160, y=10, width=100, height=100).render()[0])
| 23.481928 | 76 | 0.533607 |
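`render()` is plain old-style `%(name)s` dict substitution into the plist templates; a sketch of what the dict must contain. The `f`/`d`/`r` physics values are presumably filled by `PhysicsMixin.process()`, which is not in this row, so the numbers below are invented:

```python
# Illustrative only: keys mirror the %(...)s fields in BODIES; values are invented.
params = {'x': 160, 'y': 10, 'width': 100, 'height': 100,
          'name': 'Spikey', 'classname': 'SpikeySprite',
          '__objID__': 42, 'f': 0.5, 'd': 1, 'r': 0.3}
body_plist = BODIES % params  # raises KeyError if any %(key)s field is missing
```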
| b0b5719f252b9cd892210a161e3e8fb1e8004200 | 10,877 | py | Python | deepctr/models/sequence/dien.py | dzzxjl/DeepCTR | ec3fa832865c14aa2cc843be2b1eab1bfa7b3e4e | ["Apache-2.0"] | 6,192 | 2017-12-05T03:02:35.000Z | 2022-03-31T20:59:30.000Z | deepctr/models/sequence/dien.py | dzzxjl/DeepCTR | ec3fa832865c14aa2cc843be2b1eab1bfa7b3e4e | ["Apache-2.0"] | 362 | 2018-04-15T06:53:20.000Z | 2022-03-21T15:03:02.000Z | deepctr/models/sequence/dien.py | dzzxjl/DeepCTR | ec3fa832865c14aa2cc843be2b1eab1bfa7b3e4e | ["Apache-2.0"] | 1,960 | 2017-12-05T03:16:04.000Z | 2022-03-31T06:37:00.000Z |
# -*- coding:utf-8 -*-
"""
Author:
Weichen Shen, weichenswc@163.com
Reference:
[1] Zhou G, Mou N, Fan Y, et al. Deep Interest Evolution Network for Click-Through Rate Prediction[J]. arXiv preprint arXiv:1809.03672, 2018. (https://arxiv.org/pdf/1809.03672.pdf)
"""
import tensorflow as tf
from tensorflow.python.keras.layers import (Concatenate, Dense, Permute, multiply)
from ...feature_column import SparseFeat, VarLenSparseFeat, DenseFeat, build_input_features
from ...inputs import get_varlen_pooling_list, create_embedding_matrix, embedding_lookup, varlen_embedding_lookup, \
get_dense_input
from ...layers.core import DNN, PredictionLayer
from ...layers.sequence import AttentionSequencePoolingLayer, DynamicGRU
from ...layers.utils import concat_func, reduce_mean, combined_dnn_input
def auxiliary_loss(h_states, click_seq, noclick_seq, mask, stag=None):
#:param h_states:
#:param click_seq:
#:param noclick_seq: #[B,T-1,E]
#:param mask:#[B,1]
#:param stag:
#:return:
hist_len, _ = click_seq.get_shape().as_list()[1:]
mask = tf.sequence_mask(mask, hist_len)
mask = mask[:, 0, :]
mask = tf.cast(mask, tf.float32)
click_input_ = tf.concat([h_states, click_seq], -1)
noclick_input_ = tf.concat([h_states, noclick_seq], -1)
auxiliary_nn = DNN([100, 50, 1], activation='sigmoid')
click_prop_ = auxiliary_nn(click_input_, stag=stag)[:, :, 0]
noclick_prop_ = auxiliary_nn(noclick_input_, stag=stag)[
:, :, 0] # [B,T-1]
try:
click_loss_ = - tf.reshape(tf.log(click_prop_),
[-1, tf.shape(click_seq)[1]]) * mask
except AttributeError:
click_loss_ = - tf.reshape(tf.compat.v1.log(click_prop_),
[-1, tf.shape(click_seq)[1]]) * mask
try:
noclick_loss_ = - \
tf.reshape(tf.log(1.0 - noclick_prop_),
[-1, tf.shape(noclick_seq)[1]]) * mask
except AttributeError:
noclick_loss_ = - \
tf.reshape(tf.compat.v1.log(1.0 - noclick_prop_),
[-1, tf.shape(noclick_seq)[1]]) * mask
loss_ = reduce_mean(click_loss_ + noclick_loss_)
return loss_
def interest_evolution(concat_behavior, deep_input_item, user_behavior_length, gru_type="GRU", use_neg=False,
neg_concat_behavior=None, att_hidden_size=(64, 16), att_activation='sigmoid',
att_weight_normalization=False, ):
    if gru_type not in ["GRU", "AIGRU", "AGRU", "AUGRU"]:
        raise ValueError("gru_type must be one of GRU, AIGRU, AGRU, AUGRU")
aux_loss_1 = None
embedding_size = None
rnn_outputs = DynamicGRU(embedding_size, return_sequence=True,
name="gru1")([concat_behavior, user_behavior_length])
if gru_type == "AUGRU" and use_neg:
aux_loss_1 = auxiliary_loss(rnn_outputs[:, :-1, :], concat_behavior[:, 1:, :],
neg_concat_behavior[:, 1:, :],
tf.subtract(user_behavior_length, 1), stag="gru") # [:, 1:]
if gru_type == "GRU":
rnn_outputs2 = DynamicGRU(embedding_size, return_sequence=True,
name="gru2")([rnn_outputs, user_behavior_length])
# attention_score = AttentionSequencePoolingLayer(hidden_size=att_hidden_size, activation=att_activation, weight_normalization=att_weight_normalization, return_score=True)([
# deep_input_item, rnn_outputs2, user_behavior_length])
# outputs = Lambda(lambda x: tf.matmul(x[0], x[1]))(
# [attention_score, rnn_outputs2])
# hist = outputs
hist = AttentionSequencePoolingLayer(att_hidden_units=att_hidden_size, att_activation=att_activation,
weight_normalization=att_weight_normalization, return_score=False)([
deep_input_item, rnn_outputs2, user_behavior_length])
else: # AIGRU AGRU AUGRU
scores = AttentionSequencePoolingLayer(att_hidden_units=att_hidden_size, att_activation=att_activation,
weight_normalization=att_weight_normalization, return_score=True)([
deep_input_item, rnn_outputs, user_behavior_length])
if gru_type == "AIGRU":
hist = multiply([rnn_outputs, Permute([2, 1])(scores)])
final_state2 = DynamicGRU(embedding_size, gru_type="GRU", return_sequence=False, name='gru2')(
[hist, user_behavior_length])
else: # AGRU AUGRU
final_state2 = DynamicGRU(embedding_size, gru_type=gru_type, return_sequence=False,
name='gru2')([rnn_outputs, user_behavior_length, Permute([2, 1])(scores)])
hist = final_state2
return hist, aux_loss_1
def DIEN(dnn_feature_columns, history_feature_list,
gru_type="GRU", use_negsampling=False, alpha=1.0, use_bn=False, dnn_hidden_units=(256, 128, 64),
dnn_activation='relu',
att_hidden_units=(64, 16), att_activation="dice", att_weight_normalization=True,
l2_reg_dnn=0, l2_reg_embedding=1e-6, dnn_dropout=0, seed=1024, task='binary'):
"""Instantiates the Deep Interest Evolution Network architecture.
:param dnn_feature_columns: An iterable containing all the features used by deep part of the model.
    :param history_feature_list: list, to indicate the sequence sparse fields
    :param gru_type: str, can be GRU, AIGRU, AGRU or AUGRU
    :param use_negsampling: bool, whether or not to use negative sampling
    :param alpha: float, weight of the auxiliary loss
    :param use_bn: bool. Whether to use BatchNormalization before activation in the deep net
    :param dnn_hidden_units: list of positive integers or empty list, the layer number and units in each layer of the DNN
    :param dnn_activation: Activation function to use in the DNN
    :param att_hidden_units: list of positive integers, the layer number and units in each layer of the attention net
    :param att_activation: Activation function to use in the attention net
    :param att_weight_normalization: bool. Whether to normalize the attention score of the local activation unit
    :param l2_reg_dnn: float. L2 regularizer strength applied to the DNN
    :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vectors
    :param dnn_dropout: float in [0,1), the probability a given DNN coordinate is dropped out
    :param seed: integer, to use as random seed
    :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss
:return: A Keras model instance.
"""
features = build_input_features(dnn_feature_columns)
user_behavior_length = features["seq_length"]
sparse_feature_columns = list(
filter(lambda x: isinstance(x, SparseFeat), dnn_feature_columns)) if dnn_feature_columns else []
dense_feature_columns = list(
filter(lambda x: isinstance(x, DenseFeat), dnn_feature_columns)) if dnn_feature_columns else []
varlen_sparse_feature_columns = list(
filter(lambda x: isinstance(x, VarLenSparseFeat), dnn_feature_columns)) if dnn_feature_columns else []
history_feature_columns = []
neg_history_feature_columns = []
sparse_varlen_feature_columns = []
history_fc_names = list(map(lambda x: "hist_" + x, history_feature_list))
neg_history_fc_names = list(map(lambda x: "neg_" + x, history_fc_names))
for fc in varlen_sparse_feature_columns:
feature_name = fc.name
if feature_name in history_fc_names:
history_feature_columns.append(fc)
elif feature_name in neg_history_fc_names:
neg_history_feature_columns.append(fc)
else:
sparse_varlen_feature_columns.append(fc)
inputs_list = list(features.values())
embedding_dict = create_embedding_matrix(dnn_feature_columns, l2_reg_embedding, seed, prefix="",
seq_mask_zero=False)
query_emb_list = embedding_lookup(embedding_dict, features, sparse_feature_columns,
return_feat_list=history_feature_list, to_list=True)
keys_emb_list = embedding_lookup(embedding_dict, features, history_feature_columns,
return_feat_list=history_fc_names, to_list=True)
dnn_input_emb_list = embedding_lookup(embedding_dict, features, sparse_feature_columns,
mask_feat_list=history_feature_list, to_list=True)
dense_value_list = get_dense_input(features, dense_feature_columns)
sequence_embed_dict = varlen_embedding_lookup(embedding_dict, features, sparse_varlen_feature_columns)
sequence_embed_list = get_varlen_pooling_list(sequence_embed_dict, features, sparse_varlen_feature_columns,
to_list=True)
dnn_input_emb_list += sequence_embed_list
keys_emb = concat_func(keys_emb_list)
deep_input_emb = concat_func(dnn_input_emb_list)
query_emb = concat_func(query_emb_list)
if use_negsampling:
neg_uiseq_embed_list = embedding_lookup(embedding_dict, features, neg_history_feature_columns,
neg_history_fc_names, to_list=True)
neg_concat_behavior = concat_func(neg_uiseq_embed_list)
else:
neg_concat_behavior = None
hist, aux_loss_1 = interest_evolution(keys_emb, query_emb, user_behavior_length, gru_type=gru_type,
use_neg=use_negsampling, neg_concat_behavior=neg_concat_behavior,
att_hidden_size=att_hidden_units,
att_activation=att_activation,
att_weight_normalization=att_weight_normalization, )
deep_input_emb = Concatenate()([deep_input_emb, hist])
deep_input_emb = tf.keras.layers.Flatten()(deep_input_emb)
dnn_input = combined_dnn_input([deep_input_emb], dense_value_list)
output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, use_bn, seed=seed)(dnn_input)
final_logit = Dense(1, use_bias=False, kernel_initializer=tf.keras.initializers.glorot_normal(seed))(output)
output = PredictionLayer(task)(final_logit)
model = tf.keras.models.Model(inputs=inputs_list, outputs=output)
if use_negsampling:
model.add_loss(alpha * aux_loss_1)
try:
tf.keras.backend.get_session().run(tf.global_variables_initializer())
except AttributeError:
tf.compat.v1.keras.backend.get_session().run(tf.compat.v1.global_variables_initializer())
tf.compat.v1.experimental.output_all_intermediates(True)
return model
| 49.666667 | 184 | 0.674818 |
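A hedged sketch of wiring feature columns for `DIEN()`: the `hist_`/`neg_` prefixes and the `seq_length` name are exactly the ones the function derives above; vocabulary sizes, embedding dims and `maxlen` are invented toy values:

```python
# Hedged DIEN usage sketch (DeepCTR-style feature columns; sizes are toy values).
from deepctr.feature_column import SparseFeat, VarLenSparseFeat, DenseFeat

feature_columns = [
    SparseFeat('user', vocabulary_size=3, embedding_dim=8),
    SparseFeat('item_id', vocabulary_size=10, embedding_dim=8),
    DenseFeat('pay_score', 1),
    VarLenSparseFeat(SparseFeat('hist_item_id', vocabulary_size=10,
                                embedding_dim=8, embedding_name='item_id'),
                     maxlen=4, length_name='seq_length'),
    VarLenSparseFeat(SparseFeat('neg_hist_item_id', vocabulary_size=10,
                                embedding_dim=8, embedding_name='item_id'),
                     maxlen=4, length_name='seq_length'),
]
model = DIEN(feature_columns, history_feature_list=['item_id'],
             gru_type='AUGRU', use_negsampling=True)
model.compile('adam', 'binary_crossentropy')
```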
| 4c6601ccab910a790b2de29c0fc35a4c7888f03a | 5,944 | py | Python | landsat/landsat_cli.py | kclosu/Landsat578 | a38127f19fbf510a9d4bb6670ae1f849ed6cffde | ["Apache-2.0"] | 58 | 2017-04-28T09:40:12.000Z | 2022-01-05T08:48:26.000Z | landsat/landsat_cli.py | kclosu/Landsat578 | a38127f19fbf510a9d4bb6670ae1f849ed6cffde | ["Apache-2.0"] | 30 | 2017-04-28T21:19:30.000Z | 2021-12-13T19:44:17.000Z | landsat/landsat_cli.py | kclosu/Landsat578 | a38127f19fbf510a9d4bb6670ae1f849ed6cffde | ["Apache-2.0"] | 28 | 2017-05-25T18:24:28.000Z | 2021-09-24T19:46:24.000Z |
#!/usr/bin/env python
# ===============================================================================
# Copyright 2017 dgketchum
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
from __future__ import print_function, absolute_import
import os
import argparse
import sys
import yaml
sys.path.append(os.path.dirname(__file__).replace('tests', 'landsat'))
sys.path.append(os.path.dirname(__file__))
from landsat.google_download import GoogleDownload
from landsat.update_landsat_metadata import update_metadata_lists
class TooFewInputsError(Exception):
pass
DEFAULT_CFG = '''
# date format: 'YYYY-MM-DD'
start: '2007-05-01'
end: '2007-05-31'
path: 43
row: 30
latitude:
longitude:
output_path: None
satellite: 5
return_list: False
zipped: True
max_cloud_percent: 100
'''
CONFIG_PLACEMENT = os.path.dirname(__file__)
def create_parser():
parser = argparse.ArgumentParser(prog='landsat', description='Download and unzip landsat data.')
parser.add_argument('--satellite', '-sat', help='Satellite number: 1-8, except 6', type=int)
parser.add_argument('--start', help='Start date in format YYYY-MM-DD', type=str)
parser.add_argument('--end', help='End date in format YYYY-MM-DD', type=str)
parser.add_argument('-lat', '--latitude', help='Latitude, decimal degrees', type=float, default=None)
parser.add_argument('-lon', '--longitude', help='Longitude, decimal degrees', type=float, default=None)
parser.add_argument('-p', '--path', help='The path', type=str, default=None)
parser.add_argument('-r', '--row', help='The row', type=str, default=None)
parser.add_argument('-o', '--output-path', help='Output directory', default=os.getcwd())
parser.add_argument('-conf', '--configuration', help='Path to your configuration file. If a directory is provided,'
                                                         ' a template configuration file will be created there.')
parser.add_argument('-cs', '--clear-scenes', help='Path to your clear scenes file.')
parser.add_argument('--return-list', help='Just return list of images without downloading', action='store_true',
default=False)
parser.add_argument('--zipped', help='Download .tar.gz file(s), without unzipping',
action='store_true', default=False)
parser.add_argument('--max-cloud-percent', help='Maximum percent of of image obscured by clouds accepted,'
' type integer', type=float, default=100)
parser.add_argument('--update-scenes', help='Update the scenes list this program uses to discover the '
'latest imagery.', default=False, action='store_true')
return parser
def main(args):
return_scene_list = False
if args:
cfg = {}
for arg in vars(args):
var = getattr(args, arg)
if var is not None:
cfg[arg] = var
if cfg['update_scenes']:
update_metadata_lists()
if cfg['return_list']:
return_scene_list = True
if args.configuration:
if os.path.isdir(args.configuration):
print('Creating template configuration file at {}.'.format(args.configuration))
check_config(args.configuration)
with open(args.configuration, 'r') as rfile:
                ycfg = yaml.safe_load(rfile)  # safe_load: plain config data, no arbitrary objects
cfg.update(ycfg)
            del cfg['return_list']
            del cfg['configuration']
            del cfg['update_scenes']
            cfg.pop('clear_scenes', None)   # absent unless passed on the command line
            cfg.pop('pymetric_root', None)  # not always present in the yaml config
g = GoogleDownload(**cfg)
if return_scene_list:
if cfg['max_cloud_percent'] < 100.0:
return g.candidate_scenes(return_list=True)
else:
return g.candidate_scenes(return_list=True,list_type='all')
g.download()
else:
del cfg['return_list']
del cfg['update_scenes']
g = GoogleDownload(**cfg)
if return_scene_list:
if cfg['max_cloud_percent'] < 100.0:
return g.candidate_scenes(return_list=True)
else:
return g.candidate_scenes(return_list=True,list_type='all')
else:
if cfg['max_cloud_percent'] < 100.0:
g.download()
else:
g.download(list_type='all')
def cli_runner():
parser = create_parser()
args = parser.parse_args()
if len(sys.argv) == 1:
parser.print_help(sys.stderr)
sys.exit(1)
return main(args)
def check_config(dirname):
path = os.path.join(dirname, 'downloader_config.yml')
print('\n*****A default config file {} will be written'.format(path))
with open(path, 'w') as wfile:
print('-------------- DEFAULT CONFIG -----------------')
print(DEFAULT_CFG)
print('-----------------------------------------------')
wfile.write(DEFAULT_CFG)
    print('***** Please edit the config file at {} and run the downloader again *****\n'.format(
        dirname))
sys.exit()
if __name__ == '__main__':
cli_runner()
# ===============================================================================
| 34.964706 | 119 | 0.584287 |
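Driving the CLI in-process follows directly from `create_parser()`/`main()` above; a sketch with invented scene parameters (the import path assumes the `landsat/landsat_cli.py` layout shown in this row):

```python
# Hedged in-process CLI example; flag names come from create_parser() above.
from landsat.landsat_cli import create_parser, main

args = create_parser().parse_args([
    '--satellite', '8',
    '--start', '2017-05-01', '--end', '2017-05-31',
    '-p', '43', '-r', '30',
    '-o', '/tmp/scenes',
    '--return-list'])
scenes = main(args)  # with --return-list, candidate scenes are returned, not downloaded
```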
| c8ad6b0fc0d11152dbe1a4f6cc2e6c9bb72ce455 | 2,033 | py | Python | userbot/modules/webupload.py | PratyakshM/DirtyBlack_EXTENDED | d842013d969c9ca57c640a944fc3e8b23f1eb68a | ["Naumen", "Condor-1.1", "MS-PL"] | 1 | 2021-03-22T11:40:54.000Z | 2021-03-22T11:40:54.000Z | userbot/modules/webupload.py | PratyakshM/DirtyBlack_EXTENDED | d842013d969c9ca57c640a944fc3e8b23f1eb68a | ["Naumen", "Condor-1.1", "MS-PL"] | null | null | null | userbot/modules/webupload.py | PratyakshM/DirtyBlack_EXTENDED | d842013d969c9ca57c640a944fc3e8b23f1eb68a | ["Naumen", "Condor-1.1", "MS-PL"] | null | null | null |
from telethon import events
import subprocess
import os
from telethon.errors import MessageEmptyError, MessageTooLongError, MessageNotModifiedError
import io
import asyncio
import time
from userbot import CMD_HELP
from userbot.events import register
@register(pattern="^.webupload ?(.+?|) (?:--)(anonfiles|transfer|filebin|anonymousfiles|megaupload|bayfiles|openload|file.io|vshare)")
async def _(event):
if event.fwd_from:
return
await event.edit("work in progress 🛰")
PROCESS_RUN_TIME = 100
input_str = event.pattern_match.group(1)
selected_transfer = event.pattern_match.group(2)
if input_str:
file_name = input_str
else:
        reply = await event.get_reply_message()
        # event.client is the Telethon client; the original referenced an undefined
        # `bot`. Var is the userbot's config object, assumed framework-provided here.
        file_name = await event.client.download_media(reply.media, Var.TEMP_DOWNLOAD_DIRECTORY)
reply_to_id = event.message.id
CMD_WEB = {"anonfiles": "curl -F \"file=@{}\" https://anonfiles.com/api/upload", "transfer": "curl --upload-file \"{}\" https://transfer.sh/{os.path.basename(file_name)}", "anonymousfiles": "curl -F file=\"@{}\" https://api.anonymousfiles.io/", "megaupload": "curl -F file=\"@{}\" https://megaupload.is/api/upload", "bayfiles": "curl -F file=\"@{}\" https://bayfiles.com/api/upload", "openload": "curl -F file=\"@{}\" https://api.openload.cc/upload", "file.io": "curl -F file=\"@{}\" https://file.io" , "vshare": "curl -F file=\"@{}\" https://api.vshare.is/upload"}
try:
selected_one = CMD_WEB[selected_transfer].format(file_name)
except KeyError:
await event.edit("Invalid selected Transfer")
cmd = selected_one
start_time = time.time() + PROCESS_RUN_TIME
process = await asyncio.create_subprocess_shell(
cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
)
stdout, stderr = await process.communicate()
await event.edit(f"{stdout.decode()}")
CMD_HELP.update({ "webupload":
"`.webupload` (filename) `--anonfiles` | `transfer` | `anonymousfiles` | `megaupload` | `bayfiles` | `openload` | `files.io` | `vshare` "})
| 52.128205 | 569 | 0.691097 |
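Why the original "transfer" template (fixed above) blew up: `str.format` parses every `{...}` as a replacement field, so a literal `{os.path.basename(file_name)}` inside a plain (non-f) string raises before curl ever runs:

```python
# Two-line repro of the str.format pitfall noted in the webupload fix above.
template = 'curl --upload-file "{}" https://transfer.sh/{os.path.basename(file_name)}'
try:
    template.format('/tmp/x.bin')
except KeyError as exc:
    print('format() rejected the field:', exc)  # KeyError: 'os'
```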
| 1166259a0ba644ee776547a06c19a450da465254 | 6,539 | py | Python | env/Lib/site-packages/OpenGL/raw/GL/VERSION/GL_3_1.py | 5gconnectedbike/Navio2 | 8c3f2b5d8bbbcea1fc08739945183c12b206712c | ["BSD-3-Clause"] | 210 | 2016-04-09T14:26:00.000Z | 2022-03-25T18:36:19.000Z | env/Lib/site-packages/OpenGL/raw/GL/VERSION/GL_3_1.py | 5gconnectedbike/Navio2 | 8c3f2b5d8bbbcea1fc08739945183c12b206712c | ["BSD-3-Clause"] | 72 | 2016-09-04T09:30:19.000Z | 2022-03-27T17:06:53.000Z | env/Lib/site-packages/OpenGL/raw/GL/VERSION/GL_3_1.py | 5gconnectedbike/Navio2 | 8c3f2b5d8bbbcea1fc08739945183c12b206712c | ["BSD-3-Clause"] | 64 | 2016-04-09T14:26:49.000Z | 2022-03-21T11:19:47.000Z |
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_VERSION_GL_3_1'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GL,'GL_VERSION_GL_3_1',error_checker=_errors._error_checker)
GL_ACTIVE_UNIFORM_BLOCKS=_C('GL_ACTIVE_UNIFORM_BLOCKS',0x8A36)
GL_ACTIVE_UNIFORM_BLOCK_MAX_NAME_LENGTH=_C('GL_ACTIVE_UNIFORM_BLOCK_MAX_NAME_LENGTH',0x8A35)
GL_COPY_READ_BUFFER=_C('GL_COPY_READ_BUFFER',0x8F36)
GL_COPY_WRITE_BUFFER=_C('GL_COPY_WRITE_BUFFER',0x8F37)
GL_INT_SAMPLER_2D_RECT=_C('GL_INT_SAMPLER_2D_RECT',0x8DCD)
GL_INT_SAMPLER_BUFFER=_C('GL_INT_SAMPLER_BUFFER',0x8DD0)
GL_INVALID_INDEX=_C('GL_INVALID_INDEX',0xFFFFFFFF)
GL_MAX_COMBINED_FRAGMENT_UNIFORM_COMPONENTS=_C('GL_MAX_COMBINED_FRAGMENT_UNIFORM_COMPONENTS',0x8A33)
GL_MAX_COMBINED_GEOMETRY_UNIFORM_COMPONENTS=_C('GL_MAX_COMBINED_GEOMETRY_UNIFORM_COMPONENTS',0x8A32)
GL_MAX_COMBINED_UNIFORM_BLOCKS=_C('GL_MAX_COMBINED_UNIFORM_BLOCKS',0x8A2E)
GL_MAX_COMBINED_VERTEX_UNIFORM_COMPONENTS=_C('GL_MAX_COMBINED_VERTEX_UNIFORM_COMPONENTS',0x8A31)
GL_MAX_FRAGMENT_UNIFORM_BLOCKS=_C('GL_MAX_FRAGMENT_UNIFORM_BLOCKS',0x8A2D)
GL_MAX_GEOMETRY_UNIFORM_BLOCKS=_C('GL_MAX_GEOMETRY_UNIFORM_BLOCKS',0x8A2C)
GL_MAX_RECTANGLE_TEXTURE_SIZE=_C('GL_MAX_RECTANGLE_TEXTURE_SIZE',0x84F8)
GL_MAX_TEXTURE_BUFFER_SIZE=_C('GL_MAX_TEXTURE_BUFFER_SIZE',0x8C2B)
GL_MAX_UNIFORM_BLOCK_SIZE=_C('GL_MAX_UNIFORM_BLOCK_SIZE',0x8A30)
GL_MAX_UNIFORM_BUFFER_BINDINGS=_C('GL_MAX_UNIFORM_BUFFER_BINDINGS',0x8A2F)
GL_MAX_VERTEX_UNIFORM_BLOCKS=_C('GL_MAX_VERTEX_UNIFORM_BLOCKS',0x8A2B)
GL_PRIMITIVE_RESTART=_C('GL_PRIMITIVE_RESTART',0x8F9D)
GL_PRIMITIVE_RESTART_INDEX=_C('GL_PRIMITIVE_RESTART_INDEX',0x8F9E)
GL_PROXY_TEXTURE_RECTANGLE=_C('GL_PROXY_TEXTURE_RECTANGLE',0x84F7)
GL_R16_SNORM=_C('GL_R16_SNORM',0x8F98)
GL_R8_SNORM=_C('GL_R8_SNORM',0x8F94)
GL_RG16_SNORM=_C('GL_RG16_SNORM',0x8F99)
GL_RG8_SNORM=_C('GL_RG8_SNORM',0x8F95)
GL_RGB16_SNORM=_C('GL_RGB16_SNORM',0x8F9A)
GL_RGB8_SNORM=_C('GL_RGB8_SNORM',0x8F96)
GL_RGBA16_SNORM=_C('GL_RGBA16_SNORM',0x8F9B)
GL_RGBA8_SNORM=_C('GL_RGBA8_SNORM',0x8F97)
GL_SAMPLER_2D_RECT=_C('GL_SAMPLER_2D_RECT',0x8B63)
GL_SAMPLER_2D_RECT_SHADOW=_C('GL_SAMPLER_2D_RECT_SHADOW',0x8B64)
GL_SAMPLER_BUFFER=_C('GL_SAMPLER_BUFFER',0x8DC2)
GL_SIGNED_NORMALIZED=_C('GL_SIGNED_NORMALIZED',0x8F9C)
GL_TEXTURE_BINDING_BUFFER=_C('GL_TEXTURE_BINDING_BUFFER',0x8C2C)
GL_TEXTURE_BINDING_RECTANGLE=_C('GL_TEXTURE_BINDING_RECTANGLE',0x84F6)
GL_TEXTURE_BUFFER=_C('GL_TEXTURE_BUFFER',0x8C2A)
GL_TEXTURE_BUFFER_DATA_STORE_BINDING=_C('GL_TEXTURE_BUFFER_DATA_STORE_BINDING',0x8C2D)
GL_TEXTURE_RECTANGLE=_C('GL_TEXTURE_RECTANGLE',0x84F5)
GL_UNIFORM_ARRAY_STRIDE=_C('GL_UNIFORM_ARRAY_STRIDE',0x8A3C)
GL_UNIFORM_BLOCK_ACTIVE_UNIFORMS=_C('GL_UNIFORM_BLOCK_ACTIVE_UNIFORMS',0x8A42)
GL_UNIFORM_BLOCK_ACTIVE_UNIFORM_INDICES=_C('GL_UNIFORM_BLOCK_ACTIVE_UNIFORM_INDICES',0x8A43)
GL_UNIFORM_BLOCK_BINDING=_C('GL_UNIFORM_BLOCK_BINDING',0x8A3F)
GL_UNIFORM_BLOCK_DATA_SIZE=_C('GL_UNIFORM_BLOCK_DATA_SIZE',0x8A40)
GL_UNIFORM_BLOCK_INDEX=_C('GL_UNIFORM_BLOCK_INDEX',0x8A3A)
GL_UNIFORM_BLOCK_NAME_LENGTH=_C('GL_UNIFORM_BLOCK_NAME_LENGTH',0x8A41)
GL_UNIFORM_BLOCK_REFERENCED_BY_FRAGMENT_SHADER=_C('GL_UNIFORM_BLOCK_REFERENCED_BY_FRAGMENT_SHADER',0x8A46)
GL_UNIFORM_BLOCK_REFERENCED_BY_GEOMETRY_SHADER=_C('GL_UNIFORM_BLOCK_REFERENCED_BY_GEOMETRY_SHADER',0x8A45)
GL_UNIFORM_BLOCK_REFERENCED_BY_VERTEX_SHADER=_C('GL_UNIFORM_BLOCK_REFERENCED_BY_VERTEX_SHADER',0x8A44)
GL_UNIFORM_BUFFER=_C('GL_UNIFORM_BUFFER',0x8A11)
GL_UNIFORM_BUFFER_BINDING=_C('GL_UNIFORM_BUFFER_BINDING',0x8A28)
GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT=_C('GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT',0x8A34)
GL_UNIFORM_BUFFER_SIZE=_C('GL_UNIFORM_BUFFER_SIZE',0x8A2A)
GL_UNIFORM_BUFFER_START=_C('GL_UNIFORM_BUFFER_START',0x8A29)
GL_UNIFORM_IS_ROW_MAJOR=_C('GL_UNIFORM_IS_ROW_MAJOR',0x8A3E)
GL_UNIFORM_MATRIX_STRIDE=_C('GL_UNIFORM_MATRIX_STRIDE',0x8A3D)
GL_UNIFORM_NAME_LENGTH=_C('GL_UNIFORM_NAME_LENGTH',0x8A39)
GL_UNIFORM_OFFSET=_C('GL_UNIFORM_OFFSET',0x8A3B)
GL_UNIFORM_SIZE=_C('GL_UNIFORM_SIZE',0x8A38)
GL_UNIFORM_TYPE=_C('GL_UNIFORM_TYPE',0x8A37)
GL_UNSIGNED_INT_SAMPLER_2D_RECT=_C('GL_UNSIGNED_INT_SAMPLER_2D_RECT',0x8DD5)
GL_UNSIGNED_INT_SAMPLER_BUFFER=_C('GL_UNSIGNED_INT_SAMPLER_BUFFER',0x8DD8)
@_f
@_p.types(None,_cs.GLenum,_cs.GLuint,_cs.GLuint)
def glBindBufferBase(target,index,buffer):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLuint,_cs.GLuint,_cs.GLintptr,_cs.GLsizeiptr)
def glBindBufferRange(target,index,buffer,offset,size):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLintptr,_cs.GLintptr,_cs.GLsizeiptr)
def glCopyBufferSubData(readTarget,writeTarget,readOffset,writeOffset,size):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLint,_cs.GLsizei,_cs.GLsizei)
def glDrawArraysInstanced(mode,first,count,instancecount):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLsizei,_cs.GLenum,ctypes.c_void_p,_cs.GLsizei)
def glDrawElementsInstanced(mode,count,type,indices,instancecount):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLuint,_cs.GLsizei,arrays.GLsizeiArray,arrays.GLcharArray)
def glGetActiveUniformBlockName(program,uniformBlockIndex,bufSize,length,uniformBlockName):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLuint,_cs.GLenum,arrays.GLintArray)
def glGetActiveUniformBlockiv(program,uniformBlockIndex,pname,params):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLuint,_cs.GLsizei,arrays.GLsizeiArray,arrays.GLcharArray)
def glGetActiveUniformName(program,uniformIndex,bufSize,length,uniformName):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLsizei,arrays.GLuintArray,_cs.GLenum,arrays.GLintArray)
def glGetActiveUniformsiv(program,uniformCount,uniformIndices,pname,params):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLuint,arrays.GLintArray)
def glGetIntegeri_v(target,index,data):pass
@_f
@_p.types(_cs.GLuint,_cs.GLuint,arrays.GLcharArray)
def glGetUniformBlockIndex(program,uniformBlockName):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLsizei,ctypes.POINTER( ctypes.POINTER( _cs.GLchar )),arrays.GLuintArray)
def glGetUniformIndices(program,uniformCount,uniformNames,uniformIndices):pass
@_f
@_p.types(None,_cs.GLuint)
def glPrimitiveRestartIndex(index):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLuint)
def glTexBuffer(target,internalformat,buffer):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLuint,_cs.GLuint)
def glUniformBlockBinding(program,uniformBlockIndex,uniformBlockBinding):pass
| 54.491667 | 111 | 0.866187 |
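The autogenerated wrapper above only declares the GL 3.1 entry points; typical uniform-buffer usage chains three of them. A sketch (requires a live GL 3.1 context from GLUT/GLFW/etc.; the block name is hypothetical):

```python
# Hedged PyOpenGL sketch using the GL 3.1 calls defined above.
from OpenGL.GL import (glGetUniformBlockIndex, glUniformBlockBinding,
                       glBindBufferBase, GL_UNIFORM_BUFFER, GL_INVALID_INDEX)

def attach_ubo(program, block_name, ubo, binding=0):
    """Bind uniform block `block_name` (bytes, e.g. b'Matrices') to buffer `ubo`."""
    index = glGetUniformBlockIndex(program, block_name)
    if index == GL_INVALID_INDEX:
        raise ValueError('no uniform block named %r' % block_name)
    glUniformBlockBinding(program, index, binding)     # block -> binding point
    glBindBufferBase(GL_UNIFORM_BUFFER, binding, ubo)  # buffer -> same binding point
```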
| ceedab0ad5a31516210658f265214b59c5d304bb | 34,507 | py | Python | tests/nightly/test_large_vector.py | litaotju/incubator-mxnet | 83b28911cbc4a49521c69d8c4330a7234436b2fa | ["Apache-2.0"] | null | null | null | tests/nightly/test_large_vector.py | litaotju/incubator-mxnet | 83b28911cbc4a49521c69d8c4330a7234436b2fa | ["Apache-2.0"] | null | null | null | tests/nightly/test_large_vector.py | litaotju/incubator-mxnet | 83b28911cbc4a49521c69d8c4330a7234436b2fa | ["Apache-2.0"] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import tempfile
import math
import numpy as np
import mxnet as mx
from mxnet.test_utils import rand_ndarray, assert_almost_equal, rand_coord_2d, create_vector
from mxnet import gluon, nd
from tests.python.unittest.common import with_seed
from nose.tools import with_setup
# dimension constants
LARGE_X = 4300000000
MEDIUM_X = 1000000000
def test_nn():
def check_dense():
data = mx.nd.ones(shape=LARGE_X)
linear = gluon.nn.Dense(2)
linear.initialize()
res = linear(data)
assert res.shape == (LARGE_X, 2)
def check_regression():
shape = (LARGE_X, )
def check_regression(symbol, forward, shape):
# init executor
data_s = mx.symbol.Variable('data')
label_s = mx.symbol.Variable('label')
out_s = symbol(data=data_s, label=label_s)
exe = out_s.simple_bind(ctx=mx.cpu(0), data=shape, label=shape)
arg_map = dict(zip(out_s.list_arguments(), exe.arg_arrays))
# init data
            data = mx.random.uniform(-1, 1, shape)  # (-1, -1) was a degenerate range
arg_map["data"][:] = data
atol = 1e-5
density = 0.5
stype = 'default'
label = arg_map["label"]
label[:] = rand_ndarray(shape, stype, density=density)
exe.forward(is_train=True)
exe.backward()
np_out = forward(data.asnumpy())
assert_almost_equal(exe.outputs[0].asnumpy(), np_out, atol=atol)
check_regression(mx.symbol.LogisticRegressionOutput,
lambda x: 1.0 / (1.0 + np.exp(-x)),
shape)
check_regression(mx.symbol.LinearRegressionOutput,
lambda x: x,
shape)
def check_sign():
a = mx.nd.random.normal(-1, 1, shape=LARGE_X)
mx_res = mx.nd.sign(a)
assert_almost_equal(mx_res[-1].asnumpy(), np.sign(a[-1].asnumpy()))
# TODO: correctness of layernorm
# numpy implementation for large vector is flaky
def check_layer_norm():
axis = 0
eps = 1E-5
in_shape = LARGE_X
data = nd.random.normal(0, 1, in_shape)
gamma = nd.random.normal(0, 1, in_shape)
beta = nd.random.normal(0, 1, in_shape)
mx_out = nd.LayerNorm(data, gamma, beta, axis, eps)
assert mx_out.shape == (in_shape,)
# TODO: correctness of batchnorm
# in future, we could test if mean, var of output
# matches target output's mean, var
def check_batchnorm():
shape = LARGE_X
axis = 0 # since vector
data = mx.nd.ones(shape=shape)
bn_gamma = mx.nd.random.uniform(shape=shape)
bn_beta = mx.nd.random.uniform(shape=shape)
bn_running_mean = mx.nd.zeros(shape)
bn_running_var = mx.nd.ones(shape)
output = mx.nd.BatchNorm(data, bn_gamma, bn_beta,
bn_running_mean, bn_running_var, axis=axis)
assert output.shape == (shape,)
def check_sequence_mask():
# Sequence Mask input [max_sequence_length, batch_size]
# test with input batch_size = 2
a = nd.arange(0, LARGE_X * 2).reshape(LARGE_X, 2)
# test as identity operator
b = nd.SequenceMask(a)
assert b[-1][0] == a[-1][0]
assert b.shape == a.shape
# test with default mask
b = nd.SequenceMask(a, sequence_length=nd.array([1, 1]),
use_sequence_length=True)
assert b[0][1] == a[0][1] # first sequence of each batch kept
assert b[-1][-1] != a[-1][-1] # rest sequences masked
assert b[-1][-1] == 0
# test with mask value
b = nd.SequenceMask(a, sequence_length=nd.array([1, 1]),
use_sequence_length=True, value=-1)
assert b[-1][-1] == -1
def check_sequence_reverse():
a = nd.arange(0, LARGE_X * 2).reshape(LARGE_X, 2)
# test as reverse operator
b = nd.SequenceReverse(a)
assert b[-1][0] == a[0][0]
assert b.shape == a.shape
# test with sequence length
b = nd.SequenceReverse(a, sequence_length=nd.array([2, 3]),
use_sequence_length=True)
assert b[1][0] == a[0][0] # check if reversed
assert b[-1][0] == a[-1][0] # check if intact
assert b.shape == a.shape
def check_sequence_last():
a = nd.arange(0, LARGE_X * 2).reshape(LARGE_X, 2)
# test if returns last sequence
b = nd.SequenceLast(a)
assert_almost_equal(b.asnumpy(), a[-1].asnumpy())
assert b.shape == (2,)
# test with sequence length
# parameter sequence_length - NDArray with shape (batch_size)
# (2,3) indicates 2nd sequence from batch 1 and 3rd sequence from batch 2
# need to mention dtype = int64 for sequence_length ndarray to support large indices
# else it defaults to float32 and errors
b = nd.SequenceLast(a, sequence_length=mx.nd.array([2, 3], dtype="int64"),
use_sequence_length=True)
# check if it takes 2nd sequence from the first batch
assert b[0] == a[1][0]
check_dense()
check_regression()
check_sign()
check_layer_norm()
check_batchnorm()
check_sequence_mask()
check_sequence_reverse()
check_sequence_last()
def test_tensor():
def check_ndarray_zeros():
a = nd.zeros(shape=LARGE_X)
assert a[-1] == 0
assert a.shape == (LARGE_X,)
assert a.size == LARGE_X
def check_ndarray_ones():
a = nd.ones(shape=LARGE_X)
assert a[-1] == 1
assert nd.sum(a) == LARGE_X
def check_ndarray_empty():
a = nd.empty(LARGE_X)
assert a.shape == (LARGE_X,)
@with_seed()
def check_ndarray_random_uniform():
a = nd.random.uniform(shape=LARGE_X)
assert a[-1] != 0
@with_seed()
def check_ndarray_random_randint():
# check if randint can generate value greater than 2**32 (large)
low = 2**32
high = 2**34
a = nd.random.randint(low, high, dtype=np.int64, shape=LARGE_X).asnumpy()
assert a.shape == (LARGE_X,)
assert (a >= low).all() and (a < high).all()
@with_seed()
def check_ndarray_random_exponential():
a = nd.random.exponential(shape=LARGE_X)
assert a[-1] >= 0.
assert a.shape[0] == LARGE_X
@with_seed()
def check_ndarray_random_gamma():
a = nd.random.gamma(shape=LARGE_X)
assert a[-1] >= 0.
assert a.shape[0] == LARGE_X
@with_seed()
def check_ndarray_random_generalized_negative_binomial():
a = nd.random.generalized_negative_binomial(shape=LARGE_X)
assert a[-1] >= 0.
assert a.shape[0] == LARGE_X
@with_seed()
def check_ndarray_random_multinomial():
a = nd.random.multinomial(nd.random.uniform(shape=LARGE_X))
assert a[-1] >= 0.
assert a.shape[0] == 1
@with_seed()
def check_ndarray_random_negative_binomial():
a = nd.random.negative_binomial(shape=LARGE_X)
assert a[-1] >= 0.
assert a.shape[0] == LARGE_X
@with_seed()
def check_ndarray_random_normal():
a = nd.random.normal(shape=LARGE_X)
assert a.shape[0] == LARGE_X
@with_seed()
def check_ndarray_random_poisson():
a = nd.random.poisson(shape=LARGE_X)
assert a[-1] >= 0.
assert a.shape[0] == LARGE_X
@with_seed()
def check_ndarray_random_randn():
a = nd.random.randn(LARGE_X)
assert a.shape[0] == LARGE_X
@with_seed()
def check_ndarray_random_shuffle():
a = nd.ones(shape=LARGE_X)
a[-1] = 3
a = nd.random.shuffle(a)
unique_a = np.unique(a.asnumpy())
assert len(unique_a) == 2 # only 2 unique values
assert unique_a[0] == 1 # first unique value is 1
assert unique_a[1] == 3 # second unique value is 3
assert a.shape[0] == LARGE_X
def check_full():
a = nd.full(LARGE_X, 3)
assert a.shape[0] == LARGE_X
assert a[LARGE_X // 2] == 3
assert a[-1] == 3
def check_repeat():
x = create_vector(size=LARGE_X//2)
y = nd.repeat(x, repeats=2, axis = 0)
assert y.shape[0] == LARGE_X
assert y[1] == 0
assert y[LARGE_X-1] == LARGE_X//2-1
def check_clip():
a = create_vector(LARGE_X)
res = nd.clip(a, a_min=100, a_max=1000)
assert res[-1] == 1000
def check_slice():
a = nd.ones(LARGE_X)
res = nd.slice(a, begin=(LARGE_X - MEDIUM_X), end=LARGE_X)
assert res.shape[0] == MEDIUM_X
assert res[0] == 1
def check_slice_assign():
a = nd.ones(shape=LARGE_X)
a[LARGE_X-1:LARGE_X] = 1000
assert np.sum(a[-1].asnumpy() == 1000) == 1
def check_take():
a = nd.ones(shape=LARGE_X)
idx = nd.arange(LARGE_X - 1000, LARGE_X)
res = nd.take(a, idx)
assert np.sum(res.asnumpy() == 1) == res.shape[0]
def check_expand_dims():
a = nd.ones(shape=LARGE_X)
res = nd.expand_dims(a, axis=0)
assert res[0][0] == 1
assert res.shape == (1, a.shape[0])
def check_squeeze():
a = nd.ones(shape=LARGE_X)
data = nd.expand_dims(a, axis=0)
res = nd.squeeze(data)
assert a[0] == res[0]
assert res.shape == a.shape
def check_broadcast_div():
a = nd.ones(shape=LARGE_X)
b = nd.ones(shape=LARGE_X) * 2
res = a / b
assert np.sum(res.asnumpy() == 0.5) == a.shape[0]
def check_size():
b = create_vector(size=LARGE_X)
# explicit wait_to_read()
assert b[0] == 0
assert b.size == LARGE_X
def check_copy():
a = nd.ones(LARGE_X)
b = a.copy()
assert a[0] == b[0]
assert b.shape == a.shape
assert b.size == LARGE_X
def check_copy_to():
a = create_vector(size=LARGE_X)
# keeping dtype same as input uses parallel copy which is much faster
b = nd.zeros(LARGE_X, dtype=np.int64)
c = a.copyto(b)
assert c is b
assert b[-1] == LARGE_X-1
assert b[0] == 0
def check_zeros_like():
a = nd.ones(LARGE_X)
b = nd.zeros_like(a)
assert b[-1] == 0
assert b.shape == a.shape
def check_ones_like():
a = nd.zeros(LARGE_X)
b = nd.ones_like(a)
assert b[-1] == 1
assert b.shape == a.shape
def check_shape():
b = create_vector(size=LARGE_X)
# explicit wait_to_read()
assert b[0] == 0
assert b.shape[0] == LARGE_X
def check_concat():
a = nd.ones(LARGE_X)
b = nd.zeros(LARGE_X)
c = nd.concat(a, b, dim=0)
assert c[0] == 1
assert c[-1] == 0
assert c.shape[0] == (2 * LARGE_X)
def check_slice_like():
a = create_vector(size=LARGE_X)
b = nd.ones(LARGE_X//2)
c = nd.slice_like(a, b)
assert c.shape == b.shape
assert c[0] == 0
assert c[-1] == (LARGE_X // 2 - 1)
def check_slice_axis():
a = create_vector(size=LARGE_X)
med = LARGE_X // 2
c = nd.slice_axis(a, axis=0, begin=0, end=med)
assert c.shape[0] == a.shape[0] // 2
assert c[-1][0] == (med - 1)
def check_gather():
arr = mx.nd.ones(LARGE_X)
# Passing dtype=np.int64 since randomly generated indices are
# very large that exceeds int32 limits.
idx = mx.nd.random.randint(0, LARGE_X, 10, dtype=np.int64)
# Calls gather_nd internally
tmp = arr[idx]
assert np.sum(tmp.asnumpy() == 1) == 10
# Calls gather_nd internally
arr[idx] += 1
assert np.sum(arr[idx].asnumpy() == 2) == 10
def check_infer_shape():
data_1 = mx.symbol.Variable('data_1')
data_2 = mx.symbol.Variable('data_2')
add = data_1+data_2
# > add.infer_shape(data_1=(LARGE_X,), data_2=(LARGE_X,))
# OUTPUT - arg_shapes, out_shapes, aux_shapes
_, out_shapes, _ = add.infer_shape(data_1=(LARGE_X,), data_2=(LARGE_X,))
assert out_shapes == [(LARGE_X,)]
def check_astype():
x = create_vector(size=LARGE_X//4)
x = nd.tile(x, 4)
y = x.astype('int32')
assert y.dtype == np.int32
assert y[-1] == LARGE_X//4-1
def check_cast():
x = create_vector(size=LARGE_X//4)
x = nd.tile(x, 4)
y = nd.cast(x, np.int32)
assert y.dtype == np.int32
assert y[-1] == LARGE_X//4-1
def check_load_save():
x = create_vector(size=LARGE_X)
tmp = tempfile.mkdtemp()
tmpfile = os.path.join(tmp, 'large_vector')
nd.save(tmpfile, [x])
y = nd.load(tmpfile)
y = y[0]
assert x[0] == y[0]
assert x[-1] == y[-1]
def check_binary_broadcast():
def check_correctness(mxnet_op, numpy_op, atol=1e-3):
a = mx.nd.ones(LARGE_X).as_np_ndarray()
b = 2*mx.nd.ones(LARGE_X).as_np_ndarray()
res = mxnet_op(a, b)
np_res = numpy_op(1, 2)
assert np.abs(res[-1] - np_res) < atol
check_correctness(mx.np.arctan2, np.arctan2)
check_correctness(mx.np.hypot, np.hypot)
check_ndarray_zeros()
check_ndarray_ones()
check_ndarray_empty()
check_ndarray_random_uniform()
check_ndarray_random_randint()
check_ndarray_random_exponential()
check_ndarray_random_gamma()
check_ndarray_random_generalized_negative_binomial()
check_ndarray_random_multinomial()
check_ndarray_random_negative_binomial()
check_ndarray_random_normal()
check_ndarray_random_poisson()
check_ndarray_random_randn()
check_ndarray_random_shuffle()
check_full()
check_repeat()
check_clip()
check_slice()
check_slice_assign()
check_take()
check_expand_dims()
check_squeeze()
check_broadcast_div()
check_size()
check_copy()
check_copy_to()
check_zeros_like()
check_ones_like()
check_shape()
check_concat()
check_slice_like()
check_slice_axis()
check_gather()
check_infer_shape()
check_astype()
check_cast()
check_load_save()
check_binary_broadcast()
def test_basic():
def check_elementwise():
a = nd.ones(shape=LARGE_X)
b = nd.ones(shape=LARGE_X)
res = a + b
assert res[-1].asnumpy() == 2
res = a + 1
assert res[-1].asnumpy() == 2
res = nd.sqrt(a + 8)
assert res[-1].asnumpy() == 3
def check_argmin():
a = create_vector(LARGE_X, dtype=np.float32)
assert a[0] == 0
idx = mx.nd.argmin(a, axis=0)
assert idx[0] == 0
assert idx.shape[0] == 1
def check_argsort():
a = create_vector(size=LARGE_X)
s = nd.argsort(a, axis=0, is_ascend=False, dtype=np.int64)
assert s[0] == (LARGE_X - 1)
def check_sort():
a = create_vector(size=LARGE_X)
def check_descend(x):
s = nd.sort(x, axis=0, is_ascend=False)
assert s[-1] == 0
def check_ascend(x):
s = nd.sort(x, is_ascend=True)
assert s[0] == 0
check_descend(a)
check_ascend(a)
def check_topk():
a = create_vector(size=LARGE_X)
ind = nd.topk(a, k=10, axis=0, dtype=np.int64)
for i in range(10):
assert ind[i] == (LARGE_X - i - 1)
ind, val = mx.nd.topk(a, k=3, axis=0, dtype=np.int64, ret_typ="both", is_ascend=False)
assert np.all(ind == val)
val = nd.topk(a, k=1, axis=0, dtype=np.int64, ret_typ="value")
assert val == (LARGE_X - 1)
def check_mean():
a = nd.arange(-LARGE_X // 2, LARGE_X // 2 + 1, dtype=np.int64)
b = nd.mean(a, axis=0)
assert b == 0
def check_exponent_logarithm_operators():
a = 2*nd.ones(shape=LARGE_X)
# exponent
result = nd.exp(a)
assert result[-1] == 7.389056
assert result.shape == a.shape
# exponent minus 1
result = nd.expm1(a)
assert result[-1] == 6.389056
assert result.shape == a.shape
# log2
result = nd.log2(a)
assert result[-1] == 1
assert result.shape == a.shape
# log10
result = nd.log10(a)
assert result[-1] == 0.30103
assert result.shape == a.shape
# log1p
result = nd.log1p(a)
assert result[-1] == 1.0986123
assert result.shape == a.shape
# log
result = nd.log(a)
assert result[-1] == 0.6931472
assert result.shape == a.shape
def check_power_operators():
a = 2*nd.ones(shape=LARGE_X)
# sqrt
result = nd.sqrt(a)
assert result[-1] == 1.4142135
assert result.shape == a.shape
# rsqrt
result = nd.rsqrt(a)
assert result[-1] == 0.70710677
assert result.shape == a.shape
# cbrt
result = nd.cbrt(a)
assert result[-1] == 1.2599211
assert result.shape == a.shape
# rcbrt
result = nd.rcbrt(a)
assert result[-1] == 0.7937005
assert result.shape == a.shape
# square
result = nd.square(a)
assert result[-1] == 4
assert result.shape == a.shape
# reciprocal
result = nd.reciprocal(a)
assert result[-1] == 0.5
assert result.shape == a.shape
def check_add():
a = nd.ones(shape=LARGE_X)
b = nd.ones(shape=LARGE_X)
c = b
c = c.__add__(a)
assert c[-1] == 2
assert c.shape == a.shape
def check_sub():
a = 3*nd.ones(shape=LARGE_X)
b = nd.ones(shape=LARGE_X)
c = b
c = c.__sub__(a)
assert c[-1] == -2
assert c.shape == a.shape
def check_rsub():
a = 3*nd.ones(shape=LARGE_X)
b = nd.ones(shape=LARGE_X)
c = b
c = c.__rsub__(a)
assert c[-1] == 2
assert c.shape == a.shape
def check_neg():
a = nd.ones(shape=LARGE_X)
c = a
c = c.__neg__()
assert c[-1] == -1
assert c.shape == a.shape
def check_mul():
a = 2*nd.ones(shape=LARGE_X)
b = 3*nd.ones(shape=LARGE_X)
c = b
c = c.__mul__(a)
assert c[-1] == 6
assert c.shape == a.shape
def check_div():
a = 2*nd.ones(shape=LARGE_X)
b = 3*nd.ones(shape=LARGE_X)
c = b
c = c.__div__(a)
assert c[-1] == 3/2
assert c.shape == a.shape
def check_rdiv():
a = 2*nd.ones(shape=LARGE_X)
b = 3*nd.ones(shape=LARGE_X)
c = b
c = c.__rdiv__(a)
assert c[-1] == 2/3
assert c.shape == a.shape
def check_mod():
a = 2*nd.ones(shape=LARGE_X)
b = 3*nd.ones(shape=LARGE_X)
c = b
c = c.__mod__(a)
assert c[-1] == 1
assert c.shape == a.shape
def check_rmod():
a = 2*nd.ones(shape=LARGE_X)
b = 3*nd.ones(shape=LARGE_X)
c = b
c = c.__rmod__(a)
assert c[-1] == 2
assert c.shape == a.shape
def check_imod():
a = 2*nd.ones(shape=LARGE_X)
b = 3*nd.ones(shape=LARGE_X)
c = b
c = c.__imod__(a)
assert c[-1] == 1
assert c.shape == a.shape
def check_pow():
a = 2*nd.ones(shape=LARGE_X)
b = 3*nd.ones(shape=LARGE_X)
c = b
c = c.__pow__(a)
assert c[-1] == 9
assert c.shape == a.shape
def check_rpow():
a = 2*nd.ones(shape=LARGE_X)
b = 3*nd.ones(shape=LARGE_X)
c = b
c = c.__rpow__(a)
assert c[-1] == 8
assert c.shape == a.shape
def check_sum():
a = nd.ones(LARGE_X)
b = nd.sum(a, axis=0)
assert b[0] == LARGE_X
def check_prod():
a = nd.ones(LARGE_X)
b = nd.prod(a, axis=0)
assert b[0] == 1
def check_min():
a = create_vector(size=LARGE_X)
b = nd.min(a, axis=0)
assert b[0] == 0
assert b[-1] == 0
def check_max():
a = create_vector(size=LARGE_X)
b = nd.max(a, axis=0)
assert b[0] == (LARGE_X - 1)
def check_argmax():
a = nd.ones(LARGE_X)
b = nd.zeros(LARGE_X)
c = nd.concat(a, b, dim=0)
d = nd.argmax(c, axis=0)
assert c.shape[0] == (2 * LARGE_X)
assert d == 0
def check_iadd():
a = nd.ones(LARGE_X)
b = nd.ones(LARGE_X)
c = b
c += a
assert c.shape == a.shape
assert c[-1] == 2
def check_isub():
a = nd.full(LARGE_X, 3)
b = nd.ones(LARGE_X)
c = a
c -= b
assert c.shape == a.shape
assert c[-1] == 2
def check_imul():
a = nd.full(LARGE_X, 3)
b = nd.ones(LARGE_X)
c = b
c *= a
assert c.shape == a.shape
assert c[-1] == 3
def check_idiv():
a = nd.full(LARGE_X, 4)
b = nd.full(LARGE_X, 2)
c = a
c /= b
assert c.shape == a.shape
assert c[-1] == 2
def check_eq():
a = nd.full(LARGE_X, 3)
b = nd.full(LARGE_X, 3)
c = (a == b)
assert (c.asnumpy() == 1).all()
def check_neq():
a = nd.full(LARGE_X, 2)
b = nd.full(LARGE_X, 3)
c = (a != b)
assert (c.asnumpy() == 1).all()
    def check_lt():
        a = nd.full(LARGE_X, 2)
        b = nd.full(LARGE_X, 3)
        d = (a < b)  # strict less-than; the <= case is covered by check_lte below
        assert (d.asnumpy() == 1).all()
def check_lte():
a = nd.full(LARGE_X, 2)
b = nd.full(LARGE_X, 3)
c = nd.full(LARGE_X, 2)
d = (a <= b)
assert (d.asnumpy() == 1).all()
d = (a <= c)
assert (d.asnumpy() == 1).all()
def check_gt():
a = nd.full(LARGE_X, 3)
b = nd.full(LARGE_X, 2)
d = (a > b)
assert (d.asnumpy() == 1).all()
def check_gte():
a = nd.full(LARGE_X, 3)
b = nd.full(LARGE_X, 2)
c = nd.full(LARGE_X, 3)
d = (a >= b)
assert (d.asnumpy() == 1).all()
d = (a >= c)
assert (d.asnumpy() == 1).all()
def check_logical():
def check_logical_and(a, b):
mx_res = mx.nd.logical_and(a, b)
assert_almost_equal(mx_res[-1].asnumpy(), np.logical_and(a[-1].asnumpy(), b[-1].asnumpy()))
def check_logical_or(a, b):
mx_res = mx.nd.logical_or(a, b)
assert_almost_equal(mx_res[-1].asnumpy(), np.logical_or(a[-1].asnumpy(), b[-1].asnumpy()))
def check_logical_not(a, b):
mx_res = mx.nd.logical_not(a, b)
assert_almost_equal(mx_res[-1].asnumpy(), np.logical_not(a[-1].asnumpy(), b[-1].asnumpy()))
def check_logical_xor(a, b):
mx_res = mx.nd.logical_xor(a, b)
assert_almost_equal(mx_res[-1].asnumpy(), np.logical_xor(a[-1].asnumpy(), b[-1].asnumpy()))
a = mx.nd.ones(LARGE_X)
b = mx.nd.zeros(LARGE_X)
check_logical_and(a, b)
check_logical_or(a, b)
check_logical_not(a, b)
check_logical_xor(a, b)
def create_input_for_rounding_ops():
    # Creates a vector with values (-LARGE/2 ... -2, -1, 0, 1, 2, ..., LARGE/2-1),
    # then divides each element by 2, i.e. (-LARGE/4 ... -1, -0.5, 0, 0.5, 1, ..., LARGE/4-1)
inp = nd.arange(-LARGE_X//2, LARGE_X//2, dtype=np.float64)
inp = inp/2
return inp
def assert_correctness_of_rounding_ops(output, mid, expected_vals):
    # verifies 5 values at the middle positions of the input vector,
    # i.e. mid-2, mid-1, mid, mid+1, mid+2
output_idx_to_inspect = [mid-2, mid-1, mid, mid+1, mid+2]
for i in range(len(output_idx_to_inspect)):
assert output[output_idx_to_inspect[i]] == expected_vals[i]
def check_rounding_ops():
x = create_input_for_rounding_ops()
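# around the midpoint, x is (..., -1.0, -0.5, 0.0, 0.5, 1.0, ...); each check
# below verifies the five middle outputs of one rounding op against expected values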
def check_ceil():
y = nd.ceil(x)
# expected output for middle 5 values after applying ceil()
expected_output = [-1, 0, 0, 1, 1]
assert_correctness_of_rounding_ops(y, LARGE_X//2, expected_output)
def check_fix():
y = nd.fix(x)
# expected output for middle 5 values after applying fix()
expected_output = [-1, 0, 0, 0, 1]
assert_correctness_of_rounding_ops(y, LARGE_X//2, expected_output)
def check_floor():
y = nd.floor(x)
# expected output for middle 5 values after applying floor()
expected_output = [-1, -1, 0, 0, 1]
assert_correctness_of_rounding_ops(y, LARGE_X//2, expected_output)
def check_rint():
y = nd.rint(x)
# expected output for middle 5 values after applying rint()
expected_output = [-1, -1, 0, 0, 1]
assert_correctness_of_rounding_ops(y, LARGE_X//2, expected_output)
def check_round():
y = nd.round(x)
# expected output for middle 5 values after applying round()
expected_output = [-1, -1, 0, 1, 1]
assert_correctness_of_rounding_ops(y, LARGE_X//2, expected_output)
def check_trunc():
y = nd.trunc(x)
# expected output for middle 5 values after applying trunc()
expected_output = [-1, 0, 0, 0, 1]
assert_correctness_of_rounding_ops(y, LARGE_X//2, expected_output)
check_ceil()
check_fix()
check_floor()
check_rint()
check_round()
check_trunc()
def create_input_for_trigonometric_ops(vals):
# Creates a large vector input of size LARGE_X from vals using the tile operator
inp = nd.array(vals)
inp = nd.tile(inp, LARGE_X//len(vals))
return inp
def assert_correctness_of_trigonometric_ops(output, expected_vals):
# verifies the 5 values at positions (0, 1, -3, -2, -1) of the input vector
output_idx_to_inspect = [0, 1, -3, -2, -1]
for i in range(len(output_idx_to_inspect)):
assert np.abs(output[output_idx_to_inspect[i]].asnumpy()-expected_vals[i]) <= 1e-3
def check_trigonometric_ops():
def check_arcsin():
x = create_input_for_trigonometric_ops([-1, -.707, 0, .707, 1])
y = nd.arcsin(x)
# expected output for indices=(0, 1, -3, -2, -1) after applying arcsin()
expected_output = [-np.pi/2, -np.pi/4, 0, np.pi/4, np.pi/2]
assert_correctness_of_trigonometric_ops(y, expected_output)
def check_arccos():
x = create_input_for_trigonometric_ops([-1, -.707, 0, .707, 1])
y = nd.arccos(x)
# expected output for indices=(0, 1, -3, -2, -1) after applying arccos()
expected_output = [np.pi, 3*np.pi/4, np.pi/2, np.pi/4, 0]
assert_correctness_of_trigonometric_ops(y, expected_output)
def check_arctan():
x = create_input_for_trigonometric_ops([-np.Inf, -1, 0, 1, np.Inf])
y = nd.arctan(x)
# expected output for indices=(0, 1, -3, -2, -1) after applying arctan()
expected_output = [-np.pi/2, -np.pi/4, 0, np.pi/4, np.pi/2]
assert_correctness_of_trigonometric_ops(y, expected_output)
def check_sin():
x = create_input_for_trigonometric_ops([-np.pi/2, -np.pi/4, 0, np.pi/4, np.pi/2])
y = nd.sin(x)
# expected output for indices=(0, 1, -3, -2, -1) after applying sin()
expected_output = [-1, -.707, 0, .707, 1]
assert_correctness_of_trigonometric_ops(y, expected_output)
def check_cos():
x = create_input_for_trigonometric_ops([0, np.pi/4, np.pi/2, 3*np.pi/4, np.pi])
y = nd.cos(x)
# expected output for indices=(0, 1, -3, -2, -1) after applying cos()
expected_output = [1, .707, 0, -.707, -1]
assert_correctness_of_trigonometric_ops(y, expected_output)
def check_tan():
x = create_input_for_trigonometric_ops([-np.pi/6, -np.pi/4, 0, np.pi/4, np.pi/6])
y = nd.tan(x)
# expected output for indices=(0, 1, -3, -2, -1) after applying tan()
expected_output = [-.577, -1, 0, 1, .577]
assert_correctness_of_trigonometric_ops(y, expected_output)
def check_arcsinh():
x = create_input_for_trigonometric_ops([-np.pi/2, -np.pi/4, 0, np.pi/4, np.pi/2])
y = nd.arcsinh(x)
# expected output for indices=(0, 1, -3, -2, -1) after applying arcsinh()
expected_output = [np.arcsinh(-np.pi/2), np.arcsinh(-np.pi/4), 0, np.arcsinh(np.pi/4), np.arcsinh(np.pi/2)]
assert_correctness_of_trigonometric_ops(y, expected_output)
def check_arccosh():
x = create_input_for_trigonometric_ops([1, np.pi/2, 3*np.pi/4, np.pi, 5*np.pi/4])
y = nd.arccosh(x)
# expected output for indices=(0, 1, -3, -2, -1) after applying arccosh()
expected_output = [0, np.arccosh(np.pi/2), np.arccosh(3*np.pi/4), np.arccosh(np.pi), np.arccosh(5*np.pi/4)]
assert_correctness_of_trigonometric_ops(y, expected_output)
def check_arctanh():
x = create_input_for_trigonometric_ops([-1/4, -1/2, 0, 1/4, 1/2])
y = nd.arctanh(x)
# expected output for indices=(0, 1, -3, -2, -1) after applying arctanh()
expected_output = [np.arctanh(-1/4), np.arctanh(-1/2), 0, np.arctanh(1/4), np.arctanh(1/2)]
assert_correctness_of_trigonometric_ops(y, expected_output)
def check_sinh():
x = create_input_for_trigonometric_ops([-np.pi/2, -np.pi/4, 0, np.pi/4, np.pi/2])
y = nd.sinh(x)
# expected output for indices=(0, 1, -3, -2, -1) after applying sinh()
expected_output = [np.sinh(-np.pi/2), np.sinh(-np.pi/4), 0, np.sinh(np.pi/4), np.sinh(np.pi/2)]
assert_correctness_of_trigonometric_ops(y, expected_output)
def check_cosh():
x = create_input_for_trigonometric_ops([0, 1, np.pi/2, 3*np.pi/4, np.pi])
y = nd.cosh(x)
# expected output for indices=(0, 1, -3, -2, -1) after applying cosh()
expected_output = [1, np.cosh(1), np.cosh(np.pi/2), np.cosh(3*np.pi/4), np.cosh(np.pi)]
assert_correctness_of_trigonometric_ops(y, expected_output)
def check_tanh():
x = create_input_for_trigonometric_ops([-1/4, -1/2, 0, 1/4, 1/2])
y = nd.tanh(x)
# expected output for indices=(0, 1, -3, -2, -1) after applying tanh()
expected_output = [np.tanh(-1/4), np.tanh(-1/2), 0, np.tanh(1/4), np.tanh(1/2)]
assert_correctness_of_trigonometric_ops(y, expected_output)
def check_radians():
x = create_input_for_trigonometric_ops([0, 90, 180, 270, 360])
y = nd.radians(x)
# expected output for indices=(0, 1, -3, -2, -1) after applying radians()
expected_output = [0, np.pi/2, np.pi, 3*np.pi/2, 2*np.pi]
assert_correctness_of_trigonometric_ops(y, expected_output)
def check_degrees():
x = create_input_for_trigonometric_ops([0, np.pi/2, np.pi, 3*np.pi/2, 2*np.pi])
y = nd.degrees(x)
# expected output for indices=(0, 1, -3, -2, -1) after applying degrees()
expected_output = [0, 90, 180, 270, 360]
assert_correctness_of_trigonometric_ops(y, expected_output)
check_arcsin()
check_arccos()
check_arctan()
check_sin()
check_cos()
check_tan()
check_arcsinh()
check_arccosh()
check_arctanh()
check_sinh()
check_cosh()
check_tanh()
check_radians()
check_degrees()
def check_add_n():
x = [nd.ones(LARGE_X)]
y = nd.add_n(*x)
assert y[0] == 1
assert y[-1] == 1
def check_modulo():
x = mx.nd.ones(LARGE_X)*6
y = mx.nd.ones(LARGE_X)*4
z = (x % y)
assert z[0] == 2
assert z[-1] == 2
x = mx.nd.ones(LARGE_X)*5
z = nd.modulo(x, y)
assert z[0] == 1
assert z[-1] == 1
def check_maximum():
x = mx.nd.ones(LARGE_X)*3
y = mx.nd.ones(LARGE_X)*4
z = nd.maximum(x, y)
assert z[0] == 4
assert z[-1] == 4
z = nd.maximum(x, 5)
assert z[0] == 5
assert z[-1] == 5
def check_minimum():
x = mx.nd.ones(LARGE_X)*3
y = mx.nd.ones(LARGE_X)*2
z = nd.minimum(x, y)
assert z[0] == 2
assert z[-1] == 2
z = nd.minimum(x, 5)
assert z[0] == 3
assert z[-1] == 3
check_elementwise()
check_argmin()
check_argsort()
check_sort()
check_topk()
check_mean()
check_exponent_logarithm_operators()
check_power_operators()
check_add()
check_sub()
check_rsub()
check_neg()
check_mul()
check_div()
check_rdiv()
check_mod()
check_rmod()
check_imod()
check_pow()
check_rpow()
check_sum()
check_prod()
check_min()
check_max()
check_argmax()
check_iadd()
check_isub()
check_imul()
check_idiv()
check_eq()
check_neq()
check_lt()
check_lte()
check_gt()
check_gte()
check_logical()
check_rounding_ops()
check_trigonometric_ops()
check_add_n()
check_modulo()
check_maximum()
check_minimum()
if __name__ == '__main__':
import nose
nose.runmodule()
| 32.646168
| 119
| 0.558843
|
b5c799d7c4e219b740d8e429ce56bd0c958d7066
| 16,678
|
py
|
Python
|
addons/purchase_mrp/tests/test_purchase_mrp_flow.py
|
SHIVJITH/Odoo_Machine_Test
|
310497a9872db7844b521e6dab5f7a9f61d365a4
|
[
"Apache-2.0"
] | null | null | null |
addons/purchase_mrp/tests/test_purchase_mrp_flow.py
|
SHIVJITH/Odoo_Machine_Test
|
310497a9872db7844b521e6dab5f7a9f61d365a4
|
[
"Apache-2.0"
] | null | null | null |
addons/purchase_mrp/tests/test_purchase_mrp_flow.py
|
SHIVJITH/Odoo_Machine_Test
|
310497a9872db7844b521e6dab5f7a9f61d365a4
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.tests.common import Form, TransactionCase
class TestSaleMrpFlow(TransactionCase):
def setUp(self):
super(TestSaleMrpFlow, self).setUp()
# Useful models
self.UoM = self.env['uom.uom']
self.categ_unit = self.env.ref('uom.product_uom_categ_unit')
self.categ_kgm = self.env.ref('uom.product_uom_categ_kgm')
self.stock_location = self.env.ref('stock.stock_location_stock')
self.warehouse = self.env.ref('stock.warehouse0')
self.uom_kg = self.env['uom.uom'].search([('category_id', '=', self.categ_kgm.id), ('uom_type', '=', 'reference')],
limit=1)
self.uom_kg.write({
'name': 'Test-KG',
'rounding': 0.000001})
self.uom_gm = self.UoM.create({
'name': 'Test-G',
'category_id': self.categ_kgm.id,
'uom_type': 'smaller',
'factor': 1000.0,
'rounding': 0.001})
self.uom_unit = self.env['uom.uom'].search(
[('category_id', '=', self.categ_unit.id), ('uom_type', '=', 'reference')], limit=1)
self.uom_unit.write({
'name': 'Test-Unit',
'rounding': 0.01})
self.uom_dozen = self.UoM.create({
'name': 'Test-DozenA',
'category_id': self.categ_unit.id,
'factor_inv': 12,
'uom_type': 'bigger',
'rounding': 0.001})
# Creating all components
self.component_a = self._create_product('Comp A', self.uom_unit)
self.component_b = self._create_product('Comp B', self.uom_unit)
self.component_c = self._create_product('Comp C', self.uom_unit)
self.component_d = self._create_product('Comp D', self.uom_unit)
self.component_e = self._create_product('Comp E', self.uom_unit)
self.component_f = self._create_product('Comp F', self.uom_unit)
self.component_g = self._create_product('Comp G', self.uom_unit)
# Create a kit 'kit_1' :
# -----------------------
#
# kit_1 --|- component_a x2
# |- component_b x1
# |- component_c x3
self.kit_1 = self._create_product('Kit 1', self.uom_unit)
self.bom_kit_1 = self.env['mrp.bom'].create({
'product_tmpl_id': self.kit_1.product_tmpl_id.id,
'product_qty': 1.0,
'type': 'phantom'})
BomLine = self.env['mrp.bom.line']
BomLine.create({
'product_id': self.component_a.id,
'product_qty': 2.0,
'bom_id': self.bom_kit_1.id})
BomLine.create({
'product_id': self.component_b.id,
'product_qty': 1.0,
'bom_id': self.bom_kit_1.id})
BomLine.create({
'product_id': self.component_c.id,
'product_qty': 3.0,
'bom_id': self.bom_kit_1.id})
# Create a kit 'kit_parent' :
# ---------------------------
#
# kit_parent --|- kit_2 x2 --|- component_d x1
# | |- kit_1 x2 -------|- component_a x2
# | |- component_b x1
# | |- component_c x3
# |
# |- kit_3 x1 --|- component_f x1
# | |- component_g x2
# |
# |- component_e x1
# Creating all kits
self.kit_2 = self._create_product('Kit 2', self.uom_unit)
self.kit_3 = self._create_product('kit 3', self.uom_unit)
self.kit_parent = self._create_product('Kit Parent', self.uom_unit)
# Linking the kits and the components via some 'phantom' BoMs
bom_kit_2 = self.env['mrp.bom'].create({
'product_tmpl_id': self.kit_2.product_tmpl_id.id,
'product_qty': 1.0,
'type': 'phantom'})
BomLine.create({
'product_id': self.component_d.id,
'product_qty': 1.0,
'bom_id': bom_kit_2.id})
BomLine.create({
'product_id': self.kit_1.id,
'product_qty': 2.0,
'bom_id': bom_kit_2.id})
bom_kit_parent = self.env['mrp.bom'].create({
'product_tmpl_id': self.kit_parent.product_tmpl_id.id,
'product_qty': 1.0,
'type': 'phantom'})
BomLine.create({
'product_id': self.component_e.id,
'product_qty': 1.0,
'bom_id': bom_kit_parent.id})
BomLine.create({
'product_id': self.kit_2.id,
'product_qty': 2.0,
'bom_id': bom_kit_parent.id})
bom_kit_3 = self.env['mrp.bom'].create({
'product_tmpl_id': self.kit_3.product_tmpl_id.id,
'product_qty': 1.0,
'type': 'phantom'})
BomLine.create({
'product_id': self.component_f.id,
'product_qty': 1.0,
'bom_id': bom_kit_3.id})
BomLine.create({
'product_id': self.component_g.id,
'product_qty': 2.0,
'bom_id': bom_kit_3.id})
BomLine.create({
'product_id': self.kit_3.id,
'product_qty': 2.0,
'bom_id': bom_kit_parent.id})
def _create_product(self, name, uom_id, routes=()):
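# Create a storable product with the given UoM and optional routes through
# the Form test helper, so the usual onchange defaults are applied.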
p = Form(self.env['product.product'])
p.name = name
p.type = 'product'
p.uom_id = uom_id
p.uom_po_id = uom_id
p.route_ids.clear()
for r in routes:
p.route_ids.add(r)
return p.save()
def _process_quantities(self, moves, quantities_to_process):
""" Helper to process quantities based on a dict following this structure :
qty_to_process = {
product_id: qty
}
"""
moves_to_process = moves.filtered(lambda m: m.product_id in quantities_to_process.keys())
for move in moves_to_process:
move.write({'quantity_done': quantities_to_process[move.product_id]})
def _assert_quantities(self, moves, quantities_to_process):
""" Helper to check expected quantities based on a dict following this structure :
qty_to_process = {
product_id: qty
...
}
"""
moves_to_process = moves.filtered(lambda m: m.product_id in quantities_to_process.keys())
for move in moves_to_process:
self.assertEqual(move.product_uom_qty, quantities_to_process[move.product_id])
def _create_move_quantities(self, qty_to_process, components, warehouse):
""" Helper to creates moves in order to update the quantities of components
on a specific warehouse. This ensure that all compute fields are triggered.
The structure of qty_to_process should be the following :
qty_to_process = {
component: (qty, uom),
...
}
"""
for comp in components:
f = Form(self.env['stock.move'])
f.name = 'Test Receipt Components'
f.location_id = self.env.ref('stock.stock_location_suppliers')
f.location_dest_id = warehouse.lot_stock_id
f.product_id = comp
f.product_uom = qty_to_process[comp][1]
f.product_uom_qty = qty_to_process[comp][0]
move = f.save()
move._action_confirm()
move._action_assign()
move_line = move.move_line_ids[0]
move_line.qty_done = qty_to_process[comp][0]
move._action_done()
def test_01_sale_mrp_kit_qty_delivered(self):
""" Test that the quantities delivered are correct when
a kit with subkits is ordered with multiple backorders and returns
"""
# 'kit_parent' structure:
# ---------------------------
#
# kit_parent --|- kit_2 x2 --|- component_d x1
# | |- kit_1 x2 -------|- component_a x2
# | |- component_b x1
# | |- component_c x3
# |
# |- kit_3 x1 --|- component_f x1
# | |- component_g x2
# |
# |- component_e x1
# Creation of a sale order for x7 kit_parent
partner = self.env['res.partner'].create({'name': 'My Test Partner'})
f = Form(self.env['purchase.order'])
f.partner_id = partner
with f.order_line.new() as line:
line.product_id = self.kit_parent
line.product_qty = 7.0
line.price_unit = 10
po = f.save()
po.button_confirm()
# Check picking creation: its move lines should concern
# only components. Also check that the quantities correspond
# to the PO
self.assertEqual(len(po.picking_ids), 1)
order_line = po.order_line[0]
picking_original = po.picking_ids[0]
move_lines = picking_original.move_lines
products = move_lines.mapped('product_id')
kits = [self.kit_parent, self.kit_3, self.kit_2, self.kit_1]
components = [self.component_a, self.component_b, self.component_c, self.component_d, self.component_e,
self.component_f, self.component_g]
expected_quantities = {
self.component_a: 56.0,
self.component_b: 28.0,
self.component_c: 84.0,
self.component_d: 14.0,
self.component_e: 7.0,
self.component_f: 14.0,
self.component_g: 28.0
}
self.assertEqual(len(move_lines), 7)
self.assertTrue(not any(kit in products for kit in kits))
self.assertTrue(all(component in products for component in components))
self._assert_quantities(move_lines, expected_quantities)
# Process only 7 units of each component
qty_to_process = 7
move_lines.write({'quantity_done': qty_to_process})
# Create a backorder for the missing components
pick = po.picking_ids[0]
res = pick.button_validate()
Form(self.env[res['res_model']].with_context(res['context'])).save().process()
# Check that a backorder is created
self.assertEqual(len(po.picking_ids), 2)
backorder_1 = po.picking_ids - picking_original
self.assertEqual(backorder_1.backorder_id.id, picking_original.id)
# Even if some components are received completely,
# no KitParent should be received
self.assertEqual(order_line.qty_received, 0)
# Process just enough components to make 1 kit_parent
qty_to_process = {
self.component_a: 1,
self.component_c: 5,
}
self._process_quantities(backorder_1.move_lines, qty_to_process)
# Create a backorder for the missing components
res = backorder_1.button_validate()
Form(self.env[res['res_model']].with_context(res['context'])).save().process()
# Only 1 kit_parent should be received at this point
self.assertEqual(order_line.qty_received, 1)
# Check that the second backorder is created
self.assertEqual(len(po.picking_ids), 3)
backorder_2 = po.picking_ids - picking_original - backorder_1
self.assertEqual(backorder_2.backorder_id.id, backorder_1.id)
# Set the components quantities that backorder_2 should have
expected_quantities = {
self.component_a: 48,
self.component_b: 21,
self.component_c: 72,
self.component_d: 7,
self.component_f: 7,
self.component_g: 21
}
# Check that the computed quantities match the theoretical ones.
# Since component_e was fully processed, this component shouldn't be
# present in backorder_2
self.assertEqual(len(backorder_2.move_lines), 6)
move_comp_e = backorder_2.move_lines.filtered(lambda m: m.product_id.id == self.component_e.id)
self.assertFalse(move_comp_e)
self._assert_quantities(backorder_2.move_lines, expected_quantities)
# Process enough components to make x3 kit_parents
qty_to_process = {
self.component_a: 16,
self.component_b: 5,
self.component_c: 24,
self.component_g: 5
}
self._process_quantities(backorder_2.move_lines, qty_to_process)
# Create a backorder for the missing components
res = backorder_2.button_validate()
Form(self.env[res['res_model']].with_context(res['context'])).save().process()
# Check that x3 kit_parents are indeed received
self.assertEqual(order_line.qty_received, 3)
# Check that the third backorder is created
self.assertEqual(len(po.picking_ids), 4)
backorder_3 = po.picking_ids - (picking_original + backorder_1 + backorder_2)
self.assertEqual(backorder_3.backorder_id.id, backorder_2.id)
# Check the components quantities that backorder_3 should have
expected_quantities = {
self.component_a: 32,
self.component_b: 16,
self.component_c: 48,
self.component_d: 7,
self.component_f: 7,
self.component_g: 16
}
self._assert_quantities(backorder_3.move_lines, expected_quantities)
# Process all missing components
self._process_quantities(backorder_3.move_lines, expected_quantities)
# Validate the last backorder now that it's complete.
# All kits should be received
backorder_3.button_validate()
self.assertEqual(order_line.qty_received, 7.0)
# Return all components processed by backorder_3
stock_return_picking_form = Form(self.env['stock.return.picking']
.with_context(active_ids=backorder_3.ids, active_id=backorder_3.ids[0],
active_model='stock.picking'))
return_wiz = stock_return_picking_form.save()
for return_move in return_wiz.product_return_moves:
return_move.write({
'quantity': expected_quantities[return_move.product_id],
'to_refund': True
})
res = return_wiz.create_returns()
return_pick = self.env['stock.picking'].browse(res['res_id'])
# Process all components and validate the picking
wiz_act = return_pick.button_validate()
wiz = Form(self.env[wiz_act['res_model']].with_context(wiz_act['context'])).save()
wiz.process()
# Now quantity received should be 3 again
self.assertEqual(order_line.qty_received, 3)
stock_return_picking_form = Form(self.env['stock.return.picking']
.with_context(active_ids=return_pick.ids, active_id=return_pick.ids[0],
active_model='stock.picking'))
return_wiz = stock_return_picking_form.save()
for move in return_wiz.product_return_moves:
move.quantity = expected_quantities[move.product_id]
res = return_wiz.create_returns()
return_of_return_pick = self.env['stock.picking'].browse(res['res_id'])
# Process all components except one of each
for move in return_of_return_pick.move_lines:
move.write({
'quantity_done': expected_quantities[move.product_id] - 1,
'to_refund': True
})
wiz_act = return_of_return_pick.button_validate()
wiz = Form(self.env[wiz_act['res_model']].with_context(wiz_act['context'])).save()
wiz.process()
# As one of each component is missing, only 6 kit_parents should be received
self.assertEqual(order_line.qty_received, 6)
# Check that the 4th backorder is created.
self.assertEqual(len(po.picking_ids), 7)
backorder_4 = po.picking_ids - (
picking_original + backorder_1 + backorder_2 + backorder_3 + return_of_return_pick + return_pick)
self.assertEqual(backorder_4.backorder_id.id, return_of_return_pick.id)
# Check the components quantities that backorder_4 should have
for move in backorder_4.move_lines:
self.assertEqual(move.product_qty, 1)
| 40.480583
| 123
| 0.586701
|
62be6027e7585b38a5b0f05ca10a744127759863
| 298
|
py
|
Python
|
python-modules/twisted/twisted/plugins/twisted_telnet.py
|
stormtheh4ck3r/python-for-android
|
b9ea9161392f60566b81482b1e25cd77004d5c45
|
[
"Apache-2.0"
] | 267
|
2015-03-22T15:23:48.000Z
|
2022-03-05T21:57:34.000Z
|
python-modules/twisted/twisted/plugins/twisted_telnet.py
|
stormtheh4ck3r/python-for-android
|
b9ea9161392f60566b81482b1e25cd77004d5c45
|
[
"Apache-2.0"
] | 133
|
2015-03-21T15:13:43.000Z
|
2021-12-11T23:37:58.000Z
|
python-modules/twisted/twisted/plugins/twisted_telnet.py
|
stormtheh4ck3r/python-for-android
|
b9ea9161392f60566b81482b1e25cd77004d5c45
|
[
"Apache-2.0"
] | 119
|
2015-04-28T16:07:10.000Z
|
2022-03-18T03:49:48.000Z
|
# Copyright (c) 2001-2008 Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.application.service import ServiceMaker
TwistedTelnet = ServiceMaker(
"Twisted Telnet Shell Server",
"twisted.tap.telnet",
"A simple, telnet-based remote debugging service.",
"telnet")
| 27.090909
| 55
| 0.741611
|
de25301d6dd14b2e0ebeab58a90f24bb227783fc
| 522
|
py
|
Python
|
graphs/graph_matrix.py
|
joeyzhou85/python
|
9c0cbe33076a570a3c02825b7c6d9866a760e777
|
[
"MIT"
] | 1,568
|
2019-04-25T11:54:45.000Z
|
2022-03-31T23:35:23.000Z
|
graphs/graph_matrix.py
|
joeyzhou85/python
|
9c0cbe33076a570a3c02825b7c6d9866a760e777
|
[
"MIT"
] | 58
|
2019-02-20T10:45:50.000Z
|
2020-09-30T12:18:45.000Z
|
graphs/graph_matrix.py
|
joeyzhou85/python
|
9c0cbe33076a570a3c02825b7c6d9866a760e777
|
[
"MIT"
] | 464
|
2019-04-17T04:57:16.000Z
|
2022-03-31T04:12:57.000Z
|
from __future__ import print_function
class Graph:
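# Undirected graph stored as a vertex x vertex adjacency matrix;
# add_edge() takes 1-based vertex labels.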
def __init__(self, vertex):
self.vertex = vertex
self.graph = [[0] * vertex for _ in range(vertex)]
def add_edge(self, u, v):
self.graph[u - 1][v - 1] = 1
self.graph[v - 1][u - 1] = 1
def show(self):
for i in self.graph:
for j in i:
print(j, end=' ')
print(' ')
g = Graph(100)
g.add_edge(1,4)
g.add_edge(4,2)
g.add_edge(4,5)
g.add_edge(2,5)
g.add_edge(5,3)
g.show()
| 15.818182
| 59
| 0.52682
|
aac850197673fe0ff3c3f02829c44f7a502eb98c
| 8,423
|
py
|
Python
|
tests/test_tf_model.py
|
NREL/phygnn
|
3a508ccd3efda66e851d418f9f4eda319d58a947
|
[
"BSD-3-Clause"
] | 41
|
2020-08-20T17:05:00.000Z
|
2022-03-04T12:23:50.000Z
|
tests/test_tf_model.py
|
NREL/phygnn
|
3a508ccd3efda66e851d418f9f4eda319d58a947
|
[
"BSD-3-Clause"
] | 19
|
2020-08-24T17:14:04.000Z
|
2022-03-28T22:37:15.000Z
|
tests/test_tf_model.py
|
NREL/phygnn
|
3a508ccd3efda66e851d418f9f4eda319d58a947
|
[
"BSD-3-Clause"
] | 11
|
2020-09-24T16:54:17.000Z
|
2022-02-15T00:07:58.000Z
|
"""
Tests for basic tensorflow model functionality and execution.
"""
# pylint: disable=W0613
import numpy as np
import os
import pandas as pd
import pytest
import tempfile
from phygnn.utilities import TF2
from phygnn.model_interfaces.tf_model import TfModel
TfModel.seed(0)
if TF2:
mae_key = 'val_mae'
else:
mae_key = 'val_mean_absolute_error'
N = 100
A = np.linspace(-1, 1, N)
B = np.linspace(-1, 1, N)
A, B = np.meshgrid(A, B)
A = np.expand_dims(A.flatten(), axis=1)
B = np.expand_dims(B.flatten(), axis=1)
Y = np.sqrt(A ** 2 + B ** 2)
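# label: Euclidean norm of the two features -- a simple nonlinear target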
X = np.hstack((A, B))
FEATURES = pd.DataFrame(X, columns=['a', 'b'])
LABELS = pd.DataFrame(Y, columns=['c'])
@pytest.mark.parametrize(
('hidden_layers', 'loss'),
[(None, 0.6),
([{'units': 64, 'activation': 'relu', 'name': 'relu1'},
{'units': 64, 'activation': 'relu', 'name': 'relu2'}], 0.03)])
def test_nn(hidden_layers, loss):
"""Test TfModel """
model = TfModel.build_trained(FEATURES, LABELS,
hidden_layers=hidden_layers,
epochs=10,
fit_kwargs={"batch_size": 16},
early_stop=False)
n_l = len(hidden_layers) * 2 + 1 if hidden_layers is not None else 1
n_w = (len(hidden_layers) + 1) * 2 if hidden_layers is not None else 2
assert len(model.layers) == n_l
assert len(model.weights) == n_w
assert len(model.history) == 10
test_mae = np.mean(np.abs(model[X].values - Y))
assert model.history[mae_key].values[-1] < loss
assert test_mae < loss
@pytest.mark.parametrize(
('normalize', 'loss'),
[(True, 0.09),
(False, 0.015),
((True, False), 0.01),
((False, True), 0.09)])
def test_normalize(normalize, loss):
"""Test TfModel """
hidden_layers = [{'units': 64, 'activation': 'relu', 'name': 'relu1'},
{'units': 64, 'activation': 'relu', 'name': 'relu2'}]
model = TfModel.build_trained(FEATURES, LABELS,
normalize=normalize,
hidden_layers=hidden_layers,
epochs=10, fit_kwargs={"batch_size": 16},
early_stop=False)
test_mae = np.mean(np.abs(model[X].values - Y))
assert model.history[mae_key].values[-1] < loss
assert test_mae < loss
def test_complex_nn():
"""Test complex TfModel """
hidden_layers = [{'units': 64, 'activation': 'relu', 'dropout': 0.01},
{'units': 64},
{'batch_normalization': {'axis': -1}},
{'activation': 'relu'},
{'dropout': 0.01}]
model = TfModel.build_trained(FEATURES, LABELS,
hidden_layers=hidden_layers,
epochs=10, fit_kwargs={"batch_size": 16},
early_stop=False)
assert len(model.layers) == 8
assert len(model.weights) == 10
test_mae = np.mean(np.abs(model[X].values - Y))
loss = 0.15
assert model.history[mae_key].values[-1] < loss
assert test_mae < loss
def test_dropout():
"""Test a model trained with dropout vs. no dropout and make sure the
predictions are different."""
hidden_layers_1 = [{'units': 64, 'activation': 'relu'},
{'units': 64}, {'activation': 'relu'}]
hidden_layers_2 = [{'units': 64, 'activation': 'relu', 'dropout': 0.05},
{'units': 64}, {'activation': 'relu'},
{'dropout': 0.05}]
TfModel.seed()
model_1 = TfModel.build_trained(FEATURES, LABELS,
hidden_layers=hidden_layers_1,
epochs=10, fit_kwargs={"batch_size": 16},
early_stop=False)
TfModel.seed()
model_2 = TfModel.build_trained(FEATURES, LABELS,
hidden_layers=hidden_layers_2,
epochs=10, fit_kwargs={"batch_size": 16},
early_stop=False)
out1 = model_1.history[mae_key].values[-5:]
out2 = model_2.history[mae_key].values[-5:]
assert (out2 > out1).all()
def test_save_load():
"""Test the save/load operations of TfModel"""
with tempfile.TemporaryDirectory() as td:
model_fpath = os.path.join(td, 'test_model/')
hidden_layers = [{'units': 64, 'activation': 'relu', 'name': 'relu1'},
{'units': 64, 'activation': 'relu', 'name': 'relu2'}]
model = TfModel.build_trained(FEATURES, LABELS,
hidden_layers=hidden_layers,
epochs=10, fit_kwargs={"batch_size": 16},
early_stop=False,
save_path=model_fpath)
y_pred = model[X]
loaded = TfModel.load(model_fpath)
y_pred_loaded = loaded[X]
assert np.allclose(y_pred.values, y_pred_loaded.values)
assert loaded.feature_names == ['a', 'b']
assert loaded.label_names == ['c']
def test_OHE():
"""
Test one-hot encoding
"""
ohe_features = FEATURES.copy()
categories = list('def')
ohe_features['categorical'] = np.random.choice(categories, len(FEATURES))
one_hot_categories = {'categorical': categories}
hidden_layers = [{'units': 64, 'activation': 'relu', 'name': 'relu1'},
{'units': 64, 'activation': 'relu', 'name': 'relu2'}]
model = TfModel.build_trained(ohe_features, LABELS,
one_hot_categories=one_hot_categories,
hidden_layers=hidden_layers,
epochs=10, fit_kwargs={"batch_size": 16},
early_stop=False)
assert all(np.isin(categories, model.feature_names))
assert not any(np.isin(categories, model.input_feature_names))
assert 'categorical' not in model.feature_names
assert 'categorical' in model.input_feature_names
x = ohe_features.values
out = model.predict(x)
assert 'c' in out
def test_bad_categories():
"""
Test OHE checks
"""
hidden_layers = [{'units': 64, 'activation': 'relu', 'name': 'relu1'},
{'units': 64, 'activation': 'relu', 'name': 'relu2'}]
one_hot_categories = {'categorical': list('abc')}
feature_names = FEATURES.columns.tolist() + ['categorical']
label_names = 'c'
with pytest.raises(RuntimeError):
TfModel.build(feature_names, label_names,
one_hot_categories=one_hot_categories,
hidden_layers=hidden_layers)
one_hot_categories = {'categorical': list('cdf')}
feature_names = FEATURES.columns.tolist() + ['categorical']
label_names = 'c'
with pytest.raises(RuntimeError):
TfModel.build(feature_names, label_names,
one_hot_categories=one_hot_categories,
hidden_layers=hidden_layers)
one_hot_categories = {'categorical': list('def')}
feature_names = FEATURES.columns.tolist() + ['categories']
label_names = 'c'
with pytest.raises(RuntimeError):
TfModel.build(feature_names, label_names,
one_hot_categories=one_hot_categories,
hidden_layers=hidden_layers)
one_hot_categories = {'cat1': list('def'), 'cat2': list('fgh')}
feature_names = FEATURES.columns.tolist() + ['cat1', 'cat2']
label_names = 'c'
with pytest.raises(RuntimeError):
TfModel.build(feature_names, label_names,
one_hot_categories=one_hot_categories,
hidden_layers=hidden_layers)
ohe_features = FEATURES.copy()
categories = list('def')
ohe_features['categorical'] = np.random.choice(categories, len(FEATURES))
one_hot_categories = {'categorical': categories}
model = TfModel.build_trained(ohe_features, LABELS,
one_hot_categories=one_hot_categories,
hidden_layers=hidden_layers,
epochs=10, fit_kwargs={"batch_size": 16},
early_stop=False)
with pytest.raises(RuntimeError):
x = ohe_features.values[:, 1:]
model.predict(x)
| 37.435556
| 79
| 0.560964
|
a2c1f387fbccbf427bba3450e53e87296add5c4d
| 10,839
|
py
|
Python
|
8_pytorch_rfbnet/lib/data/coco.py
|
Het-Shah/Monk_Object_Detection
|
1d7a07193ea3455221caa41d07c33c81d50c6b3f
|
[
"Apache-2.0"
] | 15
|
2020-05-08T09:22:23.000Z
|
2022-01-10T19:13:16.000Z
|
8_pytorch_rfbnet/lib/data/coco.py
|
nathnim/Monk_Object_Detection
|
1d7a07193ea3455221caa41d07c33c81d50c6b3f
|
[
"Apache-2.0"
] | 11
|
2021-06-08T21:23:31.000Z
|
2022-03-12T00:25:55.000Z
|
8_pytorch_rfbnet/lib/data/coco.py
|
nathnim/Monk_Object_Detection
|
1d7a07193ea3455221caa41d07c33c81d50c6b3f
|
[
"Apache-2.0"
] | 18
|
2020-05-05T14:04:45.000Z
|
2021-10-30T12:02:17.000Z
|
"""VOC Dataset Classes
Original author: Francisco Massa
https://github.com/fmassa/vision/blob/voc_dataset/torchvision/datasets/voc.py
Updated by: Ellis Brown, Max deGroot
"""
import os
import pickle
import os.path
import sys
import torch
import torch.utils.data as data
import torchvision.transforms as transforms
import cv2
import numpy as np
import json
import uuid
from utils.pycocotools.coco import COCO
from utils.pycocotools.cocoeval import COCOeval
from utils.pycocotools import mask as COCOmask
class COCODetection(data.Dataset):
def __init__(self, root_dir, coco_dir, set_dir, preproc=None, target_transform=None,
dataset_name='COCO'):
self.root = root_dir
self.root_dir = root_dir
self.coco_dir = coco_dir
self.set_dir = set_dir
self.coco_name = root_dir + "_" + coco_dir + "_" + set_dir;
self.data_path = os.getcwd();
self.cache_path = os.path.join(self.data_path, 'coco_cache')
if(not os.path.isdir(self.cache_path)):
os.mkdir(self.cache_path)
self.image_set = set_dir
self.preproc = preproc
self.target_transform = target_transform
self.name = dataset_name
self.ids = list()
self.annotations = list()
annofile = self._get_ann_file(set_dir)
_COCO = COCO(annofile)
self._COCO = _COCO
self._classes = ['__background__']
self._classes_file = os.path.join(self.root_dir, self.coco_dir, 'annotations', 'classes.txt')
f = open(self._classes_file)
lines = f.readlines()
f.close()
for i in range(len(lines)):
self._classes.append(lines[i][:len(lines[i]) - 1])
self._classes = tuple(self._classes)
self.num_classes = len(self._classes)
cats = _COCO.loadCats(_COCO.getCatIds())
self._class_to_ind = dict(zip(self._classes, range(self.num_classes)))
self._class_to_coco_cat_id = dict(zip([c['name'] for c in cats],
_COCO.getCatIds()))
indexes = _COCO.getImgIds()
self.image_indexes = indexes
self.ids.extend([self.image_path_from_index(index) for index in indexes ])
self.annotations.extend(self._load_coco_annotations(self.coco_name, indexes, _COCO))
def image_path_from_index(self, index):
"""
Construct an image path from the image's "index" identifier.
"""
# Example image path for index=119993:
# images/train2014/COCO_train2014_000000119993.jpg
im_ann = self._COCO.loadImgs(index)[0]
file_name = im_ann['file_name']
image_path = os.path.join(self.root, self.coco_dir, self.set_dir,
file_name)
assert os.path.exists(image_path), \
'Path does not exist: {}'.format(image_path)
return image_path
def _get_ann_file(self, name):
return os.path.join(self.root, self.coco_dir, 'annotations',
'instances_' + name + '.json')
def _load_coco_annotations(self, coco_name, indexes, _COCO):
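# Ground-truth annotations are cached to a pickle under coco_cache/ so that
# repeated runs skip re-parsing the COCO annotation JSON.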
cache_file=os.path.join(self.cache_path, coco_name+'_gt_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = pickle.load(fid)
print('{} gt roidb loaded from {}'.format(coco_name,cache_file))
return roidb
gt_roidb = [self._annotation_from_index(index, _COCO)
for index in indexes]
with open(cache_file, 'wb') as fid:
pickle.dump(gt_roidb,fid,pickle.HIGHEST_PROTOCOL)
print('wrote gt roidb to {}'.format(cache_file))
return gt_roidb
def _annotation_from_index(self, index, _COCO):
"""
Loads COCO bounding-box instance annotations. Crowd instances are
handled by marking their overlaps (with all categories) to -1. This
overlap value means that crowd "instances" are excluded from training.
"""
im_ann = _COCO.loadImgs(index)[0]
width = im_ann['width']
height = im_ann['height']
annIds = _COCO.getAnnIds(imgIds=index, iscrowd=None)
objs = _COCO.loadAnns(annIds)
# Sanitize bboxes -- some are invalid
valid_objs = []
for obj in objs:
x1 = np.max((0, obj['bbox'][0]))
y1 = np.max((0, obj['bbox'][1]))
x2 = np.min((width - 1, x1 + np.max((0, obj['bbox'][2] - 1))))
y2 = np.min((height - 1, y1 + np.max((0, obj['bbox'][3] - 1))))
if obj['area'] > 0 and x2 >= x1 and y2 >= y1:
obj['clean_bbox'] = [x1, y1, x2, y2]
valid_objs.append(obj)
objs = valid_objs
num_objs = len(objs)
res = np.zeros((num_objs, 5))
# Lookup table to map from COCO category ids to our internal class
# indices
coco_cat_id_to_class_ind = dict([(self._class_to_coco_cat_id[cls],
self._class_to_ind[cls])
for cls in self._classes[1:]])
for ix, obj in enumerate(objs):
cls = coco_cat_id_to_class_ind[obj['category_id']]
res[ix, 0:4] = obj['clean_bbox']
res[ix, 4] = cls
return res
def __getitem__(self, index):
img_id = self.ids[index]
target = self.annotations[index]
img = cv2.imread(img_id, cv2.IMREAD_COLOR)
height, width, _ = img.shape
if self.target_transform is not None:
target = self.target_transform(target)
if self.preproc is not None:
img, target = self.preproc(img, target)
# target = self.target_transform(target, width, height)
#print(target.shape)
#to_tensor = transforms.ToTensor()
#img = to_tensor(img).unsqueeze(0)
return img, target
def __len__(self):
return len(self.ids)
def pull_image(self, index):
'''Returns the original image object at index (as an OpenCV BGR ndarray)
Note: not using self.__getitem__(), as any transformations passed in
could mess up this functionality.
Argument:
index (int): index of img to show
Return:
PIL img
'''
img_id = self.ids[index]
return cv2.imread(img_id, cv2.IMREAD_COLOR)
def pull_tensor(self, index):
'''Returns the original image at an index in tensor form
Note: not using self.__getitem__(), as any transformations passed in
could mess up this functionality.
Argument:
index (int): index of img to show
Return:
tensorized version of img, squeezed
'''
return torch.Tensor(self.pull_image(index)).unsqueeze_(0)
def _print_detection_eval_metrics(self, coco_eval):
IoU_lo_thresh = 0.5
IoU_hi_thresh = 0.95
def _get_thr_ind(coco_eval, thr):
ind = np.where((coco_eval.params.iouThrs > thr - 1e-5) &
(coco_eval.params.iouThrs < thr + 1e-5))[0][0]
iou_thr = coco_eval.params.iouThrs[ind]
assert np.isclose(iou_thr, thr)
return ind
ind_lo = _get_thr_ind(coco_eval, IoU_lo_thresh)
ind_hi = _get_thr_ind(coco_eval, IoU_hi_thresh)
# precision has dims (iou, recall, cls, area range, max dets)
# area range index 0: all area ranges
# max dets index 2: 100 per image
precision = \
coco_eval.eval['precision'][ind_lo:(ind_hi + 1), :, :, 0, 2]
ap_default = np.mean(precision[precision > -1])
print('~~~~ Mean and per-category AP @ IoU=[{:.2f},{:.2f}] '
'~~~~'.format(IoU_lo_thresh, IoU_hi_thresh))
print('{:.1f}'.format(100 * ap_default))
aps = list()
for cls_ind, cls in enumerate(self._classes):
if cls == '__background__':
continue
# minus 1 because of __background__
precision = coco_eval.eval['precision'][ind_lo:(ind_hi + 1), :, cls_ind - 1, 0, 2]
ap = np.mean(precision[precision > -1])
# print('{:.1f}'.format(100 * ap))
aps.append(100 * ap)
# print(aps)
print('~~~~ Summary metrics ~~~~')
coco_eval.summarize()
def _do_detection_eval(self, res_file, output_dir):
ann_type = 'bbox'
coco_dt = self._COCO.loadRes(res_file)
coco_eval = COCOeval(self._COCO, coco_dt)
coco_eval.params.useSegm = (ann_type == 'segm')
coco_eval.evaluate()
coco_eval.accumulate()
self._print_detection_eval_metrics(coco_eval)
eval_file = os.path.join(output_dir, 'detection_results.pkl')
with open(eval_file, 'wb') as fid:
pickle.dump(coco_eval, fid, pickle.HIGHEST_PROTOCOL)
print('Wrote COCO eval results to: {}'.format(eval_file))
def _coco_results_one_category(self, boxes, cat_id):
results = []
for im_ind, index in enumerate(self.image_indexes):
dets = boxes[im_ind].astype(np.float64)
if dets.size == 0:
continue
scores = dets[:, -1]
xs = dets[:, 0]
ys = dets[:, 1]
ws = dets[:, 2] - xs + 1
hs = dets[:, 3] - ys + 1
results.extend(
[{'image_id' : index,
'category_id' : cat_id,
'bbox' : [xs[k], ys[k], ws[k], hs[k]],
'score' : scores[k]} for k in range(dets.shape[0])])
return results
def _write_coco_results_file(self, all_boxes, res_file):
results = []
print('Collecting Results......')
for cls_ind, cls in enumerate(self._classes):
if cls == '__background__':
continue
# print('Collecting {} results ({:d}/{:d})'.format(cls, cls_ind,
# self.num_classes ))
coco_cat_id = self._class_to_coco_cat_id[cls]
results.extend(self._coco_results_one_category(all_boxes[cls_ind],
coco_cat_id))
print('Writing results json to {}'.format(res_file))
with open(res_file, 'w') as fid:
json.dump(results, fid)
def evaluate_detections(self, all_boxes, output_dir):
res_file = os.path.join(output_dir, ('detections_' +
self.coco_name +
'_results'))
res_file += '.json'
self._write_coco_results_file(all_boxes, res_file)
if self.coco_name.find('test') == -1:
self._do_detection_eval(res_file, output_dir)
| 36.372483
| 102
| 0.57819
|
adfc2718afc1bd5f0057e5ca73ec46815a89e6b5
| 19,300
|
py
|
Python
|
lib/galaxy/authnz/psa_authnz.py
|
corburn/galaxy
|
850d483bf260cba969ddb21de2f5404c15e09d1c
|
[
"CC-BY-3.0"
] | 2
|
2017-03-28T12:11:41.000Z
|
2017-04-22T02:58:25.000Z
|
lib/galaxy/authnz/psa_authnz.py
|
bioinfo1992/galaxy
|
46690eb76f3a1987ea2ac5ff0d3386177cf2154f
|
[
"CC-BY-3.0"
] | 1
|
2019-08-19T15:24:17.000Z
|
2019-08-19T15:24:17.000Z
|
lib/galaxy/authnz/psa_authnz.py
|
bioinfo1992/galaxy
|
46690eb76f3a1987ea2ac5ff0d3386177cf2154f
|
[
"CC-BY-3.0"
] | null | null | null |
import json
import requests
import six
from social_core.actions import do_auth, do_complete, do_disconnect
from social_core.backends.utils import get_backend
from social_core.strategy import BaseStrategy
from social_core.utils import module_member, setting_name
from sqlalchemy.exc import IntegrityError
from galaxy.exceptions import MalformedContents
from ..authnz import IdentityProvider
from ..model import PSAAssociation, PSACode, PSANonce, PSAPartial, UserAuthnzToken
# key: the name of a component that PSA requests.
# value: the name of the class associated with that key.
DEFAULTS = {
'STRATEGY': 'Strategy',
'STORAGE': 'Storage'
}
BACKENDS = {
'google': 'social_core.backends.google_openidconnect.GoogleOpenIdConnect',
"globus": "social_core.backends.globus.GlobusOpenIdConnect",
'elixir': 'social_core.backends.elixir.ElixirOpenIdConnect'
}
BACKENDS_NAME = {
'google': 'google-openidconnect',
"globus": "globus",
'elixir': 'elixir'
}
AUTH_PIPELINE = (
# Get the information we can about the user and return it in a simple
# format to create the user instance later. On some cases the details are
# already part of the auth response from the provider, but sometimes this
# could hit a provider API.
'social_core.pipeline.social_auth.social_details',
# Get the social uid from whichever service we're authing thru. The uid is
# the unique identifier of the given user in the provider.
'social_core.pipeline.social_auth.social_uid',
# Verifies that the current auth process is valid within the current
# project, this is where emails and domains whitelists are applied (if
# defined).
'social_core.pipeline.social_auth.auth_allowed',
# Checks if the decoded response contains all the required fields such
# as an ID token or a refresh token.
'galaxy.authnz.psa_authnz.contains_required_data',
'galaxy.authnz.psa_authnz.verify',
# Checks if the current social-account is already associated in the site.
'social_core.pipeline.social_auth.social_user',
# Make up a username for this person, appends a random string at the end if
# there's any collision.
'social_core.pipeline.user.get_username',
# Send a validation email to the user to verify its email address.
# 'social_core.pipeline.mail.mail_validation',
# Associates the current social details with another user account with
# a similar email address.
'social_core.pipeline.social_auth.associate_by_email',
# Create a user account if we haven't found one yet.
'social_core.pipeline.user.create_user',
# Create the record that associated the social account with this user.
'social_core.pipeline.social_auth.associate_user',
# Populate the extra_data field in the social record with the values
# specified by settings (and the default ones like access_token, etc).
'social_core.pipeline.social_auth.load_extra_data',
# Update the user record with any changed info from the auth service.
'social_core.pipeline.user.user_details'
)
DISCONNECT_PIPELINE = (
'galaxy.authnz.psa_authnz.allowed_to_disconnect',
'galaxy.authnz.psa_authnz.disconnect'
)
class PSAAuthnz(IdentityProvider):
def __init__(self, provider, oidc_config, oidc_backend_config):
self.config = {'provider': provider.lower()}
for key, value in oidc_config.items():
self.config[setting_name(key)] = value
self.config[setting_name('USER_MODEL')] = 'models.User'
self.config['SOCIAL_AUTH_PIPELINE'] = AUTH_PIPELINE
self.config['DISCONNECT_PIPELINE'] = DISCONNECT_PIPELINE
self.config[setting_name('AUTHENTICATION_BACKENDS')] = (BACKENDS[provider],)
self.config["VERIFY_SSL"] = oidc_config.get("VERIFY_SSL")
self.config["REQUESTS_TIMEOUT"] = oidc_config.get("REQUESTS_TIMEOUT")
self.config["ID_TOKEN_MAX_AGE"] = oidc_config.get("ID_TOKEN_MAX_AGE")
# The following config sets PSA to call the `_login_user` function for
# logging in a user. If this setting is set to false, the `_login_user`
# would not be called, and as a result Galaxy would not know who is
# the just logged-in user.
self.config[setting_name('INACTIVE_USER_LOGIN')] = True
if provider in BACKENDS_NAME:
self._setup_idp(oidc_backend_config)
# Secondary AuthZ with Google identities is currently supported
if provider != "google":
del self.config["SOCIAL_AUTH_SECONDARY_AUTH_PROVIDER"]
del self.config["SOCIAL_AUTH_SECONDARY_AUTH_ENDPOINT"]
def _setup_idp(self, oidc_backend_config):
self.config[setting_name('AUTH_EXTRA_ARGUMENTS')] = {'access_type': 'offline'}
self.config['KEY'] = oidc_backend_config.get('client_id')
self.config['SECRET'] = oidc_backend_config.get('client_secret')
self.config['redirect_uri'] = oidc_backend_config.get('redirect_uri')
if oidc_backend_config.get('prompt') is not None:
self.config[setting_name('AUTH_EXTRA_ARGUMENTS')]['prompt'] = oidc_backend_config.get('prompt')
def _get_helper(self, name, do_import=False):
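# Resolve a PSA component by name: fall back to DEFAULTS, and optionally
# import the dotted path into a class via module_member()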
this_config = self.config.get(setting_name(name), DEFAULTS.get(name, None))
return do_import and module_member(this_config) or this_config
def _get_current_user(self, trans):
return trans.user
def _load_backend(self, strategy, redirect_uri):
backends = self._get_helper('AUTHENTICATION_BACKENDS')
backend = get_backend(backends, BACKENDS_NAME[self.config['provider']])
return backend(strategy, redirect_uri)
def _login_user(self, backend, user, social_user):
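# PSA invokes this hook on successful login; stash the user on the config
# so callback() can hand it back to Galaxy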
self.config['user'] = user
def authenticate(self, trans):
on_the_fly_config(trans.sa_session)
strategy = Strategy(trans.request, trans.session, Storage, self.config)
backend = self._load_backend(strategy, self.config['redirect_uri'])
if backend.name is BACKENDS_NAME["google"] and \
"SOCIAL_AUTH_SECONDARY_AUTH_PROVIDER" in self.config and \
"SOCIAL_AUTH_SECONDARY_AUTH_ENDPOINT" in self.config:
backend.DEFAULT_SCOPE.append("https://www.googleapis.com/auth/cloud-platform")
return do_auth(backend)
def callback(self, state_token, authz_code, trans, login_redirect_url):
on_the_fly_config(trans.sa_session)
self.config[setting_name('LOGIN_REDIRECT_URL')] = login_redirect_url
strategy = Strategy(trans.request, trans.session, Storage, self.config)
strategy.session_set(BACKENDS_NAME[self.config['provider']] + '_state', state_token)
backend = self._load_backend(strategy, self.config['redirect_uri'])
redirect_url = do_complete(
backend,
login=lambda backend, user, social_user: self._login_user(backend, user, social_user),
user=self._get_current_user(trans),
state=state_token)
return redirect_url, self.config.get('user', None)
def disconnect(self, provider, trans, disconnect_redirect_url=None, association_id=None):
on_the_fly_config(trans.sa_session)
self.config[setting_name('DISCONNECT_REDIRECT_URL')] =\
disconnect_redirect_url if disconnect_redirect_url is not None else ()
strategy = Strategy(trans.request, trans.session, Storage, self.config)
backend = self._load_backend(strategy, self.config['redirect_uri'])
response = do_disconnect(backend, self._get_current_user(trans), association_id)
if isinstance(response, six.string_types):
return True, "", response
return response.get('success', False), response.get('message', ""), ""
class Strategy(BaseStrategy):
def __init__(self, request, session, storage, config, tpl=None):
self.request = request
self.session = session if session else {}
self.config = config
self.config['SOCIAL_AUTH_REDIRECT_IS_HTTPS'] = True if self.request and self.request.host.startswith('https:') else False
self.config['SOCIAL_AUTH_GOOGLE_OPENIDCONNECT_EXTRA_DATA'] = ['id_token']
super(Strategy, self).__init__(storage, tpl)
def get_setting(self, name):
return self.config[name]
def session_get(self, name, default=None):
return self.session.get(name, default)
def session_set(self, name, value):
self.session[name] = value
def session_pop(self, name):
raise NotImplementedError('Not implemented.')
def request_data(self, merge=True):
if not self.request:
return {}
if merge:
data = self.request.GET.copy()
data.update(self.request.POST)
elif self.request.method == 'POST':
data = self.request.POST
else:
data = self.request.GET
return data
def request_host(self):
if self.request:
return self.request.host
def build_absolute_uri(self, path=None):
path = path or ''
if path.startswith('http://') or path.startswith('https://'):
return path
if self.request:
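# parenthesize the conditional so only the provider suffix, not the whole
# URI expression, depends on whether a provider is configured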
return self.request.host + '/authnz' + \
('/' + self.config.get('provider') if self.config.get('provider', None) is not None else '')
return path
def redirect(self, url):
return url
def html(self, content):
raise NotImplementedError('Not implemented.')
def render_html(self, tpl=None, html=None, context=None):
raise NotImplementedError('Not implemented.')
def start(self):
self.clean_partial_pipeline()
if self.backend.uses_redirect():
return self.redirect(self.backend.auth_url())
else:
return self.html(self.backend.auth_html())
def complete(self, *args, **kwargs):
return self.backend.auth_complete(*args, **kwargs)
def continue_pipeline(self, *args, **kwargs):
return self.backend.continue_pipeline(*args, **kwargs)
class Storage(object):
user = UserAuthnzToken
nonce = PSANonce
association = PSAAssociation
code = PSACode
partial = PSAPartial
@classmethod
def is_integrity_error(cls, exception):
return exception.__class__ is IntegrityError
def on_the_fly_config(sa_session):
PSACode.sa_session = sa_session
UserAuthnzToken.sa_session = sa_session
PSANonce.sa_session = sa_session
PSAPartial.sa_session = sa_session
PSAAssociation.sa_session = sa_session
def contains_required_data(response=None, is_new=False, **kwargs):
"""
This function is called as part of authentication and authorization
pipeline before user is authenticated or authorized (see AUTH_PIPELINE).
This function checks that all the data required by Galaxy for a user
is provided. It raises an exception if any of the required data is missing,
and returns None otherwise.
:type response: dict
:param response: a dictionary containing decoded response from
OIDC backend that contain the following keys
among others:
- id_token; see: http://openid.net/specs/openid-connect-core-1_0.html#IDToken
- access_token; see: https://tools.ietf.org/html/rfc6749#section-1.4
- refresh_token; see: https://tools.ietf.org/html/rfc6749#section-1.5
- token_type; see: https://tools.ietf.org/html/rfc6750#section-6.1.1
- scope; see: http://openid.net/specs/openid-connect-core-1_0.html#AuthRequest
- expires_in; is the expiration time of the access and ID tokens in seconds since
the response was generated.
:type is_new: bool
:param is_new: has the user been authenticated?
:param kwargs: may contain the following keys among others:
- uid: user ID
- user: Galaxy user; if user is already authenticated
- backend: the backend that is used for user authentication.
- storage: an instance of Storage class.
- strategy: an instance of the Strategy class.
- state: the state code received from identity provider.
- details: details about the user's third-party identity as requested in `scope`.
:rtype: void
:return: raises an exception if any of the required arguments is missing; passes if all are given.
"""
hint_msg = "Visit the identity provider's permitted applications page " \
"(e.g., visit `https://myaccount.google.com/u/0/permissions` " \
"for Google), then revoke the access of this Galaxy instance, " \
"and then retry to login. If the problem persists, contact " \
"the Admin of this Galaxy instance."
if response is None or not isinstance(response, dict):
# This can happen only if PSA is not able to decode the `authnz code`
# sent back from the identity provider. PSA internally handles such
# scenarios; however, this case is implemented to prevent uncaught
# server-side errors.
raise MalformedContents(err_msg="`response` not found. {}".format(hint_msg))
if not response.get("id_token"):
# This can happen if a non-OIDC compliant backend is used;
# e.g., an OAuth2.0-based backend that only generates access token.
raise MalformedContents(err_msg="Missing identity token. {}".format(hint_msg))
if is_new and not response.get("refresh_token"):
# An identity provider (e.g., Google) sends a refresh token the first
# time the user consents to Galaxy's access (i.e., the first time the user
# logs in to a Galaxy instance using their credentials with the identity
# provider). There could be a variety of scenarios under which a refresh
# token might be missing; e.g., a manipulated Galaxy database, where a
# user's records from the galaxy_user and oidc_user_authnz_tokens tables
# were deleted after the user had provided consent. This can also happen
# during dev efforts.
# The solution is to revoke the consent by visiting the identity provider's
# website, and then retry the login process.
raise MalformedContents(err_msg="Missing refresh token. {}".format(hint_msg))
def verify(strategy=None, response=None, details=None, **kwargs):
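# Optional secondary authorization: when configured for GCP, verify that the
# user's email appears in the IAM policy bindings of the configured service
# account before allowing login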
provider = strategy.config.get("SOCIAL_AUTH_SECONDARY_AUTH_PROVIDER")
endpoint = strategy.config.get("SOCIAL_AUTH_SECONDARY_AUTH_ENDPOINT")
if provider is None or endpoint is None:
# Either secondary authorization is not configured or the OIDC IdP
# is not compatible, so allow user login.
return
if provider.lower() == "gcp":
result = requests.post(
"https://iam.googleapis.com/v1/projects/-/serviceAccounts/{}:getIamPolicy".format(endpoint),
headers={
'Authorization': 'Bearer {}'.format(response.get("access_token")),
'Accept': 'application/json'})
res = json.loads(result.content)
if result.status_code == requests.codes.ok:
email_addresses = res["bindings"][0]["members"]
email_addresses = [x.lower().replace("user:", "").strip() for x in email_addresses]
if details.get("email") in email_addresses:
# Secondary authorization successful, so allow user login.
pass
else:
raise Exception("Not authorized by GCP IAM.")
else:
# The message of the raised exception is shown to the user; hence,
# this way of handling the exception is better than using
# result.raise_for_status(), since raise_for_status may report
# sensitive information that should not be exposed to users.
raise Exception(res["error"]["message"])
else:
raise Exception("`{}` is an unsupported secondary authorization provider, contact admin.".format(provider))
def allowed_to_disconnect(name=None, user=None, user_storage=None, strategy=None,
backend=None, request=None, details=None, **kwargs):
"""
Disconnect is the process of disassociating a Galaxy user and a third-party authnz.
In other words, it is the process of removing any access and/or ID tokens of a user.
This function should raise an exception if disconnection is NOT permitted. Do NOT
return any value (except an empty dictionary) if disconnect is allowed, because, at
least until PSA social_core v1.5.0, any returned value (e.g., a Boolean) will result
in the rest of the disconnect pipeline being ignored.
See the following condition in `run_pipeline` function:
https://github.com/python-social-auth/social-core/blob/master/social_core/backends/base.py#L114
:param name: name of the backend (e.g., google-openidconnect)
:type user: galaxy.model.User
:type user_storage: galaxy.model.UserAuthnzToken
:type strategy: galaxy.authnz.psa_authnz.Strategy
:type backend: PSA backend object (e.g., social_core.backends.google_openidconnect.GoogleOpenIdConnect)
:type request: webob.multidict.MultiDict
:type details: dict
:return: empty dict
"""
pass
def disconnect(name=None, user=None, user_storage=None, strategy=None,
backend=None, request=None, details=None, **kwargs):
"""
Disconnect is the process of disassociating a Galaxy user and a third-party authnz.
In other words, it is the process of removing any access and/or ID tokens of a user.
:param name: name of the backend (e.g., google-openidconnect)
:type user: galaxy.model.User
:type user_storage: galaxy.model.UserAuthnzToken
:type strategy: galaxy.authnz.psa_authnz.Strategy
:type backend: PSA backend object (e.g., social_core.backends.google_openidconnect.GoogleOpenIdConnect)
:type request: webob.multidict.MultiDict
:type details: dict
:return: void or empty dict. Any key-value pair inside the dictionary will be available
inside PSA only, and will be passed to the next step in the disconnect pipeline. However,
the key-value pair will not be returned as a result of calling the `do_disconnect` function.
Additionally, returning any value except for a(n) (empty) dictionary, will break the
disconnect pipeline, and that value will be returned as a result of calling the `do_disconnect` function.
"""
user_authnz = strategy.trans.sa_session.query(user_storage).filter(user_storage.table.c.user_id == user.id,
user_storage.table.c.provider == name).first()
if user_authnz is None:
return {'success': False, 'message': 'Not authenticated by any identity providers.'}
# option A
strategy.trans.sa_session.delete(user_authnz)
# option B
# user_authnz.extra_data = None
strategy.trans.sa_session.flush()
| 46.394231
| 129
| 0.67943
|
e74a43149d18359a111cd8b06db556e2a1be05d9
| 540
|
py
|
Python
|
pagination/page_list.py
|
AmrAnwar/notarabic
|
38b65b38dfc51afeac9ad0e3c3fca8f1c356ffbd
|
[
"BSD-2-Clause"
] | 1
|
2019-06-10T20:14:40.000Z
|
2019-06-10T20:14:40.000Z
|
pagination/page_list.py
|
AmrAnwar/notarabic
|
38b65b38dfc51afeac9ad0e3c3fca8f1c356ffbd
|
[
"BSD-2-Clause"
] | 1
|
2021-06-01T23:49:52.000Z
|
2021-06-01T23:49:52.000Z
|
pagination/page_list.py
|
AmrAnwar/notarabic
|
38b65b38dfc51afeac9ad0e3c3fca8f1c356ffbd
|
[
"BSD-2-Clause"
] | 1
|
2019-06-23T23:33:45.000Z
|
2019-06-23T23:33:45.000Z
|
from rest_framework.pagination import PageNumberPagination
from rest_framework.response import Response
class PagePageListPagination(PageNumberPagination):
page_size = 5
page_size_query_param = 'limit'
def get_paginated_response(self, data):
return Response({
'next': self.get_next_link(),
'previous': self.get_previous_link(),
'result': data,
'status': True,
'pages_count': self.page.paginator.num_pages,
'objects_count': len(data)
})
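# A minimal usage sketch (illustrative; the model and serializer below are
# hypothetical, not part of this app):
#
#   from rest_framework.generics import ListAPIView
#
#   class PageListView(ListAPIView):
#       queryset = Page.objects.all()              # hypothetical model
#       serializer_class = PageSerializer          # hypothetical serializer
#       pagination_class = PagePageListPagination
#
# Clients may then request e.g. /pages/?page=2&limit=10, where `limit`
# overrides the default page_size of 5.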
| 28.421053
| 58
| 0.65
|
7a165cbfe803f379a08af6bcf7248ea63cd071a3
| 1,484
|
py
|
Python
|
cqlengine/tests/statements/test_insert_statement.py
|
jfelectron/cqlengine
|
34a121aeebfb1ab6954b08a664eeba53d6c122ba
|
[
"BSD-3-Clause"
] | 57
|
2015-01-06T14:47:20.000Z
|
2020-10-27T00:55:48.000Z
|
cqlengine/tests/statements/test_insert_statement.py
|
jfelectron/cqlengine
|
34a121aeebfb1ab6954b08a664eeba53d6c122ba
|
[
"BSD-3-Clause"
] | 66
|
2015-01-03T20:48:41.000Z
|
2021-07-25T00:01:49.000Z
|
cqlengine/tests/statements/test_insert_statement.py
|
jfelectron/cqlengine
|
34a121aeebfb1ab6954b08a664eeba53d6c122ba
|
[
"BSD-3-Clause"
] | 35
|
2015-01-03T20:49:51.000Z
|
2021-07-23T12:34:52.000Z
|
from unittest import TestCase
from cqlengine.statements import InsertStatement, StatementException, AssignmentClause
import six
class InsertStatementTests(TestCase):
def test_where_clause_failure(self):
""" tests that where clauses cannot be added to Insert statements """
ist = InsertStatement('table', None)
with self.assertRaises(StatementException):
ist.add_where_clause('s')
def test_statement(self):
ist = InsertStatement('table', None)
ist.add_assignment_clause(AssignmentClause('a', 'b'))
ist.add_assignment_clause(AssignmentClause('c', 'd'))
self.assertEqual(
six.text_type(ist),
'INSERT INTO table ("a", "c") VALUES (%(0)s, %(1)s)'
)
def test_context_update(self):
ist = InsertStatement('table', None)
ist.add_assignment_clause(AssignmentClause('a', 'b'))
ist.add_assignment_clause(AssignmentClause('c', 'd'))
ist.update_context_id(4)
self.assertEqual(
six.text_type(ist),
'INSERT INTO table ("a", "c") VALUES (%(4)s, %(5)s)'
)
ctx = ist.get_context()
self.assertEqual(ctx, {'4': 'b', '5': 'd'})
def test_additional_rendering(self):
ist = InsertStatement('table', ttl=60)
ist.add_assignment_clause(AssignmentClause('a', 'b'))
ist.add_assignment_clause(AssignmentClause('c', 'd'))
self.assertIn('USING TTL 60', six.text_type(ist))
| 35.333333
| 86
| 0.632749
|
bbecf059d4515f7025f68a97e2406e5fd5e488f2
| 387
|
py
|
Python
|
plantumlcli/utils/function.py
|
cathiele/plantumlcli
|
c969c38795538245ab06f4c52a230ed9ebca30c0
|
[
"Apache-2.0"
] | 11
|
2020-11-29T15:08:35.000Z
|
2021-11-19T16:40:15.000Z
|
plantumlcli/utils/function.py
|
cathiele/plantumlcli
|
c969c38795538245ab06f4c52a230ed9ebca30c0
|
[
"Apache-2.0"
] | 5
|
2020-11-30T06:18:07.000Z
|
2021-11-20T12:32:56.000Z
|
plantumlcli/utils/function.py
|
cathiele/plantumlcli
|
c969c38795538245ab06f4c52a230ed9ebca30c0
|
[
"Apache-2.0"
] | 1
|
2021-11-19T16:40:19.000Z
|
2021-11-19T16:40:19.000Z
|
from typing import Callable, Any
def all_func(*funcs: Callable[[], Any], quick_fail: bool = True) -> Callable[[], bool]:
def _func():
_success = True
for _item_func in funcs:
_ret = not not _item_func()
_success = _success and _ret
if not _success and quick_fail:
break
return _success
return _func
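# A minimal demonstration (illustrative, not part of the library): combine two
# zero-argument checks into a single boolean-returning callable.
if __name__ == '__main__':
    check = all_func(lambda: True, lambda: 0, quick_fail=True)
    print(check())  # False -- the second function returns a falsy value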
| 24.1875
| 87
| 0.576227
|
80b957ffb6e3cc6a6d385e5301b0ac679f3b0cfe
| 2,354
|
py
|
Python
|
python/tskit/exceptions.py
|
winni2k/tskit
|
92fe9c04a27385401732a698843756aa797bacdd
|
[
"MIT"
] | null | null | null |
python/tskit/exceptions.py
|
winni2k/tskit
|
92fe9c04a27385401732a698843756aa797bacdd
|
[
"MIT"
] | null | null | null |
python/tskit/exceptions.py
|
winni2k/tskit
|
92fe9c04a27385401732a698843756aa797bacdd
|
[
"MIT"
] | null | null | null |
# MIT License
#
# Copyright (c) 2018-2019 Tskit Developers
# Copyright (c) 2017 University of Oxford
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Exceptions defined in tskit.
"""
from _tskit import TskitException
from _tskit import LibraryError
from _tskit import FileFormatError
from _tskit import VersionTooNewError
from _tskit import VersionTooOldError
# Some exceptions are defined in the low-level module. In particular, the
# superclass of all exceptions for tskit is defined here. We define the
# docstrings here to avoid difficulties with compiling C code on
# readthedocs.
# TODO finalise this when working out the docs structure for tskit on rtd.
TskitException.__doc__ = "Superclass of all exceptions defined in tskit."
LibraryError.__doc__ = "Generic low-level error raised by the C library."
FileFormatError.__doc__ = "An error was detected in the file format."
VersionTooNewError.__doc__ = """
The version of the file is too new and cannot be read by the library.
"""
VersionTooOldError.__doc__ = """
The version of the file is too old and cannot be read by the library.
"""
class DuplicatePositionsError(TskitException):
"""
Duplicate positions in the list of sites.
"""
class ProvenanceValidationError(TskitException):
"""
    A JSON document did not validate against the provenance schema.
"""
| 39.233333
| 80
| 0.774851
|
779ce43c112895dc026b449bd140f2e6779a5684
| 2,410
|
py
|
Python
|
src/models/model.py
|
FreddyMurphy/MLOpsProject
|
176d91636ad2ba6da565bba8df5745e9d65a9137
|
[
"MIT"
] | null | null | null |
src/models/model.py
|
FreddyMurphy/MLOpsProject
|
176d91636ad2ba6da565bba8df5745e9d65a9137
|
[
"MIT"
] | 3
|
2021-06-20T17:07:06.000Z
|
2021-06-22T16:32:56.000Z
|
src/models/model.py
|
FreddyMurphy/MLOpsProject
|
176d91636ad2ba6da565bba8df5745e9d65a9137
|
[
"MIT"
] | null | null | null |
import torch_enhance
from kornia.losses import SSIMLoss
from pytorch_lightning import LightningModule
from torch import optim
from torch_enhance import metrics
class SRCNN(LightningModule):
def __init__(self,
scaling=2,
n_channels=3,
lr=0.001,
window_size=5,
optimizer='Adam'):
super().__init__()
self.model = torch_enhance.models.SRCNN(scaling, n_channels)
self.lr = lr
self.optimizer = optimizer
self.criterion = SSIMLoss(window_size)
def forward(self, x):
return self.model(x)
def configure_optimizers(self):
optimizer = getattr(optim, self.optimizer)(self.parameters(),
lr=self.lr)
return optimizer
def training_step(self, train_batch, batch_idx):
high_res, low_res = train_batch
upscaled = self.model(low_res)
loss = self.criterion(upscaled, high_res)
# metrics
# Mean absolute error
mae = metrics.mae(upscaled, high_res)
# Peak-signal-noise ratio
psnr = metrics.psnr(upscaled, high_res)
# Logs
self.log("train_loss", loss)
self.log("train_mae", mae)
self.log("train_psnr", psnr)
return loss
    # Very similar to the training step
def validation_step(self, val_batch, batch_idx):
high_res, low_res = val_batch
upscaled = self.model(low_res)
loss = self.criterion(upscaled, high_res)
# metrics
# Mean absolute error
mae = metrics.mae(upscaled, high_res)
# Peak-signal-noise ratio
psnr = metrics.psnr(upscaled, high_res)
# Logs
self.log("val_loss", loss)
self.log("val_mae", mae)
self.log("val_psnr", psnr)
return loss
    # Very similar to the training step
def test_step(self, test_batch, batch_idx):
high_res, low_res = test_batch
upscaled = self.model(low_res)
loss = self.criterion(upscaled, high_res)
# metrics
# Mean absolute error
mae = metrics.mae(upscaled, high_res)
# Peak-signal-noise ratio
psnr = metrics.psnr(upscaled, high_res)
# Logs
self.log("test_loss", loss)
self.log("test_mae", mae)
self.log("test_psnr", psnr)
return loss
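# A minimal training sketch (illustrative; the dataloaders are assumptions,
# not provided by this module):
#
#   from pytorch_lightning import Trainer
#
#   model = SRCNN(scaling=2, n_channels=3, lr=1e-3)
#   trainer = Trainer(max_epochs=10)
#   trainer.fit(model, train_dataloader, val_dataloader)  # hypothetical loaders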
| 26.195652
| 69
| 0.594191
|
63ba3d0973c54622d61ebd9a72274a17959bf539
| 1,258
|
py
|
Python
|
index.py
|
esan94/bsm03
|
51a1ceab2dc9e6f834aa0400eb44a7480dd854ca
|
[
"MIT"
] | 5
|
2020-08-18T21:32:55.000Z
|
2021-02-05T09:38:26.000Z
|
index.py
|
esan94/bsm03
|
51a1ceab2dc9e6f834aa0400eb44a7480dd854ca
|
[
"MIT"
] | null | null | null |
index.py
|
esan94/bsm03
|
51a1ceab2dc9e6f834aa0400eb44a7480dd854ca
|
[
"MIT"
] | 3
|
2020-08-24T14:25:40.000Z
|
2020-09-30T19:06:15.000Z
|
"""
Este es el archivo que se usa para ejecutar la aplicación.
"""
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import constants as c
from app import app
from home import layout_home
from update import layout_update
from analysis import layout_analysis
from strategy_one import layout_strategyone
# Creation of the app's main HTML layout
app.layout = html.Div(children=[
dcc.Location(id='url', refresh=False),
html.Div(id='page-content')
], style={'backgroundColor': c.PALETTE['black']})
@app.callback(Output('page-content', 'children'),
[Input('url', 'pathname')])
def display_page(pathname):
"""
En esta función se manejan las diferentes rutas del punto
web.
Parámetros:
----------
pathname:
ruta relativa del punto web.
Return:
------
String: ruta en uso.
"""
if pathname == '/':
return layout_home
elif pathname == '/update':
return layout_update
elif pathname == '/analysis':
return layout_analysis
elif pathname == '/strategyone':
return layout_strategyone
else:
return '404'
if __name__ == '__main__':
app.run_server(debug=True)
| 25.16
| 61
| 0.665342
|
e9d75f3c9c9fe2734301a6bdb1a790968375ca09
| 1,555
|
py
|
Python
|
setup.py
|
digirati-co-uk/pyelucidate
|
f321fefdb3c59f18c886a1870db1951177625241
|
[
"MIT"
] | 4
|
2019-04-04T19:59:09.000Z
|
2019-05-24T13:54:36.000Z
|
setup.py
|
digirati-co-uk/pyelucidate
|
f321fefdb3c59f18c886a1870db1951177625241
|
[
"MIT"
] | 1
|
2021-06-01T23:02:34.000Z
|
2021-06-01T23:02:34.000Z
|
setup.py
|
digirati-co-uk/pyelucidate
|
f321fefdb3c59f18c886a1870db1951177625241
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if sys.argv[-1] == "publish":
os.system("python setup.py sdist upload")
sys.exit()
readme = open("README.rst").read()
doclink = """
Documentation
-------------
The full documentation is at https://pyelucidate.readthedocs.io."""
history = open("HISTORY.rst").read().replace(".. :changelog:", "")
setup(
name="pyelucidate",
version="0.3.3",
description="Open Source Python Tools for the Elucidate Annotation Server.",
long_description=readme + "\n\n" + doclink + "\n\n" + history,
author="Matt McGrattan",
author_email="matt.mcgrattan@digirati.com",
url="https://github.com/digirati-co-uk/pyelucidate",
packages=["pyelucidate"],
setup_requires=["pytest-runner"],
tests_require=["pytest"],
package_dir={"pyelucidate": "pyelucidate"},
include_package_data=True,
install_requires=["aiohttp>=3.4.4", "requests>=2.20.1"],
license="MIT",
zip_safe=False,
keywords="pyelucidate",
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: Implementation :: PyPy",
],
)
| 29.339623
| 80
| 0.636656
|
d80b849bf80ee6b8d6793954266848e6cab37d55
| 9,352
|
py
|
Python
|
homeassistant/components/homekit_controller/__init__.py
|
cipacda/core
|
1a9bb47f78401340c80c4bfe52b93dcaee5d3625
|
[
"Apache-2.0"
] | 5
|
2017-01-26T16:33:09.000Z
|
2018-07-20T13:50:47.000Z
|
homeassistant/components/homekit_controller/__init__.py
|
cipacda/core
|
1a9bb47f78401340c80c4bfe52b93dcaee5d3625
|
[
"Apache-2.0"
] | 66
|
2020-08-05T07:21:39.000Z
|
2022-03-31T06:02:16.000Z
|
homeassistant/components/homekit_controller/__init__.py
|
cipacda/core
|
1a9bb47f78401340c80c4bfe52b93dcaee5d3625
|
[
"Apache-2.0"
] | 2
|
2021-03-02T10:36:33.000Z
|
2021-04-10T07:57:03.000Z
|
"""Support for Homekit device discovery."""
from __future__ import annotations
import asyncio
from typing import Any
import aiohomekit
from aiohomekit.model import Accessory
from aiohomekit.model.characteristics import (
Characteristic,
CharacteristicPermissions,
CharacteristicsTypes,
)
from aiohomekit.model.services import Service, ServicesTypes
from homeassistant.components import zeroconf
from homeassistant.const import ATTR_VIA_DEVICE, EVENT_HOMEASSISTANT_STOP
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.entity import DeviceInfo, Entity
from .config_flow import normalize_hkid
from .connection import HKDevice
from .const import CONTROLLER, DOMAIN, ENTITY_MAP, KNOWN_DEVICES, TRIGGERS
from .storage import EntityMapStorage
def escape_characteristic_name(char_name):
"""Escape any dash or dots in a characteristics name."""
return char_name.replace("-", "_").replace(".", "_")
class HomeKitEntity(Entity):
"""Representation of a Home Assistant HomeKit device."""
_attr_should_poll = False
def __init__(self, accessory, devinfo):
"""Initialise a generic HomeKit device."""
self._accessory = accessory
self._aid = devinfo["aid"]
self._iid = devinfo["iid"]
self._features = 0
self.setup()
self._signals = []
super().__init__()
@property
def accessory(self) -> Accessory:
"""Return an Accessory model that this entity is attached to."""
return self._accessory.entity_map.aid(self._aid)
@property
def accessory_info(self) -> Service:
"""Information about the make and model of an accessory."""
return self.accessory.services.first(
service_type=ServicesTypes.ACCESSORY_INFORMATION
)
@property
def service(self) -> Service:
"""Return a Service model that this entity is attached to."""
return self.accessory.services.iid(self._iid)
async def async_added_to_hass(self):
"""Entity added to hass."""
self._signals.append(
self.hass.helpers.dispatcher.async_dispatcher_connect(
self._accessory.signal_state_updated, self.async_write_ha_state
)
)
self._accessory.add_pollable_characteristics(self.pollable_characteristics)
self._accessory.add_watchable_characteristics(self.watchable_characteristics)
async def async_will_remove_from_hass(self):
"""Prepare to be removed from hass."""
self._accessory.remove_pollable_characteristics(self._aid)
self._accessory.remove_watchable_characteristics(self._aid)
for signal_remove in self._signals:
signal_remove()
self._signals.clear()
async def async_put_characteristics(self, characteristics: dict[str, Any]):
"""
Write characteristics to the device.
A characteristic type is unique within a service, but in order to write
to a named characteristic on a bridge we need to turn its type into
an aid and iid, and send it as a list of tuples, which is what this
helper does.
E.g. you can do:
await entity.async_put_characteristics({
CharacteristicsTypes.ON: True
})
"""
payload = self.service.build_update(characteristics)
return await self._accessory.put_characteristics(payload)
def setup(self):
"""Configure an entity baed on its HomeKit characteristics metadata."""
self.pollable_characteristics = []
self.watchable_characteristics = []
char_types = self.get_characteristic_types()
# Setup events and/or polling for characteristics directly attached to this entity
for char in self.service.characteristics.filter(char_types=char_types):
self._setup_characteristic(char)
# Setup events and/or polling for characteristics attached to sub-services of this
# entity (like an INPUT_SOURCE).
for service in self.accessory.services.filter(parent_service=self.service):
for char in service.characteristics.filter(char_types=char_types):
self._setup_characteristic(char)
def _setup_characteristic(self, char: Characteristic):
"""Configure an entity based on a HomeKit characteristics metadata."""
# Build up a list of (aid, iid) tuples to poll on update()
if CharacteristicPermissions.paired_read in char.perms:
self.pollable_characteristics.append((self._aid, char.iid))
# Build up a list of (aid, iid) tuples to subscribe to
if CharacteristicPermissions.events in char.perms:
self.watchable_characteristics.append((self._aid, char.iid))
@property
def unique_id(self) -> str:
"""Return the ID of this device."""
serial = self.accessory_info.value(CharacteristicsTypes.SERIAL_NUMBER)
return f"homekit-{serial}-{self._iid}"
@property
def name(self) -> str:
"""Return the name of the device if any."""
return self.accessory_info.value(CharacteristicsTypes.NAME)
@property
def available(self) -> bool:
"""Return True if entity is available."""
return self._accessory.available and self.service.available
@property
def device_info(self) -> DeviceInfo:
"""Return the device info."""
info = self.accessory_info
accessory_serial = info.value(CharacteristicsTypes.SERIAL_NUMBER)
device_info = DeviceInfo(
identifiers={(DOMAIN, "serial-number", accessory_serial)},
manufacturer=info.value(CharacteristicsTypes.MANUFACTURER, ""),
model=info.value(CharacteristicsTypes.MODEL, ""),
name=info.value(CharacteristicsTypes.NAME),
sw_version=info.value(CharacteristicsTypes.FIRMWARE_REVISION, ""),
)
        # Some devices only have a single accessory - we don't add a
        # via_device there, otherwise it would be self-referential.
bridge_serial = self._accessory.connection_info["serial-number"]
if accessory_serial != bridge_serial:
device_info[ATTR_VIA_DEVICE] = (DOMAIN, "serial-number", bridge_serial)
return device_info
def get_characteristic_types(self):
"""Define the homekit characteristics the entity cares about."""
raise NotImplementedError
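# A minimal illustration of a concrete subclass (an assumption for exposition,
# not an entity class shipped by this integration):
#
#   class SwitchEntity(HomeKitEntity):
#       def get_characteristic_types(self):
#           # Poll and subscribe to the ON characteristic only.
#           return [CharacteristicsTypes.ON]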
class AccessoryEntity(HomeKitEntity):
"""A HomeKit entity that is related to an entire accessory rather than a specific service or characteristic."""
@property
def unique_id(self) -> str:
"""Return the ID of this device."""
serial = self.accessory_info.value(CharacteristicsTypes.SERIAL_NUMBER)
return f"homekit-{serial}-aid:{self._aid}"
class CharacteristicEntity(HomeKitEntity):
"""
    A HomeKit entity that is related to a single characteristic rather than a whole service.
This is typically used to expose additional sensor, binary_sensor or number entities that don't belong with
the service entity.
"""
def __init__(self, accessory, devinfo, char):
"""Initialise a generic single characteristic HomeKit entity."""
self._char = char
super().__init__(accessory, devinfo)
@property
def unique_id(self) -> str:
"""Return the ID of this device."""
serial = self.accessory_info.value(CharacteristicsTypes.SERIAL_NUMBER)
return f"homekit-{serial}-aid:{self._aid}-sid:{self._char.service.iid}-cid:{self._char.iid}"
async def async_setup_entry(hass, entry):
"""Set up a HomeKit connection on a config entry."""
conn = HKDevice(hass, entry, entry.data)
hass.data[KNOWN_DEVICES][conn.unique_id] = conn
# For backwards compat
if entry.unique_id is None:
hass.config_entries.async_update_entry(
entry, unique_id=normalize_hkid(conn.unique_id)
)
if not await conn.async_setup():
del hass.data[KNOWN_DEVICES][conn.unique_id]
raise ConfigEntryNotReady
return True
async def async_setup(hass, config):
"""Set up for Homekit devices."""
map_storage = hass.data[ENTITY_MAP] = EntityMapStorage(hass)
await map_storage.async_initialize()
async_zeroconf_instance = await zeroconf.async_get_async_instance(hass)
hass.data[CONTROLLER] = aiohomekit.Controller(
async_zeroconf_instance=async_zeroconf_instance
)
hass.data[KNOWN_DEVICES] = {}
hass.data[TRIGGERS] = {}
async def _async_stop_homekit_controller(event):
await asyncio.gather(
*(
connection.async_unload()
for connection in hass.data[KNOWN_DEVICES].values()
)
)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _async_stop_homekit_controller)
return True
async def async_unload_entry(hass, entry):
"""Disconnect from HomeKit devices before unloading entry."""
hkid = entry.data["AccessoryPairingID"]
if hkid in hass.data[KNOWN_DEVICES]:
connection = hass.data[KNOWN_DEVICES][hkid]
await connection.async_unload()
return True
async def async_remove_entry(hass, entry):
"""Cleanup caches before removing config entry."""
hkid = entry.data["AccessoryPairingID"]
hass.data[ENTITY_MAP].async_delete_map(hkid)
| 35.694656
| 115
| 0.691082
|
4d967d992a282f727a447350ae70f5216bd83da5
| 2,028
|
py
|
Python
|
paralelismo/k_means/using_nworkers/SparseManualWorker.py
|
davidcediel12/Cliente-Servidor
|
58859cb8bb67167e0929b74071de86ce53376943
|
[
"Apache-2.0"
] | null | null | null |
paralelismo/k_means/using_nworkers/SparseManualWorker.py
|
davidcediel12/Cliente-Servidor
|
58859cb8bb67167e0929b74071de86ce53376943
|
[
"Apache-2.0"
] | 1
|
2020-05-05T00:12:58.000Z
|
2020-05-05T00:12:58.000Z
|
paralelismo/k_means/using_nworkers/SparseManualWorker.py
|
davidcediel12/Cliente-Servidor
|
58859cb8bb67167e0929b74071de86ce53376943
|
[
"Apache-2.0"
] | 1
|
2020-05-04T23:57:44.000Z
|
2020-05-04T23:57:44.000Z
|
"""
Solo cambia las funciones de distancia y la forma de abrir
el dataset, la suma de los puntos ahora tambien es una matriz
dispersa
"""
import argparse
import numpy as np
from utils import *
import time
from GenericWorker import SparseWorkerGeneric
from os.path import join
class SparseManualWorker(SparseWorkerGeneric):
def calculateTagsAndSum(self, centroids, points, norm_centroids):
        # Computes the distance between a set of points and all the centroids;
        # from this it finds the closest cluster in order to build the tag
        # vector and the sum of the points of each cluster.
        # Matrix of size data * centroids
y = []
sizes = [0] * self.n_clusters
sum_points = np.zeros((self.n_clusters, self.n_features))
init_time = time.time()
for p in points:
distance_point = []
for i, centroid in enumerate(centroids):
if self.distance_metric == "angular":
distance_point.append(cosineSimilaritySparseManual2(p, centroid, norm_centroids[i]))
elif self.distance_metric == "euclidean":
distance_point.append(euclideanDistanceSparseManual2(p, centroid))
            # From the previously computed distances, create the tags and
            # sum the points of each cluster so that the sink can later
            # average them
index_min = int(np.argmin(distance_point))
y.append(index_min) #Tags
sizes[index_min] += 1
sum_points[index_min] = sumDictAndPoint(sum_points[index_min], p)
print(f"Time {time.time()-init_time}")
return (y, np.ndarray.tolist(sum_points), sizes)
if __name__ == "__main__":
console = argparse.ArgumentParser()
console.add_argument("dir_ventilator", type = str)
console.add_argument("dir_sink", type = str)
args = console.parse_args()
worker = SparseManualWorker(args.dir_ventilator, args.dir_sink)
worker.listen()
| 36.214286
| 104
| 0.662229
|
9c18936882d46b5cada5e3b0a802cf8be367b27b
| 7,168
|
py
|
Python
|
mathematics_dataset/util/display_test.py
|
PhysicsTeacher13/Mathematics_Dataset
|
7f13bf661e6f36d61542bf0360b27f31eb9efe20
|
[
"Apache-2.0"
] | 8
|
2019-11-13T13:48:09.000Z
|
2020-05-06T07:49:11.000Z
|
mathematics_dataset/util/display_test.py
|
PhysicsTeacher13/Mathematics_Dataset
|
7f13bf661e6f36d61542bf0360b27f31eb9efe20
|
[
"Apache-2.0"
] | 1
|
2021-10-05T16:16:36.000Z
|
2021-10-05T16:16:36.000Z
|
mathematics_dataset/util/display_test.py
|
LaudateCorpus1/mathematics_dataset
|
e91dba649b843597c14b9d84dfe92bff79b7d299
|
[
"Apache-2.0"
] | 1
|
2019-11-13T14:01:03.000Z
|
2019-11-13T14:01:03.000Z
|
# Copyright 2018 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mathematics_dataset.util.display."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from absl.testing import absltest
from mathematics_dataset.util import display
import sympy
class DecimalTest(absltest.TestCase):
def testBasic_integer(self):
decimal = display.Decimal(123)
self.assertEqual(str(decimal), '123')
self.assertEqual(sympy.sympify(decimal), sympy.Integer(123))
self.assertEqual(decimal.decimal_places(), 0)
def testBasic_ten(self):
decimal = display.Decimal(10)
self.assertEqual(str(decimal), '10')
self.assertEqual(sympy.sympify(decimal), sympy.Integer(10))
self.assertEqual(decimal.decimal_places(), 0)
def testBasic(self):
decimal = display.Decimal(sympy.Rational(123, 100))
self.assertEqual(str(decimal), '1.23')
self.assertEqual(sympy.sympify(decimal), sympy.Rational(123, 100))
self.assertEqual(decimal.decimal_places(), 2)
def testStr(self):
self.assertEqual(str(display.Decimal(sympy.Rational(0, 10))), '0')
self.assertEqual(str(display.Decimal(sympy.Rational(-1, 10))), '-0.1')
self.assertEqual(str(display.Decimal(sympy.Rational(-11, 10))), '-1.1')
self.assertEqual(str(display.Decimal(sympy.Rational(11, 10))), '1.1')
self.assertEqual(str(display.Decimal(sympy.Rational(101, 1))), '101')
self.assertEqual(
str(display.Decimal(sympy.Rational(20171, 1000000))), '0.020171')
def testStr_verySmall(self):
    # Tests that it doesn't display in "scientific" notation, e.g. 1E-9.
decimal = display.Decimal(sympy.Rational(1, 1000000000))
self.assertEqual(str(decimal), '0.000000001')
def testAdd(self):
self.assertEqual((display.Decimal(2) + display.Decimal(3)).value, 5)
def testSub(self):
self.assertEqual((display.Decimal(2) - display.Decimal(3)).value, -1)
def testMul(self):
self.assertEqual((display.Decimal(2) * display.Decimal(3)).value, 6)
def testRound(self):
decimal = display.Decimal(sympy.Rational(2675, 1000)) # 2.675
self.assertEqual(sympy.sympify(decimal.round()), sympy.Integer(3))
self.assertEqual(sympy.sympify(decimal.round(1)), sympy.Rational(27, 10))
self.assertEqual(sympy.sympify(decimal.round(2)), sympy.Rational(268, 100))
self.assertEqual(sympy.sympify(decimal.round(3)),
sympy.Rational(2675, 1000))
def testInt(self):
decimal = display.Decimal(123)
self.assertEqual(int(decimal), 123)
def testInt_errorIfNonInt(self):
decimal = display.Decimal(sympy.Rational(1, 2))
with self.assertRaisesRegexp(TypeError, 'Cannot represent'):
int(decimal)
def testComparison(self):
decimal = display.Decimal(sympy.Rational(-1, 2))
# pylint: disable=g-generic-assert
self.assertFalse(decimal != -0.5)
self.assertTrue(decimal != 0)
self.assertFalse(decimal < -0.5)
self.assertTrue(decimal < 0)
self.assertTrue(decimal <= -0.5)
self.assertTrue(decimal <= 0)
self.assertFalse(decimal > -0.5)
self.assertTrue(decimal > -1)
self.assertTrue(decimal >= -0.5)
self.assertFalse(decimal >= 0)
self.assertFalse(decimal == 0)
self.assertTrue(decimal == -0.5)
def testNegation(self):
decimal = display.Decimal(sympy.Rational(1, 2))
decimal = -decimal
self.assertNotEqual(decimal, 0.5)
self.assertEqual(decimal, -0.5)
class PercentageTest(absltest.TestCase):
def testPercentage(self):
percentage = display.Percentage(1.5)
self.assertEqual(str(percentage), '150%')
percentage = display.Percentage(sympy.Rational(67, 100))
self.assertEqual(str(percentage), '67%')
percentage = display.Percentage(sympy.Rational(67, 1000))
self.assertEqual(str(percentage), '6.7%')
class NonSimpleRationalTest(absltest.TestCase):
def testBasic(self):
frac = display.NonSimpleRational(4, 6)
self.assertEqual(frac.numer, 4)
self.assertEqual(frac.denom, 6)
self.assertEqual(str(frac), '4/6')
class StringNumberTest(absltest.TestCase):
def testIntegerToWords(self):
words = display.StringNumber(0)
self.assertEqual(str(words), 'zero')
self.assertEqual(sympy.sympify(words), 0)
words = display.StringNumber(8)
self.assertEqual(str(words), 'eight')
self.assertEqual(sympy.sympify(words), 8)
words = display.StringNumber(12)
self.assertEqual(str(words), 'twelve')
self.assertEqual(sympy.sympify(words), 12)
words = display.StringNumber(30)
self.assertEqual(str(words), 'thirty')
self.assertEqual(sympy.sympify(words), 30)
words = display.StringNumber(100)
self.assertEqual(str(words), 'one-hundred')
self.assertEqual(sympy.sympify(words), 100)
words = display.StringNumber(103)
self.assertEqual(str(words), 'one-hundred-and-three')
self.assertEqual(sympy.sympify(words), 103)
words = display.StringNumber(15439822)
self.assertEqual(str(words), 'fifteen-million-four-hundred-and-thirty-nine'
'-thousand-eight-hundred-and-twenty-two')
self.assertEqual(sympy.sympify(words), 15439822)
def testRationalToWords(self):
words = display.StringNumber(sympy.Rational(2, 3))
self.assertEqual(str(words), 'two thirds')
class StringOrdinalTest(absltest.TestCase):
def testBasic(self):
ordinal = display.StringOrdinal(0)
self.assertEqual(str(ordinal), 'zeroth')
ordinal = display.StringOrdinal(10)
self.assertEqual(str(ordinal), 'tenth')
def testCreate_errorIfNegative(self):
with self.assertRaisesRegexp(ValueError, 'Unsupported ordinal'):
display.StringOrdinal(-1)
class NumberListTest(absltest.TestCase):
def testBasic(self):
numbers = [2, 3, 1]
number_list = display.NumberList(numbers)
string = str(number_list)
self.assertEqual(string, '2, 3, 1')
class NumberInBaseTest(absltest.TestCase):
def testBasic(self):
self.assertEqual(str(display.NumberInBase(1, 10)), '1')
self.assertEqual(str(display.NumberInBase(-1, 10)), '-1')
self.assertEqual(str(display.NumberInBase(1, 2)), '1')
self.assertEqual(str(display.NumberInBase(-1, 2)), '-1')
self.assertEqual(str(display.NumberInBase(2, 2)), '10')
self.assertEqual(str(display.NumberInBase(-2, 2)), '-10')
self.assertEqual(str(display.NumberInBase(10, 16)), 'a')
self.assertEqual(str(display.NumberInBase(16, 16)), '10')
self.assertEqual(str(display.NumberInBase(256, 16)), '100')
self.assertEqual(str(display.NumberInBase(-75483, 10)), '-75483')
if __name__ == '__main__':
absltest.main()
| 34.461538
| 79
| 0.70731
|
746c25a707a05c3d0a0b60b34a07e9488ab90faf
| 1,066
|
py
|
Python
|
Python/0002AddTwoNumbers.py
|
MarginCode/LeetCodeNote
|
4b166687806e9672f13586eac88d04da867606ee
|
[
"MIT"
] | null | null | null |
Python/0002AddTwoNumbers.py
|
MarginCode/LeetCodeNote
|
4b166687806e9672f13586eac88d04da867606ee
|
[
"MIT"
] | null | null | null |
Python/0002AddTwoNumbers.py
|
MarginCode/LeetCodeNote
|
4b166687806e9672f13586eac88d04da867606ee
|
[
"MIT"
] | null | null | null |
# Definition for singly-linked list.
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution:
def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
        carry=0
        head=tail=None
        # First check that at least one list still has nodes
        while (l1 or l2):
            # Load each node's value (0 if that list is exhausted)
            x = l1.val if l1 else 0
            y = l2.val if l2 else 0
            # Add this place's digits (plus the carry from the previous step)
            everyone_place_sum=x+y+carry
            # Append the digit node, then compute the carry
            if head==None:
                head=tail=ListNode(everyone_place_sum%10)
            else:
                # Create a new node and attach it via next
                tail.next=ListNode(everyone_place_sum%10)
                # Step forward to the new node
                tail = tail.next
            # Integer division yields the carry
            carry=everyone_place_sum//10
            # Without this, a carry needed after the tail node would be
            # dropped (the previous tail.next == None) (this one really got me!)
            if carry > 0:
                tail.next = ListNode(carry)
            # Step l1/l2 one node forward
            if l1: l1=l1.next
            if l2: l2=l2.next
return head
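# A small self-check (illustrative, not part of the original solution):
# 342 + 465 = 807, with digits stored in reverse order.
def _build(digits):
    head = tail = None
    for d in digits:
        node = ListNode(d)
        if head is None:
            head = tail = node
        else:
            tail.next = node
            tail = node
    return head
if __name__ == "__main__":
    result = Solution().addTwoNumbers(_build([2, 4, 3]), _build([5, 6, 4]))
    digits = []
    while result:
        digits.append(result.val)
        result = result.next
    print(digits)  # [7, 0, 8]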
| 31.352941
| 68
| 0.515947
|
902fa32c52502716c5c7451850208c9ce923c6e0
| 570
|
py
|
Python
|
setup.py
|
MikeData/cmd-v4-hierarchy-parent-injector
|
3ef5687f2c6a8d9fd18fce863b022939cf1a01b0
|
[
"MIT"
] | null | null | null |
setup.py
|
MikeData/cmd-v4-hierarchy-parent-injector
|
3ef5687f2c6a8d9fd18fce863b022939cf1a01b0
|
[
"MIT"
] | null | null | null |
setup.py
|
MikeData/cmd-v4-hierarchy-parent-injector
|
3ef5687f2c6a8d9fd18fce863b022939cf1a01b0
|
[
"MIT"
] | null | null | null |
from distutils.core import setup
setup(
name = 'hierarchyParentInjector',
packages = ['hierarchyParentInjector'],
version = '0.1',
    description = 'Inserts additional rows into a V4 csv load file, for hierarchical parent nodes that are missing totals.',
author = 'Michael Adams',
author_email = 'michael.adams@ons.gov.uk',
url = 'https://github.com/ONS-OpenData/hierarchyParentInjector',
download_url = 'https://github.com/ONS-OpenData/hierarchyParentInjector/archive/0.1.tar.gz',
keywords = ['extraction', 'hierachies', 'data'],
classifiers = [],
)
| 38
| 123
| 0.726316
|
0b31f9b9d6a369df93605cfd247bf154f41564c0
| 18,647
|
py
|
Python
|
tests/integration/cattletest/core/test_shared_volumes.py
|
moul/cattle
|
d682921b45fce95c0886c2a8a95e7e8345d30521
|
[
"Apache-2.0"
] | 1
|
2021-04-24T08:16:38.000Z
|
2021-04-24T08:16:38.000Z
|
tests/integration/cattletest/core/test_shared_volumes.py
|
moul/cattle
|
d682921b45fce95c0886c2a8a95e7e8345d30521
|
[
"Apache-2.0"
] | 1
|
2022-01-21T23:49:42.000Z
|
2022-01-21T23:49:42.000Z
|
tests/integration/cattletest/core/test_shared_volumes.py
|
moul/cattle
|
d682921b45fce95c0886c2a8a95e7e8345d30521
|
[
"Apache-2.0"
] | null | null | null |
from common_fixtures import * # NOQA
from cattle import ClientApiError
SP_CREATE = "storagepool.create"
VOLUME_CREATE = "volume.create"
def more_hosts(context):
host2 = register_simulated_host(context)
host3 = register_simulated_host(context)
return context.host, host2, host3
def from_context(context):
return context.client, context.agent_client, context.host
def add_storage_pool(context, host_uuids=None):
client, agent_client, host = from_context(context)
sp_name = 'convoy-%s' % random_str()
if not host_uuids:
host_uuids = [host.uuid]
create_sp_event(client, agent_client, context,
sp_name, sp_name, SP_CREATE, host_uuids, sp_name)
storage_pool = wait_for(lambda: sp_wait(client, sp_name))
assert storage_pool.state == 'active'
return storage_pool
def create_new_agent(super_client, project):
scope = 'io.cattle.platform.agent.connection.simulator' \
'.AgentConnectionSimulator'
uri = 'sim://{}'.format(random_str())
data = {scope: {}}
account_id = get_plain_id(super_client, project)
data[scope]['agentResourcesAccountId'] = account_id
data['agentResourcesAccountId'] = account_id
agent = super_client.create_agent(uri=uri, data=data)
agent = super_client.wait_success(agent)
assert agent.state == "active"
account = agent.account()
creds = filter(lambda x: x.kind == 'agentApiKey', account.credentials())
agent_client = api_client(creds[0].publicValue, creds[0].secretValue)
return agent, account, agent_client
def test_storage_pool_update(new_context, super_client):
client = new_context.client
sp = add_storage_pool(new_context)
original_agent = super_client.list_agent(accountId=new_context.agent.id)[0]
assert super_client.reload(sp).agentId == original_agent.id
new_agent, new_agent_account, new_client = \
create_new_agent(super_client, new_context.project)
uuids = [new_context.host.uuid]
create_sp_event(client, new_client, new_context, sp.name, sp.name,
SP_CREATE, uuids, sp.name, new_agent_account)
assert super_client.wait_success(sp).agentId == new_agent.id
sp = client.wait_success(sp)
assert sp.state == 'active'
def test_storage_pool_agent_delete(new_context, super_client):
client = new_context.client
sp = add_storage_pool(new_context)
original_agent = super_client.list_agent(accountId=new_context.agent.id)[0]
original_agent = super_client.wait_success(original_agent.deactivate())
original_agent = super_client.wait_success(original_agent.remove())
sp = client.reload(sp)
assert sp.state == 'active'
def test_multiple_sp_volume_schedule(new_context):
# Tests that when a host has more than one storage pool (one local, one
# shared), and a container is scheduled to it, the root volume can be
# properly scheduled.
client = new_context.client
add_storage_pool(new_context)
# The allocation bug that caused this issue is much more likely to occur
# when two containers are created back-to-back
c = client.create_container(imageUuid=new_context.image_uuid,
networkMode=None)
c2 = client.create_container(imageUuid=new_context.image_uuid,
networkMode=None)
c = client.wait_success(c)
assert c is not None
vols = c.volumes()
assert len(vols) == 1
vol_pools = vols[0].storagePools()
assert len(vol_pools) == 1
assert vol_pools[0].kind == 'sim'
c2 = client.wait_success(c2)
assert c2 is not None
vols = c2.volumes()
assert len(vols) == 1
vol_pools = vols[0].storagePools()
assert len(vol_pools) == 1
assert vol_pools[0].kind == 'sim'
def test_finding_shared_volumes(new_context):
    # Tests that when a name is specified in dataVolumes and a volume of
# that name already exists in a shared storage pool, the pre-existing
# volume is used
client, agent_client, host = from_context(new_context)
storage_pool = add_storage_pool(new_context)
sp_name = storage_pool.name
name = random_str()
uri = '/foo/bar'
create_volume_event(client, agent_client, new_context, VOLUME_CREATE,
name, driver=sp_name, uri=uri)
volume = wait_for(lambda: volume_wait(client, name))
volume = wait_for(lambda: volume_in_sp(client, volume, storage_pool))
path = '/container/path'
# Previously created volume should show up in dataVolumeMounts
data_volumes = ['%s:%s' % (name, path)]
c = client.create_container(imageUuid=new_context.image_uuid,
dataVolumes=data_volumes)
c = client.wait_success(c)
assert c.state == 'running'
assert c.dataVolumeMounts[path] == volume.id
# Same behavior if volumeDriver == local
c = client.create_container(imageUuid=new_context.image_uuid,
volumeDriver='local',
dataVolumes=data_volumes)
c = client.wait_success(c)
assert c.state == 'running'
assert c.dataVolumeMounts[path] == volume.id
# Create another storage pool and add a volume of the same name to it
storage_pool = add_storage_pool(new_context)
sp_name2 = storage_pool.name
uri = '/foo/bar'
create_volume_event(client, agent_client, new_context, VOLUME_CREATE,
name, driver=sp_name2, uri=uri)
volume2 = wait_for(lambda: volume_in_sp_by_name_wait(name, storage_pool))
assert volume2.id != volume.id
# Container should not create successfully because name is ambiguous
c = client.create_container(imageUuid=new_context.image_uuid,
dataVolumes=data_volumes)
with pytest.raises(ClientApiError):
client.wait_success(c)
# Even if the volume driver is specified, should fail
c = client.create_container(imageUuid=new_context.image_uuid,
volumeDriver=sp_name2,
dataVolumes=data_volumes)
with pytest.raises(ClientApiError):
client.wait_success(c)
def test_data_volume_mounts(new_context):
client, agent_client, host = from_context(new_context)
storage_pool = add_storage_pool(new_context)
sp_name = storage_pool.name
external_id = random_str()
uri = '/foo/bar'
create_volume_event(client, agent_client, new_context, VOLUME_CREATE,
external_id, driver=sp_name, uri=uri)
volume = wait_for(lambda: volume_wait(client, external_id))
volume = wait_for(lambda: volume_in_sp(client, volume, storage_pool))
data_volume_mounts = {'/somedir': volume.id}
c = client.create_container(imageUuid=new_context.image_uuid,
volumeDriver='local',
dataVolumeMounts=data_volume_mounts)
c = client.wait_success(c, timeout=240)
assert c.state == 'running'
assert c.dataVolumes[0] == '%s:/somedir' % external_id
def test_volume_create(new_context):
client, agent_client, host = from_context(new_context)
storage_pool = add_storage_pool(new_context)
sp_name = storage_pool.name
add_storage_pool(new_context)
# Create a volume with a driver that points to a storage pool
v1 = client.create_volume(name=random_str(), driver=sp_name)
v1 = client.wait_success(v1)
sps = v1.storagePools()
assert len(sps) == 1
assert sps[0].id == storage_pool.id
# Create a volume with a driver that cattle doesn't know about
v2 = client.create_volume(name=random_str(), driver='driver-%s' %
random_str())
v2 = client.wait_success(v2)
data_volume_mounts = {'/con/path': v1.id,
'/con/path2': v2.id}
c = client.create_container(imageUuid=new_context.image_uuid,
dataVolumeMounts=data_volume_mounts)
c = client.wait_success(c)
assert c.state == 'running'
v1 = client.wait_success(v1)
sps = v1.storagePools()
assert len(sps) == 1
assert sps[0].id == storage_pool.id
v2 = client.wait_success(v2)
sps = v2.storagePools()
assert len(sps) == 1
assert sps[0].kind == 'sim'
# Create a new, unmapped volume, assign to container via dataVolumes
# Should be translated to a dataVolumeMount entry.
v3 = client.create_volume(name=random_str(), driver=sp_name)
v3 = client.wait_success(v3)
c = client.create_container(imageUuid=new_context.image_uuid,
dataVolumes=['%s:/foo' % v3.name])
c = client.wait_success(c)
assert c.state == 'running'
assert c.dataVolumeMounts['/foo'] == v3.id
v3 = client.wait_success(v3)
sps = v3.storagePools()
assert len(sps) == 1
assert sps[0].id == storage_pool.id
def create_and_map_volume(client, context):
name = random_str()
v = client.create_volume(name=name, driver='local')
v = client.wait_success(v)
c = client.wait_success(client.create_container(
imageUuid=context.image_uuid,
dataVolumeMounts={'/foo': v.id}))
assert c.state == 'running'
assert c.dataVolumeMounts['/foo'] == v.id
return name, v
def test_volume_affinity(new_context):
# When looking up named volumes for scheduling purposes, local volumes
# should not be ignored if the volume affinity label is present
client = new_context.client
n1, v1 = create_and_map_volume(client, new_context)
n2, v2 = create_and_map_volume(client, new_context)
n3, v3 = create_and_map_volume(client, new_context)
n4 = random_str()
v4 = client.create_volume(name=n4, driver='local')
c = client.create_container(imageUuid=new_context.image_uuid,
labels={'io.rancher.scheduler.affinity:'
'volumes': ','.join([n1, n3])},
dataVolumes=['%s:/p/n1' % n1,
'%s:/p/n2' % n2,
'%s:/p/n3' % n3,
'%s:/p/n4' % n4])
c = client.wait_success(c)
assert c.state == 'running'
# v1 is mapped, local driver, has affinity, should be found
# v2 is mapped, local driver, no affinity, should not be found
# v3 is mapped, random driver, has affinity, should be found
# v4 is unmapped, no affinity, should be found
assert len(c.dataVolumeMounts) == 3
assert c.dataVolumeMounts['/p/n1'] == v1.id
assert c.dataVolumeMounts['/p/n3'] == v3.id
assert c.dataVolumeMounts['/p/n4'] == v4.id
# Should fail to schedule because volume affinity conflicts with host
new_host = register_simulated_host(new_context)
with pytest.raises(ClientApiError) as e:
c = client.create_container(imageUuid=new_context.image_uuid,
volumeDriver='local',
requestedHostId=new_host.id,
labels={'io.rancher.scheduler.'
'affinity:volumes': n1},
dataVolumes=['%s:/foo' % n1])
client.wait_success(c)
assert e.value.message.startswith('Scheduling failed: valid host')
def test_volume_create_failed_allocation(new_context):
client, agent_client, host = from_context(new_context)
storage_pool = add_storage_pool(new_context)
sp_name = storage_pool.name
add_storage_pool(new_context)
v1 = client.wait_success(client.create_volume(name=random_str(),
driver=sp_name))
assert v1.state == 'requested'
# Will fail because new_host is not in the storage_pool that v1 belongs to
new_host = register_simulated_host(new_context)
data_volume_mounts = {'/con/path': v1.id}
with pytest.raises(ClientApiError) as e:
c = client.create_container(imageUuid=new_context.image_uuid,
requestedHostId=new_host.id,
dataVolumeMounts=data_volume_mounts)
client.wait_success(c)
assert e.value.message.startswith('Scheduling failed: valid host')
# Put two volumes from mutually exclusive storage pools onto a container
# and it should fail to find placement
sp2 = add_storage_pool(new_context, [new_host.uuid])
v2 = client.create_volume(name=random_str(), driver=sp2.name)
v2 = client.wait_success(v2)
assert v1.state == 'requested'
data_volume_mounts['/con/path2'] = v2.id
with pytest.raises(ClientApiError) as e:
c = client.create_container(imageUuid=new_context.image_uuid,
dataVolumeMounts=data_volume_mounts)
client.wait_success(c)
assert e.value.message.startswith('Scheduling failed')
def test_external_volume_event(super_client, new_context):
client, agent_client, host = from_context(new_context)
storage_pool = add_storage_pool(new_context)
sp_name = storage_pool.name
external_id = random_str()
uri = '/foo/bar'
create_volume_event(client, agent_client, new_context, VOLUME_CREATE,
external_id, driver=sp_name, uri=uri)
volume = wait_for(lambda: volume_wait(client, external_id))
volume = wait_for(lambda: volume_in_sp(client, volume, storage_pool))
assert volume.state == 'inactive'
assert volume.externalId == external_id
assert volume.name == external_id
assert volume.driver == sp_name
assert volume.uri == uri
assert volume.isHostPath is False
super_volume = super_client.by_id('volume', volume.id)
assert super_volume.deviceNumber == -1
assert super_volume.format == 'docker'
# Send event again to ensure two volumes are not created
create_volume_event(client, agent_client, new_context,
VOLUME_CREATE, external_id, driver=sp_name, uri=uri)
volumes = client.list_volume(externalId=external_id)
assert len(volumes) == 1
def test_external_storage_pool_event(new_context):
client, agent_client, host = from_context(new_context)
sp_name = 'convoy-%s' % random_str()
# Create a new storage pool with a single host
uuids = [host.uuid]
create_sp_event(client, agent_client, new_context,
sp_name, sp_name, SP_CREATE, uuids, sp_name)
storage_pool = wait_for(lambda: sp_wait(client, sp_name))
assert storage_pool.state == 'active'
assert storage_pool.externalId == sp_name
assert storage_pool.name == sp_name
assert storage_pool.driverName == sp_name
hosts = wait_for(lambda: wait_host_count(storage_pool, 1))
assert len(hosts) == 1
assert hosts[0].uuid == host.uuid
# Send event again to ensure a second storage pool is not created
create_sp_event(client, agent_client, new_context,
sp_name, sp_name, SP_CREATE, uuids, sp_name)
# Add a second host
host2 = register_simulated_host(new_context)
uuids.append(host2.uuid)
create_sp_event(client, agent_client, new_context,
sp_name,
sp_name, SP_CREATE, uuids, sp_name)
hosts = wait_for(lambda: wait_host_count(storage_pool, 2))
host_ids = [h.id for h in hosts]
assert host.id in host_ids
assert host2.id in host_ids
# Remove a host
uuids.pop(0)
create_sp_event(client, agent_client, new_context,
sp_name,
sp_name, SP_CREATE, uuids, sp_name)
hosts = wait_for(lambda: wait_host_count(storage_pool, 1))
assert host2.id in hosts[0].id
# Send empty host list
uuids = []
create_sp_event(client, agent_client, new_context,
sp_name,
sp_name, SP_CREATE, uuids, sp_name)
hosts = wait_for(lambda: wait_host_count(storage_pool, 0))
assert len(hosts) == 0
def create_volume_event(client, agent_client, context, event_type,
external_id, driver=None, uri=None):
vol_event = {
'externalId': external_id,
'eventType': event_type,
'volume': {
'externalId': external_id,
'name': external_id,
'driver': driver,
'uri': uri,
'format': 'docker',
'isHostPath': False,
}
}
event = agent_client.create_external_volume_event(vol_event)
assert event.externalId == external_id
assert event.eventType == event_type
event = wait_for(lambda: event_wait(client, event))
assert event.accountId == context.project.id
assert event.reportedAccountId == context.agent.id
return event
def create_sp_event(client, agent_client, context, external_id, name,
event_type, host_uuids, driver_name, agent_account=None):
event = agent_client.create_external_storage_pool_event(
externalId=external_id,
eventType=event_type,
hostUuids=host_uuids,
storagePool={
'name': name,
'externalId': external_id,
'driverName': driver_name,
})
assert event.externalId == external_id
assert event.eventType == event_type
assert event.hostUuids == host_uuids
event = wait_for(lambda: event_wait(client, event))
assert event.accountId == context.project.id
if agent_account:
assert event.reportedAccountId == agent_account.id
else:
assert event.reportedAccountId == context.agent.id
return event
def sp_wait(client, external_id):
storage_pools = client.list_storage_pool(externalId=external_id)
if len(storage_pools) and storage_pools[0].state == 'active':
return storage_pools[0]
def volume_in_sp_by_name_wait(name, storage_pool):
volumes = storage_pool.volumes(name=name)
if len(volumes) and volumes[0].state == 'inactive':
return volumes[0]
def volume_wait(client, external_id):
volumes = client.list_volume(externalId=external_id)
if len(volumes) and volumes[0].state == 'inactive':
return volumes[0]
def wait_host_count(storage_pool, count):
new_hosts = storage_pool.hosts()
if len(new_hosts) == count:
return new_hosts
def volume_in_sp(client, volume, storage_pool):
volumes = storage_pool.volumes()
if len(volumes) > 0:
for v in volumes:
if v.id == volume.id:
return volume
def event_wait(client, event):
created = client.by_id('externalEvent', event.id)
if created is not None and created.state == 'created':
return created
| 38.055102
| 79
| 0.660911
|
e1e94356ebbe9a698af58cc76d74d9338e6c0446
| 7,312
|
py
|
Python
|
tests/backward_compatibility/tabor_backward_compatibility_tests.py
|
eendebakpt/qupulse
|
5b5b48de10084d413e10cfd8f6e9f7536c69dd70
|
[
"MIT"
] | 30
|
2018-09-13T02:59:55.000Z
|
2022-03-21T04:25:22.000Z
|
tests/backward_compatibility/tabor_backward_compatibility_tests.py
|
eendebakpt/qupulse
|
5b5b48de10084d413e10cfd8f6e9f7536c69dd70
|
[
"MIT"
] | 220
|
2018-09-06T14:43:15.000Z
|
2022-03-25T12:26:25.000Z
|
tests/backward_compatibility/tabor_backward_compatibility_tests.py
|
eendebakpt/qupulse
|
5b5b48de10084d413e10cfd8f6e9f7536c69dd70
|
[
"MIT"
] | 14
|
2019-01-08T14:42:36.000Z
|
2021-05-21T08:53:06.000Z
|
import unittest
import os
import json
import typing
import importlib.util
import sys
import warnings
from tests.hardware.tabor_simulator_based_tests import TaborSimulatorManager
from tests.hardware.dummy_devices import DummyDAC
from tests.backward_compatibility.hardware_test_helper import LoadingAndSequencingHelper
from qupulse.serialization import Serializer, FilesystemBackend, PulseStorage
from qupulse.pulses.pulse_template import PulseTemplate
from qupulse.hardware.setup import HardwareSetup, PlaybackChannel, MarkerChannel, MeasurementMask
try:
import tabor_control
except ImportError:
tabor_control = None
if tabor_control is not None:
from qupulse.hardware.awgs.tabor import PlottableProgram, TaborAWGRepresentation
def do_not_skip(test_class):
if hasattr(test_class, '__unittest_skip__'):
test_class.__unittest_skip__ = False
return test_class
def is_test_skipped(test):
if hasattr(test, '__unittest_skip__'):
return test.__unittest_skip__
else:
return False
class DummyTest(unittest.TestCase):
def test_dummy(self):
self.assertTrue(True)
@unittest.skipIf(tabor_control is None, "tabor_control not available")
class TaborLoadingAndSequencingHelper(LoadingAndSequencingHelper):
def __init__(self, data_folder, pulse_name):
super().__init__(data_folder=data_folder, pulse_name=pulse_name)
self.preparation_commands = self.load_json('tabor_preparation_commands.json')
expected_binary_programs = self.load_json('binary_programs.json')
if expected_binary_programs:
self.expected_binary_programs = [PlottableProgram.from_builtin(prog) if prog else None
for prog in expected_binary_programs]
else:
self.expected_binary_programs = None
self.validate_programs = self.load_function_from_file('binary_program_validation.py', 'validate_programs')
self.validation_data = self.load_json('binary_program_validation.json')
self.program_AB = None
self.program_CD = None
def initialize_hardware_setup(self):
self.simulator_manager = TaborSimulatorManager(TaborAWGRepresentation, 'instr_addr',
dict(paranoia_level=2, reset=True))
try:
self.simulator_manager.start_simulator()
except RuntimeError as err:
raise unittest.SkipTest(*err.args) from err
self.awg = self.simulator_manager.connect()
if self.preparation_commands:
for cmd in self.preparation_commands:
self.awg.send_cmd(cmd)
self.dac = DummyDAC()
self.hardware_setup = HardwareSetup()
self.hardware_setup.set_channel('TABOR_A', PlaybackChannel(self.awg.channel_pair_AB, 0))
self.hardware_setup.set_channel('TABOR_B', PlaybackChannel(self.awg.channel_pair_AB, 1))
self.hardware_setup.set_channel('TABOR_A_MARKER', MarkerChannel(self.awg.channel_pair_AB, 0))
self.hardware_setup.set_channel('TABOR_B_MARKER', MarkerChannel(self.awg.channel_pair_AB, 1))
self.hardware_setup.set_channel('TABOR_C', PlaybackChannel(self.awg.channel_pair_CD, 0))
self.hardware_setup.set_channel('TABOR_D', PlaybackChannel(self.awg.channel_pair_CD, 1))
self.hardware_setup.set_channel('TABOR_C_MARKER', MarkerChannel(self.awg.channel_pair_CD, 0))
self.hardware_setup.set_channel('TABOR_D_MARKER', MarkerChannel(self.awg.channel_pair_CD, 1))
self.hardware_setup.set_measurement("MEAS_A", MeasurementMask(self.dac, "MASK_A"))
self.hardware_setup.set_measurement("MEAS_B", MeasurementMask(self.dac, "MASK_B"))
self.hardware_setup.set_measurement("MEAS_C", MeasurementMask(self.dac, "MASK_C"))
self.hardware_setup.set_measurement("MEAS_D", MeasurementMask(self.dac, "MASK_D"))
def read_program(self):
self.program_AB = self.awg.channel_pair_AB.read_complete_program()
self.program_CD = self.awg.channel_pair_CD.read_complete_program()
return self.program_AB, self.program_CD
class CompleteIntegrationTestHelper(unittest.TestCase):
data_folder = None
pulse_name = None
@classmethod
def setUpClass(cls):
if cls.data_folder is None:
raise unittest.SkipTest("Base class")
cls.test_state = TaborLoadingAndSequencingHelper(cls.data_folder, cls.pulse_name)
def test_1_1_deserialization(self):
with self.assertWarns(DeprecationWarning):
self.test_state.deserialize_pulse()
def test_1_2_deserialization_2018(self) -> None:
self.test_state.deserialize_pulse_2018()
def test_2_1_sequencing(self):
if self.test_state.pulse is None:
self.skipTest("deserialization failed")
self.test_state.sequence_pulse()
def test_3_1_initialize_hardware_setup(self):
self.test_state.initialize_hardware_setup()
def test_4_1_register_program(self):
if self.test_state.hardware_setup is None:
self.skipTest("No hardware setup")
self.test_state.register_program()
self.assertIn(self.pulse_name, self.test_state.hardware_setup.registered_programs)
def test_5_1_arm_program(self):
if self.test_state.hardware_setup is None:
self.skipTest("No hardware setup")
if self.pulse_name not in self.test_state.hardware_setup.registered_programs:
self.skipTest("Program is not registered")
self.test_state.hardware_setup.arm_program(self.pulse_name)
self.assertEqual(self.test_state.awg.channel_pair_AB._current_program, self.pulse_name,
"Program not armed on AB")
self.assertEqual(self.test_state.awg.channel_pair_CD._current_program, self.pulse_name,
"Program not armed on CD")
def test_6_1_read_program(self):
if self.test_state.hardware_setup is None:
self.skipTest("No hardware setup")
if self.test_state.awg.channel_pair_AB._current_program != self.pulse_name:
self.skipTest("Program not armed on AB")
if self.test_state.awg.channel_pair_CD._current_program != self.pulse_name:
self.skipTest("Program not armed on CD")
self.test_state.read_program()
def test_7_1_verify_program(self):
if self.test_state.hardware_setup is None:
self.skipTest("No hardware setup")
if self.test_state.expected_binary_programs is not None:
self.assertEqual(self.test_state.expected_binary_programs[0], self.test_state.program_AB)
self.assertEqual(self.test_state.expected_binary_programs[1], self.test_state.program_CD)
elif self.test_state.validate_programs:
self.test_state.validate_programs(self.test_state.program_AB,
self.test_state.program_CD,
self.test_state.validation_data,
self.test_state.parameters)
else:
self.skipTest("No expected programs given.")
class ChargeScan1Tests(CompleteIntegrationTestHelper):
data_folder = os.path.join(os.path.dirname(__file__), 'charge_scan_1')
pulse_name = 'charge_scan'
| 43.266272
| 114
| 0.711844
|
e09cbead5280a0460aa71aef688bfa39134f0c93
| 2,271
|
py
|
Python
|
build/license.py
|
coolofficials/hafnium-verification
|
5483868545cae54fbe02c044a02ba0f4999f1f8b
|
[
"Apache-2.0"
] | 30
|
2019-06-28T15:20:25.000Z
|
2021-08-25T11:37:30.000Z
|
build/license.py
|
coolofficials/hafnium-verification
|
5483868545cae54fbe02c044a02ba0f4999f1f8b
|
[
"Apache-2.0"
] | 75
|
2019-07-16T04:23:53.000Z
|
2021-03-26T15:35:14.000Z
|
build/license.py
|
coolofficials/hafnium-verification
|
5483868545cae54fbe02c044a02ba0f4999f1f8b
|
[
"Apache-2.0"
] | 17
|
2019-06-28T14:40:41.000Z
|
2021-03-11T19:26:13.000Z
|
#!/usr/bin/env python
#
# Copyright 2018 The Hafnium Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Add license header to source files.
If the file doesn't have the license header, add it with the appropriate comment
style.
"""
import argparse
import datetime
import re
import sys
apache2 = """{comment} Copyright {year} The Hafnium Authors.
{comment}
{comment} Licensed under the Apache License, Version 2.0 (the "License");
{comment} you may not use this file except in compliance with the License.
{comment} You may obtain a copy of the License at
{comment}
{comment} https://www.apache.org/licenses/LICENSE-2.0
{comment}
{comment} Unless required by applicable law or agreed to in writing, software
{comment} distributed under the License is distributed on an "AS IS" BASIS,
{comment} WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
{comment} See the License for the specific language governing permissions and
{comment} limitations under the License."""
def Main():
parser = argparse.ArgumentParser()
parser.add_argument("file")
parser.add_argument("--style", choices=["c", "hash"], required=True)
args = parser.parse_args()
header = "/*\n" if args.style == "c" else ""
year = str(datetime.datetime.now().year)
header += apache2.format(comment=" *" if args.style == "c" else "#", year=year)
header += "\n */" if args.style == "c" else ""
header += "\n\n"
header_regex = re.escape(header).replace(year, r"\d\d\d\d")
with open(args.file, "r") as f:
contents = f.read()
if re.search(header_regex, contents):
return
with open(args.file, "w") as f:
f.write(header)
f.write(contents)
if __name__ == "__main__":
sys.exit(Main())
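# Example invocations (sketch; the file paths are hypothetical):
#
#   python build/license.py --style c    src/api.c       # C-style /* ... */ block
#   python build/license.py --style hash build/image.py  # '#'-prefixed lines
#
# Running the script twice is safe: header_regex matches the header with any
# four-digit year, so files that already carry it are left untouched.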
| 35.484375
| 83
| 0.702334
|
e723a007c888a21ddec843d01a44a9465f44e390
| 3,889
|
py
|
Python
|
scholariumat/products/utils.py
|
valuehack/scholariumat
|
47c13f3429b95b9ad5ca59b45cf971895260bb5c
|
[
"MIT"
] | null | null | null |
scholariumat/products/utils.py
|
valuehack/scholariumat
|
47c13f3429b95b9ad5ca59b45cf971895260bb5c
|
[
"MIT"
] | 232
|
2018-06-30T11:40:52.000Z
|
2020-04-29T23:55:41.000Z
|
scholariumat/products/utils.py
|
valuehack/scholariumat
|
47c13f3429b95b9ad5ca59b45cf971895260bb5c
|
[
"MIT"
] | 3
|
2018-05-31T12:57:03.000Z
|
2020-02-27T16:25:44.000Z
|
import requests
from requests.auth import HTTPBasicAuth
import xml.dom.minidom
import logging
from django.conf import settings
logger = logging.getLogger(__name__)
class SofortPayment(object):
"""
Idee der Sofort-Api-Schnittstelle:
mit jedem Api-Aufruf müssen Authentifizierungsdaten mitgeschickt werden
die sind aus dem Klassenattribut verfügbar
den Aufrufen sind xml-Dateien anzuhängen, die die Daten enthalten
die Zahlung hat eine id bei sofort und gehört zu einem payment in der lokalen DB
neben den Daten der Zahlung (Betrag, Betreff, etc) akzeptiert die api drei urls,
die die Defaultwerte aus dem Sofort-Kundenkonto überschreiben:
für die Weiterleitung des Kunden bei Erfolg, bei Fehler, und für
die Benachrichtigung vom Shop bei jeglicher späteren Statusänderung
"""
url = 'https://api.sofort.com/api/xml'
pw = settings.SOFORT_KEY
headers = {
'Content-Type': 'application/xml',
'Accept': 'application/xml',
}
project_id = settings.SOFORT_PROJECT_ID
success_url = 'https://scholarium.at/'
abort_url = 'https://scholarium.at/'
creation_template = """<?xml version="1.0" encoding="UTF-8" ?>
<multipay>
<project_id>{project_id}</project_id>
<interface_version>testilja</interface_version>
<amount>{amount}</amount>
<currency_code>EUR</currency_code>
<beneficiary>
<identifier>scholarium</identifier>
<country_code>AT</country_code>
</beneficiary>
<reasons>
<reason>{reason}</reason>
<reason>-TRANSACTION-</reason>
</reasons>
<user_variables>
<user_variable>spam</user_variable>
</user_variables>
<success_url>{success_url}</success_url>
<success_link_redirect>1</success_link_redirect>
<abort_url>{abort_url}</abort_url>
<notification_urls>
<notification_url>https://scholarium.at/spam</notification_url>
</notification_urls>
<su />
</multipay>
"""
def creation_string(self, **kwargs):
params = dict(
amount=75.0,
reason='Spende scholarium.at',
success_url=self.success_url,
abort_url=self.abort_url,
project_id=self.project_id,
)
params.update(kwargs)
return self.creation_template.format(**params)
def __init__(self, **kwargs):
""" Initializes new payment """
response = self.post(self.creation_string(**kwargs))
self.init_response = response
if response.status_code != 200 or 'error' in response.text:
logger.error(f'Failed initiating SOFORT payment. Status code: {response.status_code}, text: {response.text}')
raise Exception('SOFORT init failed.')
self.return_url = response.text.split('payment_url')[1][1:-2]
self.sofort_id = response.text.split('<transaction>')[1].split('</transaction>')[0]
return None
@classmethod
def check_status(cls, sofort_id):
""" Ruft die Daten zu einer Transaktions-id ab
theoretisch kann man statt nach der Nummer auch nach allen Transaktionen
in einem Zeitraum fragen, das habe ich aber nicht implementiert. """
response = cls.post("""<?xml version="1.0" encoding="UTF-8" ?>
<transaction_request version="2">
<transaction>{sofort_id}</transaction>
</transaction_request>""".format(sofort_id=sofort_id))
logger.info(xml.dom.minidom.parseString(response.text).toprettyxml())
return response
@classmethod
def post(cls, text):
""" Anfrage an die Sofort-Api """
r = requests.post(
cls.url,
auth=HTTPBasicAuth('120628', cls.pw),
headers=cls.headers,
data=text,
)
return r
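# Hedged usage sketch (not part of the original module): create a payment, send
# the customer to the returned URL, then poll the status later. The amount and
# reason below are illustrative; real SOFORT credentials and network access are
# required, so the calls are left commented out.
#
#     payment = SofortPayment(amount=10.0, reason='Testzahlung')
#     # redirect the customer to payment.return_url, then later:
#     SofortPayment.check_status(payment.sofort_id)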
| 34.723214
| 125
| 0.642582
|
24033bf8f6bd4a8bfd1da1aed4432bb52102e607
| 686
|
py
|
Python
|
proj01/proj01.py
|
boredtorchic/Birb_proj
|
c87b321727cca2c43b7611f4f70d6db6a322b78c
|
[
"MIT"
] | null | null | null |
proj01/proj01.py
|
boredtorchic/Birb_proj
|
c87b321727cca2c43b7611f4f70d6db6a322b78c
|
[
"MIT"
] | null | null | null |
proj01/proj01.py
|
boredtorchic/Birb_proj
|
c87b321727cca2c43b7611f4f70d6db6a322b78c
|
[
"MIT"
] | null | null | null |
# Name:
# Date:
# proj01: A Simple Program
# This program asks the user for his/her name and age.
# Then, it prints a sentence that says when the user will turn 100.
# If you have not had your birthday yet this year, the answer comes one year sooner.
User_input1 = raw_input("Enter your name: ")
User_input2 = raw_input("Enter your age: ")
User_input3 = raw_input("Have you had your birthday yet? Y/N: ")
if User_input3 == "Y":
    # Birthday already passed: the user turns 100 in (100 - age) years.
    My_str4 = 100 - int(User_input2)
else:
    # Birthday still ahead this year: one year sooner.
    My_str4 = 99 - int(User_input2)
N = 2017 + My_str4
My_str = "will be 100 years old in"
My_str2 = "years! Which is the year"
My_str3 = "so you better start living your life."
print User_input1, My_str, My_str4, My_str2, N, My_str3
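# A minimal Python 3 rewrite of the same exercise (sketch, commented out so the
# Python 2 file above still runs as-is):
#
#     import datetime
#     name = input("Enter your name: ")
#     age = int(input("Enter your age: "))
#     had_birthday = input("Have you had your birthday yet? Y/N: ") == "Y"
#     years_left = (100 - age) if had_birthday else (99 - age)
#     year = datetime.date.today().year + years_left
#     print(name, "will be 100 years old in", years_left,
#           "years! Which is the year", year, "so you better start living your life.")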
| 28.583333
| 69
| 0.705539
|
4e8988ce69d66bc9c4adababee666b2501460f7f
| 966
|
py
|
Python
|
setup.py
|
Mortafix/Colorifix
|
5ea3c72403a53032054c35feda320522b1fce71d
|
[
"MIT"
] | null | null | null |
setup.py
|
Mortafix/Colorifix
|
5ea3c72403a53032054c35feda320522b1fce71d
|
[
"MIT"
] | null | null | null |
setup.py
|
Mortafix/Colorifix
|
5ea3c72403a53032054c35feda320522b1fce71d
|
[
"MIT"
] | null | null | null |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="colorifix",
version="2.0.2",
author="Moris Doratiotto",
author_email="moris.doratiotto@gmail.com",
description="A python module to color your terminal output life",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/mortafix/Colorifix",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
],
python_requires=">=3.6",
keywords=["color", "bash", "terminal", "crayons"],
)
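# Packaging sketch (assumes pip is available; the 'build' package must be
# installed for the second command):
#
#   pip install .        # install straight from a checkout
#   python -m build      # produce the sdist and wheel for upload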
| 33.310345
| 69
| 0.638716
|
fb23aa3abf9f91a447c8e488098ce23ee4c7b396
| 4,981
|
py
|
Python
|
robote/__main__.py
|
gufimov/maybe-delete-as-f-
|
836cef51dd26bb132b8714820ad0eb1c008918f1
|
[
"MIT"
] | 1
|
2021-06-28T19:45:51.000Z
|
2021-06-28T19:45:51.000Z
|
robote/__main__.py
|
gufimov/maybe-delete-as-f-
|
836cef51dd26bb132b8714820ad0eb1c008918f1
|
[
"MIT"
] | null | null | null |
robote/__main__.py
|
gufimov/maybe-delete-as-f-
|
836cef51dd26bb132b8714820ad0eb1c008918f1
|
[
"MIT"
] | 4
|
2020-11-19T17:43:57.000Z
|
2021-12-22T09:40:51.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import io
import sys
import traceback
from robote import(DOWNLOAD_LOCATION, TEGE_TOKEN, TEGE_API_ID, TEGE_API_HASH, AUTH_CHANNEL, LEECH_COMMAND, YTDL_COMMAND, GLEECH_COMMAND, TELEGRAM_LEECH_COMMAND_G, CANCEL_COMMAND_G, GET_SIZE_G, STATUS_COMMAND, SAVE_THUMBNAIL, CLEAR_THUMBNAIL, PYTDL_COMMAND_G, LOG_COMMAND)
from pyrogram import Client, Filters, MessageHandler, CallbackQueryHandler
from robote.started.new_join_fn import new_join_f, help_message_f, rename_message_f
from robote.started.incoming_message_fn import incoming_message_f, incoming_youtube_dl_f, incoming_purge_message_f, incoming_gdrive_message_f, g_yt_playlist
from robote.started.memek_size import check_size_g, g_clearme
from robote.started.status_message_fn import(status_message_f, cancel_message_f, exec_message_f, upload_document_f, upload_log_file)
from robote.started.callback_btn_handler import button
from robote.started.thumbnail_video import(save_thumb_nail, clear_thumb_nail)
from robote.heroku.download import down_load_media_f
if __name__ == "__main__":
if not os.path.isdir(DOWNLOAD_LOCATION):
os.makedirs(DOWNLOAD_LOCATION)
app = Client(
"robote_lokal",
bot_token = TEGE_TOKEN,
api_id = TEGE_API_ID,
api_hash = TEGE_API_HASH
)
callback_btn_handler = CallbackQueryHandler(button)
app.add_handler(callback_btn_handler)
cancel_message_handler = MessageHandler(cancel_message_f, filters = Filters.command([f"{CANCEL_COMMAND_G}"]) & Filters.chat(chats = AUTH_CHANNEL))
app.add_handler(cancel_message_handler)
clear_thumb_nail_handler = MessageHandler(clear_thumb_nail, filters = Filters.command([f"{CLEAR_THUMBNAIL}"]) & Filters.chat(chats = AUTH_CHANNEL))
app.add_handler(clear_thumb_nail_handler)
exec_message_handler = MessageHandler(exec_message_f, filters = Filters.command(["exec"]) & Filters.chat(chats = AUTH_CHANNEL))
app.add_handler(exec_message_handler)
group_new_join_handler = MessageHandler(help_message_f, filters = Filters.chat(chats = AUTH_CHANNEL) & Filters.new_chat_members)
app.add_handler(group_new_join_handler)
help_text_handler = MessageHandler(help_message_f, filters = Filters.command(["help"]) & Filters.chat(chats = AUTH_CHANNEL))
app.add_handler(help_text_handler)
incoming_g_clear_handler = MessageHandler(g_clearme, filters = Filters.command(["renewme"]) & Filters.chat(chats = AUTH_CHANNEL))
app.add_handler(incoming_g_clear_handler)
incoming_gdrive_message_handler = MessageHandler(incoming_gdrive_message_f, filters = Filters.command([f"{GLEECH_COMMAND}"]) & Filters.chat(chats = AUTH_CHANNEL))
app.add_handler(incoming_gdrive_message_handler)
incoming_message_handler = MessageHandler(incoming_message_f, filters = Filters.command([f"{LEECH_COMMAND}"]) & Filters.chat(chats = AUTH_CHANNEL))
app.add_handler(incoming_message_handler)
incoming_purge_message_handler = MessageHandler(incoming_purge_message_f, filters = Filters.command(["purge"]) & Filters.chat(chats = AUTH_CHANNEL))
app.add_handler(incoming_purge_message_handler)
incoming_size_checker_handler = MessageHandler(check_size_g, filters = Filters.command([f"{GET_SIZE_G}"]) & Filters.chat(chats = AUTH_CHANNEL))
app.add_handler(incoming_size_checker_handler)
incoming_telegram_download_handler = MessageHandler(down_load_media_f, filters = Filters.command([f"{TELEGRAM_LEECH_COMMAND_G}"]) & Filters.chat(chats = AUTH_CHANNEL))
app.add_handler(incoming_telegram_download_handler)
incoming_youtube_dl_handler = MessageHandler(incoming_youtube_dl_f, filters = Filters.command([f"{YTDL_COMMAND}"]) & Filters.chat(chats = AUTH_CHANNEL))
app.add_handler(incoming_youtube_dl_handler)
incoming_youtube_playlist_dl_handler = MessageHandler(g_yt_playlist, filters = Filters.command([f"{PYTDL_COMMAND_G}"]) & Filters.chat(chats = AUTH_CHANNEL))
app.add_handler(incoming_youtube_playlist_dl_handler)
new_join_handler = MessageHandler(new_join_f, filters = ~Filters.chat(chats = AUTH_CHANNEL))
app.add_handler(new_join_handler)
rename_message_handler = MessageHandler(rename_message_f, filters = Filters.command(["rename@hahahahahaha"]) & Filters.chat(chats = AUTH_CHANNEL))
app.add_handler(rename_message_handler)
save_thumb_nail_handler = MessageHandler(save_thumb_nail, filters = Filters.command([f"{SAVE_THUMBNAIL}"]) & Filters.chat(chats = AUTH_CHANNEL))
app.add_handler(save_thumb_nail_handler)
status_message_handler = MessageHandler(status_message_f, filters = Filters.command([f"{STATUS_COMMAND}"]) & Filters.chat(chats = AUTH_CHANNEL))
app.add_handler(status_message_handler)
upload_document_handler = MessageHandler(upload_document_f, filters = Filters.command(["upload"]) & Filters.chat(chats = AUTH_CHANNEL))
app.add_handler(upload_document_handler)
upload_log_handler = MessageHandler(upload_log_file, filters = Filters.command([f"{LOG_COMMAND}"]) & Filters.chat(chats = AUTH_CHANNEL))
app.add_handler(upload_log_handler)
app.run()
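# Hedged sketch: one more command follows the exact register pattern used
# above. The "ping" command and ping_f callback are hypothetical, and the lines
# would need to sit before app.run() inside the __main__ block.
#
#     def ping_f(client, message):
#         message.reply_text("pong")
#     ping_handler = MessageHandler(ping_f, filters = Filters.command(["ping"]) & Filters.chat(chats = AUTH_CHANNEL))
#     app.add_handler(ping_handler)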
| 71.157143
| 271
| 0.816101
|
c85d1aad36359b7dce59d71fb00a6e6472928abc
| 4,951
|
py
|
Python
|
timesketch/models/user.py
|
rushattac/timesketch
|
0a6fa301aadb36db450ec993c5cf51452c375de2
|
[
"Apache-2.0"
] | 1,810
|
2015-01-03T22:34:45.000Z
|
2022-03-30T10:23:18.000Z
|
timesketch/models/user.py
|
rushattac/timesketch
|
0a6fa301aadb36db450ec993c5cf51452c375de2
|
[
"Apache-2.0"
] | 1,291
|
2015-01-08T00:00:12.000Z
|
2022-03-29T03:26:58.000Z
|
timesketch/models/user.py
|
rushattac/timesketch
|
0a6fa301aadb36db450ec993c5cf51452c375de2
|
[
"Apache-2.0"
] | 519
|
2015-01-20T09:26:06.000Z
|
2022-03-29T11:02:10.000Z
|
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module implements the user model."""
from __future__ import unicode_literals
import codecs
import six
from flask_bcrypt import generate_password_hash
from flask_bcrypt import check_password_hash
from flask_login import UserMixin
from sqlalchemy.types import Boolean
from sqlalchemy import Column
from sqlalchemy import PrimaryKeyConstraint
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import Table
from sqlalchemy import Unicode
from sqlalchemy import UnicodeText
from sqlalchemy.orm import backref
from sqlalchemy.orm import relationship
from timesketch.models import BaseModel
from timesketch.models.annotations import LabelMixin
from timesketch.models.annotations import StatusMixin
# Helper table for Groups many-to-many relationship.
user_group = Table('user_group', BaseModel.metadata,
Column('user_id', Integer(), ForeignKey('user.id')),
Column('group_id', Integer(), ForeignKey('group.id')),
PrimaryKeyConstraint('user_id', 'group_id'))
class User(UserMixin, BaseModel):
"""Implements the User model."""
username = Column(Unicode(255), unique=True)
password = Column(Unicode(128))
name = Column(Unicode(255))
email = Column(Unicode(255))
active = Column(Boolean(), default=True)
admin = Column(Boolean(), default=False)
sketches = relationship('Sketch', backref='user', lazy='dynamic')
searchindices = relationship(
'SearchIndex', backref='user', lazy='dynamic')
timelines = relationship('Timeline', backref='user', lazy='dynamic')
views = relationship('View', backref='user', lazy='dynamic')
searchhistories = relationship(
'SearchHistory', backref='user', lazy='dynamic')
stories = relationship('Story', backref='user', lazy='dynamic')
aggregations = relationship('Aggregation', backref='user', lazy='dynamic')
datasources = relationship('DataSource', backref='user', lazy='dynamic')
aggregationgroups = relationship(
'AggregationGroup', backref='user', lazy='dynamic')
my_groups = relationship('Group', backref='user', lazy='dynamic')
groups = relationship(
'Group',
secondary=user_group,
backref=backref('users', lazy='dynamic'))
def __init__(self, username, name=None):
"""Initialize the User object.
Args:
username: Username for the user
name: Name of the user
"""
super().__init__()
self.username = username
self.name = name
if not name:
self.name = username
def set_password(self, plaintext, rounds=12):
"""Sets the password for the user. The password hash is created with the
Bcrypt python library (http://www.mindrot.org/projects/py-bcrypt/).
Args:
plaintext: The plaintext password to hash
rounds: Number of rounds to use for the bcrypt hashing
"""
password_hash = generate_password_hash(plaintext, rounds)
if isinstance(password_hash, six.binary_type):
password_hash = codecs.decode(password_hash, 'utf-8')
self.password = password_hash
def check_password(self, plaintext):
"""Check a plaintext password against a stored password hash.
Args:
plaintext: A plaintext password
Returns:
A boolean value indicating if the plaintext password matches the
stored password hash.
"""
return check_password_hash(self.password, plaintext)
class Group(LabelMixin, StatusMixin, BaseModel):
"""Implements the Group model."""
name = Column(Unicode(255), unique=True)
display_name = Column(Unicode(255))
description = Column(UnicodeText())
user_id = Column(Integer, ForeignKey('user.id'))
def __init__(self, name, display_name=None, description=None, user=None):
"""Initialize the Group object.
Args:
name: Name of the group
display_name: User friendly name of the group
description: Description of the group
user: Creator (instance of timesketch.models.user.User)
"""
super().__init__()
self.name = name
self.display_name = display_name or name
self.description = description or name
self.user = user
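# Hedged usage sketch (illustrative, not part of the module): the bcrypt round
# trip can be exercised without a database session, since __init__ only sets
# plain attributes.
#
#     user = User('alice')
#     user.set_password('s3cret', rounds=12)
#     assert user.check_password('s3cret')
#     assert not user.check_password('wrong')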
| 36.674074
| 80
| 0.686124
|
fdcef74debd1b7b1e22646725d83d63087631e74
| 2,444
|
py
|
Python
|
.history/ClassFiles/OOP/Polymorphism_20210105142603.py
|
minefarmer/Comprehensive-Python
|
f97b9b83ec328fc4e4815607e6a65de90bb8de66
|
[
"Unlicense"
] | null | null | null |
.history/ClassFiles/OOP/Polymorphism_20210105142603.py
|
minefarmer/Comprehensive-Python
|
f97b9b83ec328fc4e4815607e6a65de90bb8de66
|
[
"Unlicense"
] | null | null | null |
.history/ClassFiles/OOP/Polymorphism_20210105142603.py
|
minefarmer/Comprehensive-Python
|
f97b9b83ec328fc4e4815607e6a65de90bb8de66
|
[
"Unlicense"
] | null | null | null |
''' Polymorphism
Polymorphism means the ability to take or have various forms.
Part 1: built-in polymorphism (len() works on strings and on lists).
Part 2: a function whose default parameter lets it take several forms.
Part 3: creating a polymorphic class method, and creating polymorphism
by using an existing method from a new function.
'''
print(len("Hello World!")) # 12
print(len([20,40,80])) # 3
# Polymorphism: Part 3
def addNumbers(a,b,c=1):
return a + b + c
print(addNumbers(8,9)) # 18 ## What I am doing here is passing arguments for the function parameters,
print(addNumbers(8,9,4)) # 21 ## Here I changed the default value of c.
class UK():
def capital_city(self):
print("London is the capital of UK")
def language(self):
print("English is the primary language ")
class Spain():
def capital_city(self):
print("Madrid is the capital of Spain")
def language(self):
print("Spanish is the primary language ")
def europe(eu):
    # A new function using the existing methods polymorphically:
    # any object offering capital_city() and language() works here.
    eu.capital_city()
    eu.language()
queen = UK() # Instantiation of the class UK
queen.capital_city() # This method is now attached to the instance of the class called UK.
zara = Spain()
zara.capital_city()
for country in (queen, zara):
    country.capital_city()
    country.language()

# The same behaviour through the new europe() function:
europe(queen)
europe(zara)

# Output of the loop, produced by each instance resolving the methods
# defined on its own class:
# London is the capital of UK
# English is the primary language
# Madrid is the capital of Spain
# Spanish is the primary language
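# Extra sketch (not from the lesson): len() itself is polymorphic because it
# defers to each object's __len__, so our own classes can join in.
class Playlist:
    def __init__(self, songs):
        self.songs = songs
    def __len__(self):
        return len(self.songs)

print(len(Playlist(["a", "b", "c"])))  # 3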
| 23.960784
| 155
| 0.605974
|
0e0bba36448441dae879da4f6f12ff91c16d1ce2
| 385
|
py
|
Python
|
pycomlink/spatial/__init__.py
|
cchwala/pycomlink
|
979d5595243a06278052023cc5f9aec09fbd8e12
|
[
"BSD-3-Clause"
] | 12
|
2017-04-26T07:27:39.000Z
|
2021-08-31T13:23:59.000Z
|
pycomlink/spatial/__init__.py
|
cchwala/pycomlink
|
979d5595243a06278052023cc5f9aec09fbd8e12
|
[
"BSD-3-Clause"
] | 72
|
2017-08-24T20:15:24.000Z
|
2022-03-09T08:55:03.000Z
|
pycomlink/spatial/__init__.py
|
cchwala/pycomlink
|
979d5595243a06278052023cc5f9aec09fbd8e12
|
[
"BSD-3-Clause"
] | 18
|
2016-11-08T15:33:12.000Z
|
2021-07-13T11:55:19.000Z
|
# ----------------------------------------------------------------------------
# Name:
# Purpose:
#
# Authors:
#
# Created:
# Copyright: (c) Christian Chwala 2014
# Licence: The MIT License
# ----------------------------------------------------------------------------
from __future__ import absolute_import
from . import idw
from . import interpolator
from . import coverage
| 24.0625
| 78
| 0.420779
|
485aaf28b243688fad464a810af835ae4e0853d2
| 110,029
|
py
|
Python
|
mfem/_ser/coefficient.py
|
mfem/PyMFEM
|
b7b7c3d3de1082eac1015e3a313cf513db06fd7b
|
[
"BSD-3-Clause"
] | 93
|
2017-03-01T16:45:33.000Z
|
2022-03-27T22:10:33.000Z
|
mfem/_ser/coefficient.py
|
mfem/PyMFEM
|
b7b7c3d3de1082eac1015e3a313cf513db06fd7b
|
[
"BSD-3-Clause"
] | 64
|
2017-03-15T21:47:31.000Z
|
2022-03-31T23:59:00.000Z
|
mfem/_ser/coefficient.py
|
mfem/PyMFEM
|
b7b7c3d3de1082eac1015e3a313cf513db06fd7b
|
[
"BSD-3-Clause"
] | 32
|
2017-03-02T22:13:38.000Z
|
2022-03-26T13:09:31.000Z
|
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 4.0.2
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info < (2, 7, 0):
raise RuntimeError("Python 2.7 or later required")
# Import the low-level C/C++ module
if __package__ or "." in __name__:
from . import _coefficient
else:
import _coefficient
try:
import builtins as __builtin__
except ImportError:
import __builtin__
_swig_new_instance_method = _coefficient.SWIG_PyInstanceMethod_New
_swig_new_static_method = _coefficient.SWIG_PyStaticMethod_New
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except __builtin__.Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
def _swig_setattr_nondynamic_instance_variable(set):
def set_instance_attr(self, name, value):
if name == "thisown":
self.this.own(value)
elif name == "this":
set(self, name, value)
elif hasattr(self, name) and isinstance(getattr(type(self), name), property):
set(self, name, value)
else:
raise AttributeError("You cannot add instance attributes to %s" % self)
return set_instance_attr
def _swig_setattr_nondynamic_class_variable(set):
def set_class_attr(cls, name, value):
if hasattr(cls, name) and not isinstance(getattr(cls, name), property):
set(cls, name, value)
else:
raise AttributeError("You cannot add class attributes to %s" % cls)
return set_class_attr
def _swig_add_metaclass(metaclass):
"""Class decorator for adding a metaclass to a SWIG wrapped class - a slimmed down version of six.add_metaclass"""
def wrapper(cls):
return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())
return wrapper
class _SwigNonDynamicMeta(type):
"""Meta class to enforce nondynamic attributes (no new attributes) for a class"""
__setattr__ = _swig_setattr_nondynamic_class_variable(type.__setattr__)
import weakref
import mfem._ser.globals
import mfem._ser.array
import mfem._ser.mem_manager
import mfem._ser.matrix
import mfem._ser.vector
import mfem._ser.operators
import mfem._ser.intrules
import mfem._ser.sparsemat
import mfem._ser.densemat
import mfem._ser.eltrans
import mfem._ser.fe
import mfem._ser.geom
class Coefficient(object):
r"""Proxy of C++ mfem::Coefficient class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
def SetTime(self, t):
r"""SetTime(Coefficient self, double t)"""
return _coefficient.Coefficient_SetTime(self, t)
SetTime = _swig_new_instance_method(_coefficient.Coefficient_SetTime)
def GetTime(self):
r"""GetTime(Coefficient self) -> double"""
return _coefficient.Coefficient_GetTime(self)
GetTime = _swig_new_instance_method(_coefficient.Coefficient_GetTime)
def Eval(self, *args):
r"""
Eval(Coefficient self, ElementTransformation T, IntegrationPoint ip) -> double
Eval(Coefficient self, ElementTransformation T, IntegrationPoint ip, double t) -> double
"""
return _coefficient.Coefficient_Eval(self, *args)
Eval = _swig_new_instance_method(_coefficient.Coefficient_Eval)
__swig_destroy__ = _coefficient.delete_Coefficient
# Register Coefficient in _coefficient:
_coefficient.Coefficient_swigregister(Coefficient)
class ConstantCoefficient(Coefficient):
r"""Proxy of C++ mfem::ConstantCoefficient class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
constant = property(_coefficient.ConstantCoefficient_constant_get, _coefficient.ConstantCoefficient_constant_set, doc=r"""constant : double""")
def __init__(self, c=1.0):
r"""__init__(ConstantCoefficient self, double c=1.0) -> ConstantCoefficient"""
_coefficient.ConstantCoefficient_swiginit(self, _coefficient.new_ConstantCoefficient(c))
def Eval(self, T, ip):
r"""Eval(ConstantCoefficient self, ElementTransformation T, IntegrationPoint ip) -> double"""
return _coefficient.ConstantCoefficient_Eval(self, T, ip)
Eval = _swig_new_instance_method(_coefficient.ConstantCoefficient_Eval)
__swig_destroy__ = _coefficient.delete_ConstantCoefficient
# Register ConstantCoefficient in _coefficient:
_coefficient.ConstantCoefficient_swigregister(ConstantCoefficient)
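# Hedged usage note (comments only, since this file is SWIG-generated): the
# wrapper is normally consumed via `import mfem.ser as mfem`, and a
# ConstantCoefficient evaluates to the same value at every integration point:
#
#     one = mfem.ConstantCoefficient(1.0)
#
# It can then be passed wherever an abstract mfem::Coefficient is expected.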
class PWConstCoefficient(Coefficient):
r"""Proxy of C++ mfem::PWConstCoefficient class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, *args):
r"""
__init__(PWConstCoefficient self, int NumOfSubD=0) -> PWConstCoefficient
__init__(PWConstCoefficient self, Vector c) -> PWConstCoefficient
"""
_coefficient.PWConstCoefficient_swiginit(self, _coefficient.new_PWConstCoefficient(*args))
def UpdateConstants(self, c):
r"""UpdateConstants(PWConstCoefficient self, Vector c)"""
return _coefficient.PWConstCoefficient_UpdateConstants(self, c)
UpdateConstants = _swig_new_instance_method(_coefficient.PWConstCoefficient_UpdateConstants)
def __call__(self, i):
r"""__call__(PWConstCoefficient self, int i) -> double &"""
return _coefficient.PWConstCoefficient___call__(self, i)
__call__ = _swig_new_instance_method(_coefficient.PWConstCoefficient___call__)
def GetNConst(self):
r"""GetNConst(PWConstCoefficient self) -> int"""
return _coefficient.PWConstCoefficient_GetNConst(self)
GetNConst = _swig_new_instance_method(_coefficient.PWConstCoefficient_GetNConst)
def Eval(self, T, ip):
r"""Eval(PWConstCoefficient self, ElementTransformation T, IntegrationPoint ip) -> double"""
return _coefficient.PWConstCoefficient_Eval(self, T, ip)
Eval = _swig_new_instance_method(_coefficient.PWConstCoefficient_Eval)
__swig_destroy__ = _coefficient.delete_PWConstCoefficient
# Register PWConstCoefficient in _coefficient:
_coefficient.PWConstCoefficient_swigregister(PWConstCoefficient)
class FunctionCoefficient(Coefficient):
r"""Proxy of C++ mfem::FunctionCoefficient class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, *args):
r"""
__init__(FunctionCoefficient self, std::function< double (mfem::Vector const &) > F) -> FunctionCoefficient
__init__(FunctionCoefficient self, std::function< double (mfem::Vector const &,double) > TDF) -> FunctionCoefficient
__init__(FunctionCoefficient self, double (*)(mfem::Vector &) f) -> FunctionCoefficient
__init__(FunctionCoefficient self, double (*)(mfem::Vector &,double) tdf) -> FunctionCoefficient
"""
_coefficient.FunctionCoefficient_swiginit(self, _coefficient.new_FunctionCoefficient(*args))
def Eval(self, T, ip):
r"""Eval(FunctionCoefficient self, ElementTransformation T, IntegrationPoint ip) -> double"""
return _coefficient.FunctionCoefficient_Eval(self, T, ip)
Eval = _swig_new_instance_method(_coefficient.FunctionCoefficient_Eval)
__swig_destroy__ = _coefficient.delete_FunctionCoefficient
# Register FunctionCoefficient in _coefficient:
_coefficient.FunctionCoefficient_swigregister(FunctionCoefficient)
class GridFunctionCoefficient(Coefficient):
r"""Proxy of C++ mfem::GridFunctionCoefficient class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, *args):
r"""
__init__(GridFunctionCoefficient self) -> GridFunctionCoefficient
__init__(GridFunctionCoefficient self, mfem::GridFunction const * gf, int comp=1) -> GridFunctionCoefficient
"""
_coefficient.GridFunctionCoefficient_swiginit(self, _coefficient.new_GridFunctionCoefficient(*args))
def SetGridFunction(self, gf):
r"""SetGridFunction(GridFunctionCoefficient self, mfem::GridFunction const * gf)"""
return _coefficient.GridFunctionCoefficient_SetGridFunction(self, gf)
SetGridFunction = _swig_new_instance_method(_coefficient.GridFunctionCoefficient_SetGridFunction)
def GetGridFunction(self):
r"""GetGridFunction(GridFunctionCoefficient self) -> mfem::GridFunction const *"""
return _coefficient.GridFunctionCoefficient_GetGridFunction(self)
GetGridFunction = _swig_new_instance_method(_coefficient.GridFunctionCoefficient_GetGridFunction)
def Eval(self, T, ip):
r"""Eval(GridFunctionCoefficient self, ElementTransformation T, IntegrationPoint ip) -> double"""
return _coefficient.GridFunctionCoefficient_Eval(self, T, ip)
Eval = _swig_new_instance_method(_coefficient.GridFunctionCoefficient_Eval)
__swig_destroy__ = _coefficient.delete_GridFunctionCoefficient
# Register GridFunctionCoefficient in _coefficient:
_coefficient.GridFunctionCoefficient_swigregister(GridFunctionCoefficient)
class TransformedCoefficient(Coefficient):
r"""Proxy of C++ mfem::TransformedCoefficient class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, *args):
r"""
__init__(TransformedCoefficient self, Coefficient q, double (*)(double) F) -> TransformedCoefficient
__init__(TransformedCoefficient self, Coefficient q1, Coefficient q2, double (*)(double,double) F) -> TransformedCoefficient
"""
_coefficient.TransformedCoefficient_swiginit(self, _coefficient.new_TransformedCoefficient(*args))
def Eval(self, T, ip):
r"""Eval(TransformedCoefficient self, ElementTransformation T, IntegrationPoint ip) -> double"""
return _coefficient.TransformedCoefficient_Eval(self, T, ip)
Eval = _swig_new_instance_method(_coefficient.TransformedCoefficient_Eval)
__swig_destroy__ = _coefficient.delete_TransformedCoefficient
# Register TransformedCoefficient in _coefficient:
_coefficient.TransformedCoefficient_swigregister(TransformedCoefficient)
class DeltaCoefficient(Coefficient):
r"""Proxy of C++ mfem::DeltaCoefficient class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, *args):
r"""
__init__(DeltaCoefficient self) -> DeltaCoefficient
__init__(DeltaCoefficient self, double x, double s) -> DeltaCoefficient
__init__(DeltaCoefficient self, double x, double y, double s) -> DeltaCoefficient
__init__(DeltaCoefficient self, double x, double y, double z, double s) -> DeltaCoefficient
"""
_coefficient.DeltaCoefficient_swiginit(self, _coefficient.new_DeltaCoefficient(*args))
def SetDeltaCenter(self, center):
r"""SetDeltaCenter(DeltaCoefficient self, Vector center)"""
return _coefficient.DeltaCoefficient_SetDeltaCenter(self, center)
SetDeltaCenter = _swig_new_instance_method(_coefficient.DeltaCoefficient_SetDeltaCenter)
def SetScale(self, _s):
r"""SetScale(DeltaCoefficient self, double _s)"""
return _coefficient.DeltaCoefficient_SetScale(self, _s)
SetScale = _swig_new_instance_method(_coefficient.DeltaCoefficient_SetScale)
def SetFunction(self, f):
r"""SetFunction(DeltaCoefficient self, double (*)(double) f)"""
return _coefficient.DeltaCoefficient_SetFunction(self, f)
SetFunction = _swig_new_instance_method(_coefficient.DeltaCoefficient_SetFunction)
def SetTol(self, _tol):
r"""SetTol(DeltaCoefficient self, double _tol)"""
return _coefficient.DeltaCoefficient_SetTol(self, _tol)
SetTol = _swig_new_instance_method(_coefficient.DeltaCoefficient_SetTol)
def SetWeight(self, w):
r"""SetWeight(DeltaCoefficient self, Coefficient w)"""
w.thisown=0
return _coefficient.DeltaCoefficient_SetWeight(self, w)
def Center(self):
r"""Center(DeltaCoefficient self) -> double const *"""
return _coefficient.DeltaCoefficient_Center(self)
Center = _swig_new_instance_method(_coefficient.DeltaCoefficient_Center)
def Scale(self):
r"""Scale(DeltaCoefficient self) -> double"""
return _coefficient.DeltaCoefficient_Scale(self)
Scale = _swig_new_instance_method(_coefficient.DeltaCoefficient_Scale)
def Tol(self):
r"""Tol(DeltaCoefficient self) -> double"""
return _coefficient.DeltaCoefficient_Tol(self)
Tol = _swig_new_instance_method(_coefficient.DeltaCoefficient_Tol)
def Weight(self):
r"""Weight(DeltaCoefficient self) -> Coefficient"""
return _coefficient.DeltaCoefficient_Weight(self)
Weight = _swig_new_instance_method(_coefficient.DeltaCoefficient_Weight)
def GetDeltaCenter(self, center):
r"""GetDeltaCenter(DeltaCoefficient self, Vector center)"""
return _coefficient.DeltaCoefficient_GetDeltaCenter(self, center)
GetDeltaCenter = _swig_new_instance_method(_coefficient.DeltaCoefficient_GetDeltaCenter)
def EvalDelta(self, T, ip):
r"""EvalDelta(DeltaCoefficient self, ElementTransformation T, IntegrationPoint ip) -> double"""
return _coefficient.DeltaCoefficient_EvalDelta(self, T, ip)
EvalDelta = _swig_new_instance_method(_coefficient.DeltaCoefficient_EvalDelta)
def Eval(self, T, ip):
r"""Eval(DeltaCoefficient self, ElementTransformation T, IntegrationPoint ip) -> double"""
return _coefficient.DeltaCoefficient_Eval(self, T, ip)
Eval = _swig_new_instance_method(_coefficient.DeltaCoefficient_Eval)
__swig_destroy__ = _coefficient.delete_DeltaCoefficient
# Register DeltaCoefficient in _coefficient:
_coefficient.DeltaCoefficient_swigregister(DeltaCoefficient)
class RestrictedCoefficient(Coefficient):
r"""Proxy of C++ mfem::RestrictedCoefficient class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, _c, attr):
r"""__init__(RestrictedCoefficient self, Coefficient _c, intArray attr) -> RestrictedCoefficient"""
_coefficient.RestrictedCoefficient_swiginit(self, _coefficient.new_RestrictedCoefficient(_c, attr))
self._ref_to_c = _c
def Eval(self, T, ip):
r"""Eval(RestrictedCoefficient self, ElementTransformation T, IntegrationPoint ip) -> double"""
return _coefficient.RestrictedCoefficient_Eval(self, T, ip)
Eval = _swig_new_instance_method(_coefficient.RestrictedCoefficient_Eval)
__swig_destroy__ = _coefficient.delete_RestrictedCoefficient
# Register RestrictedCoefficient in _coefficient:
_coefficient.RestrictedCoefficient_swigregister(RestrictedCoefficient)
class VectorCoefficient(object):
r"""Proxy of C++ mfem::VectorCoefficient class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
def SetTime(self, t):
r"""SetTime(VectorCoefficient self, double t)"""
return _coefficient.VectorCoefficient_SetTime(self, t)
SetTime = _swig_new_instance_method(_coefficient.VectorCoefficient_SetTime)
def GetTime(self):
r"""GetTime(VectorCoefficient self) -> double"""
return _coefficient.VectorCoefficient_GetTime(self)
GetTime = _swig_new_instance_method(_coefficient.VectorCoefficient_GetTime)
def GetVDim(self):
r"""GetVDim(VectorCoefficient self) -> int"""
return _coefficient.VectorCoefficient_GetVDim(self)
GetVDim = _swig_new_instance_method(_coefficient.VectorCoefficient_GetVDim)
def Eval(self, *args):
r"""
Eval(VectorCoefficient self, Vector V, ElementTransformation T, IntegrationPoint ip)
Eval(VectorCoefficient self, DenseMatrix M, ElementTransformation T, IntegrationRule ir)
"""
return _coefficient.VectorCoefficient_Eval(self, *args)
Eval = _swig_new_instance_method(_coefficient.VectorCoefficient_Eval)
__swig_destroy__ = _coefficient.delete_VectorCoefficient
# Register VectorCoefficient in _coefficient:
_coefficient.VectorCoefficient_swigregister(VectorCoefficient)
class VectorConstantCoefficient(VectorCoefficient):
r"""Proxy of C++ mfem::VectorConstantCoefficient class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, v):
r"""__init__(VectorConstantCoefficient self, Vector v) -> VectorConstantCoefficient"""
try:
import numpy as np
value = np.array(v, copy=False, dtype=float).flatten()
can_np_array = True
except:
can_np_array = False
if can_np_array:
v = mfem._ser.vector.Vector(value)
self._value = v
else:
pass
_coefficient.VectorConstantCoefficient_swiginit(self, _coefficient.new_VectorConstantCoefficient(v))
def Eval(self, *args):
r"""
Eval(VectorConstantCoefficient self, Vector V, ElementTransformation T, IntegrationPoint ip)
Eval(VectorConstantCoefficient self, DenseMatrix M, ElementTransformation T, IntegrationRule ir)
Eval(VectorConstantCoefficient self, Vector V, ElementTransformation T, IntegrationPoint ip)
"""
return _coefficient.VectorConstantCoefficient_Eval(self, *args)
Eval = _swig_new_instance_method(_coefficient.VectorConstantCoefficient_Eval)
def GetVec(self):
r"""GetVec(VectorConstantCoefficient self) -> Vector"""
return _coefficient.VectorConstantCoefficient_GetVec(self)
GetVec = _swig_new_instance_method(_coefficient.VectorConstantCoefficient_GetVec)
__swig_destroy__ = _coefficient.delete_VectorConstantCoefficient
# Register VectorConstantCoefficient in _coefficient:
_coefficient.VectorConstantCoefficient_swigregister(VectorConstantCoefficient)
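# Hedged usage note: the constructor above first attempts a numpy conversion,
# so any sequence numpy can flatten to a 1-D float array is accepted, e.g.
#
#     vel = mfem.VectorConstantCoefficient([1.0, 0.0, 0.0])
#
# (illustrative only; requires the serial mfem build to be importable).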
class VectorFunctionCoefficient(VectorCoefficient):
r"""Proxy of C++ mfem::VectorFunctionCoefficient class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, *args):
r"""
__init__(VectorFunctionCoefficient self, int dim, std::function< void (mfem::Vector const &,mfem::Vector &) > F, Coefficient q=None) -> VectorFunctionCoefficient
__init__(VectorFunctionCoefficient self, int dim, std::function< void (mfem::Vector const &,double,mfem::Vector &) > TDF, Coefficient q=None) -> VectorFunctionCoefficient
"""
_coefficient.VectorFunctionCoefficient_swiginit(self, _coefficient.new_VectorFunctionCoefficient(*args))
def Eval(self, *args):
r"""
Eval(VectorFunctionCoefficient self, Vector V, ElementTransformation T, IntegrationPoint ip)
Eval(VectorFunctionCoefficient self, DenseMatrix M, ElementTransformation T, IntegrationRule ir)
Eval(VectorFunctionCoefficient self, Vector V, ElementTransformation T, IntegrationPoint ip)
"""
return _coefficient.VectorFunctionCoefficient_Eval(self, *args)
Eval = _swig_new_instance_method(_coefficient.VectorFunctionCoefficient_Eval)
__swig_destroy__ = _coefficient.delete_VectorFunctionCoefficient
# Register VectorFunctionCoefficient in _coefficient:
_coefficient.VectorFunctionCoefficient_swigregister(VectorFunctionCoefficient)
class VectorArrayCoefficient(VectorCoefficient):
r"""Proxy of C++ mfem::VectorArrayCoefficient class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, dim):
r"""__init__(VectorArrayCoefficient self, int dim) -> VectorArrayCoefficient"""
_coefficient.VectorArrayCoefficient_swiginit(self, _coefficient.new_VectorArrayCoefficient(dim))
def GetCoeff(self, i):
r"""GetCoeff(VectorArrayCoefficient self, int i) -> Coefficient"""
return _coefficient.VectorArrayCoefficient_GetCoeff(self, i)
GetCoeff = _swig_new_instance_method(_coefficient.VectorArrayCoefficient_GetCoeff)
def GetCoeffs(self):
r"""GetCoeffs(VectorArrayCoefficient self) -> mfem::Coefficient **"""
return _coefficient.VectorArrayCoefficient_GetCoeffs(self)
GetCoeffs = _swig_new_instance_method(_coefficient.VectorArrayCoefficient_GetCoeffs)
def Set(self, i, c, own=True):
r"""Set(VectorArrayCoefficient self, int i, Coefficient c, bool own=True)"""
c.thisown=0
return _coefficient.VectorArrayCoefficient_Set(self, i, c, own)
def Eval(self, *args):
r"""
Eval(VectorArrayCoefficient self, int i, ElementTransformation T, IntegrationPoint ip) -> double
Eval(VectorArrayCoefficient self, Vector V, ElementTransformation T, IntegrationPoint ip)
Eval(VectorArrayCoefficient self, DenseMatrix M, ElementTransformation T, IntegrationRule ir)
Eval(VectorArrayCoefficient self, Vector V, ElementTransformation T, IntegrationPoint ip)
"""
return _coefficient.VectorArrayCoefficient_Eval(self, *args)
Eval = _swig_new_instance_method(_coefficient.VectorArrayCoefficient_Eval)
__swig_destroy__ = _coefficient.delete_VectorArrayCoefficient
# Register VectorArrayCoefficient in _coefficient:
_coefficient.VectorArrayCoefficient_swigregister(VectorArrayCoefficient)
class VectorGridFunctionCoefficient(VectorCoefficient):
r"""Proxy of C++ mfem::VectorGridFunctionCoefficient class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, *args):
r"""
__init__(VectorGridFunctionCoefficient self) -> VectorGridFunctionCoefficient
__init__(VectorGridFunctionCoefficient self, mfem::GridFunction const * gf) -> VectorGridFunctionCoefficient
"""
_coefficient.VectorGridFunctionCoefficient_swiginit(self, _coefficient.new_VectorGridFunctionCoefficient(*args))
def SetGridFunction(self, gf):
r"""SetGridFunction(VectorGridFunctionCoefficient self, mfem::GridFunction const * gf)"""
return _coefficient.VectorGridFunctionCoefficient_SetGridFunction(self, gf)
SetGridFunction = _swig_new_instance_method(_coefficient.VectorGridFunctionCoefficient_SetGridFunction)
def GetGridFunction(self):
r"""GetGridFunction(VectorGridFunctionCoefficient self) -> mfem::GridFunction const *"""
return _coefficient.VectorGridFunctionCoefficient_GetGridFunction(self)
GetGridFunction = _swig_new_instance_method(_coefficient.VectorGridFunctionCoefficient_GetGridFunction)
def Eval(self, *args):
r"""
Eval(VectorGridFunctionCoefficient self, Vector V, ElementTransformation T, IntegrationPoint ip)
Eval(VectorGridFunctionCoefficient self, DenseMatrix M, ElementTransformation T, IntegrationRule ir)
"""
return _coefficient.VectorGridFunctionCoefficient_Eval(self, *args)
Eval = _swig_new_instance_method(_coefficient.VectorGridFunctionCoefficient_Eval)
__swig_destroy__ = _coefficient.delete_VectorGridFunctionCoefficient
# Register VectorGridFunctionCoefficient in _coefficient:
_coefficient.VectorGridFunctionCoefficient_swigregister(VectorGridFunctionCoefficient)
class GradientGridFunctionCoefficient(VectorCoefficient):
r"""Proxy of C++ mfem::GradientGridFunctionCoefficient class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, gf):
r"""__init__(GradientGridFunctionCoefficient self, mfem::GridFunction const * gf) -> GradientGridFunctionCoefficient"""
_coefficient.GradientGridFunctionCoefficient_swiginit(self, _coefficient.new_GradientGridFunctionCoefficient(gf))
def SetGridFunction(self, gf):
r"""SetGridFunction(GradientGridFunctionCoefficient self, mfem::GridFunction const * gf)"""
return _coefficient.GradientGridFunctionCoefficient_SetGridFunction(self, gf)
SetGridFunction = _swig_new_instance_method(_coefficient.GradientGridFunctionCoefficient_SetGridFunction)
def GetGridFunction(self):
r"""GetGridFunction(GradientGridFunctionCoefficient self) -> mfem::GridFunction const *"""
return _coefficient.GradientGridFunctionCoefficient_GetGridFunction(self)
GetGridFunction = _swig_new_instance_method(_coefficient.GradientGridFunctionCoefficient_GetGridFunction)
def Eval(self, *args):
r"""
Eval(GradientGridFunctionCoefficient self, Vector V, ElementTransformation T, IntegrationPoint ip)
Eval(GradientGridFunctionCoefficient self, DenseMatrix M, ElementTransformation T, IntegrationRule ir)
"""
return _coefficient.GradientGridFunctionCoefficient_Eval(self, *args)
Eval = _swig_new_instance_method(_coefficient.GradientGridFunctionCoefficient_Eval)
__swig_destroy__ = _coefficient.delete_GradientGridFunctionCoefficient
# Register GradientGridFunctionCoefficient in _coefficient:
_coefficient.GradientGridFunctionCoefficient_swigregister(GradientGridFunctionCoefficient)
class CurlGridFunctionCoefficient(VectorCoefficient):
r"""Proxy of C++ mfem::CurlGridFunctionCoefficient class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
def SetGridFunction(self, gf):
r"""SetGridFunction(CurlGridFunctionCoefficient self, mfem::GridFunction const * gf)"""
return _coefficient.CurlGridFunctionCoefficient_SetGridFunction(self, gf)
SetGridFunction = _swig_new_instance_method(_coefficient.CurlGridFunctionCoefficient_SetGridFunction)
def GetGridFunction(self):
r"""GetGridFunction(CurlGridFunctionCoefficient self) -> mfem::GridFunction const *"""
return _coefficient.CurlGridFunctionCoefficient_GetGridFunction(self)
GetGridFunction = _swig_new_instance_method(_coefficient.CurlGridFunctionCoefficient_GetGridFunction)
def Eval(self, *args):
r"""
Eval(CurlGridFunctionCoefficient self, Vector V, ElementTransformation T, IntegrationPoint ip)
Eval(CurlGridFunctionCoefficient self, DenseMatrix M, ElementTransformation T, IntegrationRule ir)
Eval(CurlGridFunctionCoefficient self, Vector V, ElementTransformation T, IntegrationPoint ip)
"""
return _coefficient.CurlGridFunctionCoefficient_Eval(self, *args)
Eval = _swig_new_instance_method(_coefficient.CurlGridFunctionCoefficient_Eval)
__swig_destroy__ = _coefficient.delete_CurlGridFunctionCoefficient
# Register CurlGridFunctionCoefficient in _coefficient:
_coefficient.CurlGridFunctionCoefficient_swigregister(CurlGridFunctionCoefficient)
class DivergenceGridFunctionCoefficient(Coefficient):
r"""Proxy of C++ mfem::DivergenceGridFunctionCoefficient class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, gf):
r"""__init__(DivergenceGridFunctionCoefficient self, mfem::GridFunction const * gf) -> DivergenceGridFunctionCoefficient"""
_coefficient.DivergenceGridFunctionCoefficient_swiginit(self, _coefficient.new_DivergenceGridFunctionCoefficient(gf))
def SetGridFunction(self, gf):
r"""SetGridFunction(DivergenceGridFunctionCoefficient self, mfem::GridFunction const * gf)"""
return _coefficient.DivergenceGridFunctionCoefficient_SetGridFunction(self, gf)
SetGridFunction = _swig_new_instance_method(_coefficient.DivergenceGridFunctionCoefficient_SetGridFunction)
def GetGridFunction(self):
r"""GetGridFunction(DivergenceGridFunctionCoefficient self) -> mfem::GridFunction const *"""
return _coefficient.DivergenceGridFunctionCoefficient_GetGridFunction(self)
GetGridFunction = _swig_new_instance_method(_coefficient.DivergenceGridFunctionCoefficient_GetGridFunction)
def Eval(self, T, ip):
r"""Eval(DivergenceGridFunctionCoefficient self, ElementTransformation T, IntegrationPoint ip) -> double"""
return _coefficient.DivergenceGridFunctionCoefficient_Eval(self, T, ip)
Eval = _swig_new_instance_method(_coefficient.DivergenceGridFunctionCoefficient_Eval)
__swig_destroy__ = _coefficient.delete_DivergenceGridFunctionCoefficient
# Register DivergenceGridFunctionCoefficient in _coefficient:
_coefficient.DivergenceGridFunctionCoefficient_swigregister(DivergenceGridFunctionCoefficient)
class VectorDeltaCoefficient(VectorCoefficient):
r"""Proxy of C++ mfem::VectorDeltaCoefficient class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, *args):
r"""
__init__(VectorDeltaCoefficient self, int _vdim) -> VectorDeltaCoefficient
__init__(VectorDeltaCoefficient self, Vector _dir) -> VectorDeltaCoefficient
__init__(VectorDeltaCoefficient self, Vector _dir, double x, double s) -> VectorDeltaCoefficient
__init__(VectorDeltaCoefficient self, Vector _dir, double x, double y, double s) -> VectorDeltaCoefficient
__init__(VectorDeltaCoefficient self, Vector _dir, double x, double y, double z, double s) -> VectorDeltaCoefficient
"""
_coefficient.VectorDeltaCoefficient_swiginit(self, _coefficient.new_VectorDeltaCoefficient(*args))
def SetDeltaCoefficient(self, _d):
r"""SetDeltaCoefficient(VectorDeltaCoefficient self, DeltaCoefficient _d)"""
return _coefficient.VectorDeltaCoefficient_SetDeltaCoefficient(self, _d)
SetDeltaCoefficient = _swig_new_instance_method(_coefficient.VectorDeltaCoefficient_SetDeltaCoefficient)
def GetDeltaCoefficient(self):
r"""GetDeltaCoefficient(VectorDeltaCoefficient self) -> DeltaCoefficient"""
return _coefficient.VectorDeltaCoefficient_GetDeltaCoefficient(self)
GetDeltaCoefficient = _swig_new_instance_method(_coefficient.VectorDeltaCoefficient_GetDeltaCoefficient)
def SetScale(self, s):
r"""SetScale(VectorDeltaCoefficient self, double s)"""
return _coefficient.VectorDeltaCoefficient_SetScale(self, s)
SetScale = _swig_new_instance_method(_coefficient.VectorDeltaCoefficient_SetScale)
def SetDirection(self, _d):
r"""SetDirection(VectorDeltaCoefficient self, Vector _d)"""
return _coefficient.VectorDeltaCoefficient_SetDirection(self, _d)
SetDirection = _swig_new_instance_method(_coefficient.VectorDeltaCoefficient_SetDirection)
def SetDeltaCenter(self, center):
r"""SetDeltaCenter(VectorDeltaCoefficient self, Vector center)"""
return _coefficient.VectorDeltaCoefficient_SetDeltaCenter(self, center)
SetDeltaCenter = _swig_new_instance_method(_coefficient.VectorDeltaCoefficient_SetDeltaCenter)
def GetDeltaCenter(self, center):
r"""GetDeltaCenter(VectorDeltaCoefficient self, Vector center)"""
return _coefficient.VectorDeltaCoefficient_GetDeltaCenter(self, center)
GetDeltaCenter = _swig_new_instance_method(_coefficient.VectorDeltaCoefficient_GetDeltaCenter)
def EvalDelta(self, V, T, ip):
r"""EvalDelta(VectorDeltaCoefficient self, Vector V, ElementTransformation T, IntegrationPoint ip)"""
return _coefficient.VectorDeltaCoefficient_EvalDelta(self, V, T, ip)
EvalDelta = _swig_new_instance_method(_coefficient.VectorDeltaCoefficient_EvalDelta)
def Eval(self, *args):
r"""
Eval(VectorDeltaCoefficient self, Vector V, ElementTransformation T, IntegrationPoint ip)
Eval(VectorDeltaCoefficient self, DenseMatrix M, ElementTransformation T, IntegrationRule ir)
Eval(VectorDeltaCoefficient self, Vector V, ElementTransformation T, IntegrationPoint ip)
"""
return _coefficient.VectorDeltaCoefficient_Eval(self, *args)
Eval = _swig_new_instance_method(_coefficient.VectorDeltaCoefficient_Eval)
__swig_destroy__ = _coefficient.delete_VectorDeltaCoefficient
# Register VectorDeltaCoefficient in _coefficient:
_coefficient.VectorDeltaCoefficient_swigregister(VectorDeltaCoefficient)
class VectorRestrictedCoefficient(VectorCoefficient):
r"""Proxy of C++ mfem::VectorRestrictedCoefficient class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, vc, attr):
r"""__init__(VectorRestrictedCoefficient self, VectorCoefficient vc, intArray attr) -> VectorRestrictedCoefficient"""
_coefficient.VectorRestrictedCoefficient_swiginit(self, _coefficient.new_VectorRestrictedCoefficient(vc, attr))
self._ref_to_vc = vc
def Eval(self, *args):
r"""
Eval(VectorRestrictedCoefficient self, Vector V, ElementTransformation T, IntegrationPoint ip)
Eval(VectorRestrictedCoefficient self, DenseMatrix M, ElementTransformation T, IntegrationRule ir)
"""
return _coefficient.VectorRestrictedCoefficient_Eval(self, *args)
Eval = _swig_new_instance_method(_coefficient.VectorRestrictedCoefficient_Eval)
__swig_destroy__ = _coefficient.delete_VectorRestrictedCoefficient
# Register VectorRestrictedCoefficient in _coefficient:
_coefficient.VectorRestrictedCoefficient_swigregister(VectorRestrictedCoefficient)
class MatrixCoefficient(object):
r"""Proxy of C++ mfem::MatrixCoefficient class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
def SetTime(self, t):
r"""SetTime(MatrixCoefficient self, double t)"""
return _coefficient.MatrixCoefficient_SetTime(self, t)
SetTime = _swig_new_instance_method(_coefficient.MatrixCoefficient_SetTime)
def GetTime(self):
r"""GetTime(MatrixCoefficient self) -> double"""
return _coefficient.MatrixCoefficient_GetTime(self)
GetTime = _swig_new_instance_method(_coefficient.MatrixCoefficient_GetTime)
def GetHeight(self):
r"""GetHeight(MatrixCoefficient self) -> int"""
return _coefficient.MatrixCoefficient_GetHeight(self)
GetHeight = _swig_new_instance_method(_coefficient.MatrixCoefficient_GetHeight)
def GetWidth(self):
r"""GetWidth(MatrixCoefficient self) -> int"""
return _coefficient.MatrixCoefficient_GetWidth(self)
GetWidth = _swig_new_instance_method(_coefficient.MatrixCoefficient_GetWidth)
def GetVDim(self):
r"""GetVDim(MatrixCoefficient self) -> int"""
return _coefficient.MatrixCoefficient_GetVDim(self)
GetVDim = _swig_new_instance_method(_coefficient.MatrixCoefficient_GetVDim)
def IsSymmetric(self):
r"""IsSymmetric(MatrixCoefficient self) -> bool"""
return _coefficient.MatrixCoefficient_IsSymmetric(self)
IsSymmetric = _swig_new_instance_method(_coefficient.MatrixCoefficient_IsSymmetric)
def Eval(self, K, T, ip):
r"""Eval(MatrixCoefficient self, DenseMatrix K, ElementTransformation T, IntegrationPoint ip)"""
return _coefficient.MatrixCoefficient_Eval(self, K, T, ip)
Eval = _swig_new_instance_method(_coefficient.MatrixCoefficient_Eval)
def EvalSymmetric(self, K, T, ip):
r"""EvalSymmetric(MatrixCoefficient self, Vector K, ElementTransformation T, IntegrationPoint ip)"""
return _coefficient.MatrixCoefficient_EvalSymmetric(self, K, T, ip)
EvalSymmetric = _swig_new_instance_method(_coefficient.MatrixCoefficient_EvalSymmetric)
__swig_destroy__ = _coefficient.delete_MatrixCoefficient
# Register MatrixCoefficient in _coefficient:
_coefficient.MatrixCoefficient_swigregister(MatrixCoefficient)
class MatrixConstantCoefficient(MatrixCoefficient):
r"""Proxy of C++ mfem::MatrixConstantCoefficient class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, m):
r"""__init__(MatrixConstantCoefficient self, DenseMatrix m) -> MatrixConstantCoefficient"""
try:
import numpy as np
value = np.array(m, copy=False, dtype=float)
can_np_array = True
except:
can_np_array = False
if can_np_array:
v = mfem._ser.vector.Vector(np.transpose(value).flatten())
m = mfem._ser.densemat.DenseMatrix(v.GetData(), value.shape[0], value.shape[1])
self._value = (v,m)
else:
pass
_coefficient.MatrixConstantCoefficient_swiginit(self, _coefficient.new_MatrixConstantCoefficient(m))
def Eval(self, *args):
r"""
Eval(MatrixConstantCoefficient self, DenseMatrix K, ElementTransformation T, IntegrationPoint ip)
Eval(MatrixConstantCoefficient self, DenseMatrix M, ElementTransformation T, IntegrationPoint ip)
"""
return _coefficient.MatrixConstantCoefficient_Eval(self, *args)
Eval = _swig_new_instance_method(_coefficient.MatrixConstantCoefficient_Eval)
__swig_destroy__ = _coefficient.delete_MatrixConstantCoefficient
# Register MatrixConstantCoefficient in _coefficient:
_coefficient.MatrixConstantCoefficient_swigregister(MatrixConstantCoefficient)
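# Usage sketch (illustrative, not part of the generated wrapper): the
# constructor above accepts anything convertible to a NumPy array, e.g. a
# constant 2x2 tensor:
#
#     import numpy as np
#     sigma = MatrixConstantCoefficient(np.array([[2.0, 0.0],
#                                                 [0.0, 1.0]]))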
class MatrixFunctionCoefficient(MatrixCoefficient):
r"""Proxy of C++ mfem::MatrixFunctionCoefficient class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, *args):
r"""
__init__(MatrixFunctionCoefficient self, int dim, std::function< void (mfem::Vector const &,mfem::DenseMatrix &) > F, Coefficient q=None) -> MatrixFunctionCoefficient
__init__(MatrixFunctionCoefficient self, DenseMatrix m, Coefficient q) -> MatrixFunctionCoefficient
__init__(MatrixFunctionCoefficient self, int dim, std::function< void (mfem::Vector const &,double,mfem::DenseMatrix &) > TDF, Coefficient q=None) -> MatrixFunctionCoefficient
__init__(MatrixFunctionCoefficient self, int dim, std::function< void (mfem::Vector const &,mfem::Vector &) > SymmF, Coefficient q=None) -> MatrixFunctionCoefficient
"""
_coefficient.MatrixFunctionCoefficient_swiginit(self, _coefficient.new_MatrixFunctionCoefficient(*args))
def Eval(self, K, T, ip):
r"""Eval(MatrixFunctionCoefficient self, DenseMatrix K, ElementTransformation T, IntegrationPoint ip)"""
return _coefficient.MatrixFunctionCoefficient_Eval(self, K, T, ip)
Eval = _swig_new_instance_method(_coefficient.MatrixFunctionCoefficient_Eval)
def EvalSymmetric(self, K, T, ip):
r"""EvalSymmetric(MatrixFunctionCoefficient self, Vector K, ElementTransformation T, IntegrationPoint ip)"""
return _coefficient.MatrixFunctionCoefficient_EvalSymmetric(self, K, T, ip)
EvalSymmetric = _swig_new_instance_method(_coefficient.MatrixFunctionCoefficient_EvalSymmetric)
__swig_destroy__ = _coefficient.delete_MatrixFunctionCoefficient
# Register MatrixFunctionCoefficient in _coefficient:
_coefficient.MatrixFunctionCoefficient_swigregister(MatrixFunctionCoefficient)
class MatrixArrayCoefficient(MatrixCoefficient):
r"""Proxy of C++ mfem::MatrixArrayCoefficient class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, dim):
r"""__init__(MatrixArrayCoefficient self, int dim) -> MatrixArrayCoefficient"""
_coefficient.MatrixArrayCoefficient_swiginit(self, _coefficient.new_MatrixArrayCoefficient(dim))
def GetCoeff(self, i, j):
r"""GetCoeff(MatrixArrayCoefficient self, int i, int j) -> Coefficient"""
return _coefficient.MatrixArrayCoefficient_GetCoeff(self, i, j)
GetCoeff = _swig_new_instance_method(_coefficient.MatrixArrayCoefficient_GetCoeff)
def Set(self, i, j, c, own=True):
r"""Set(MatrixArrayCoefficient self, int i, int j, Coefficient c, bool own=True)"""
        c.thisown = 0  # ownership of c is transferred to the MatrixArrayCoefficient
return _coefficient.MatrixArrayCoefficient_Set(self, i, j, c, own)
def Eval(self, *args):
r"""
Eval(MatrixArrayCoefficient self, int i, int j, ElementTransformation T, IntegrationPoint ip) -> double
Eval(MatrixArrayCoefficient self, DenseMatrix K, ElementTransformation T, IntegrationPoint ip)
"""
return _coefficient.MatrixArrayCoefficient_Eval(self, *args)
Eval = _swig_new_instance_method(_coefficient.MatrixArrayCoefficient_Eval)
__swig_destroy__ = _coefficient.delete_MatrixArrayCoefficient
# Register MatrixArrayCoefficient in _coefficient:
_coefficient.MatrixArrayCoefficient_swigregister(MatrixArrayCoefficient)
class MatrixRestrictedCoefficient(MatrixCoefficient):
r"""Proxy of C++ mfem::MatrixRestrictedCoefficient class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, mc, attr):
r"""__init__(MatrixRestrictedCoefficient self, MatrixCoefficient mc, intArray attr) -> MatrixRestrictedCoefficient"""
_coefficient.MatrixRestrictedCoefficient_swiginit(self, _coefficient.new_MatrixRestrictedCoefficient(mc, attr))
        self._ref_to_mc = mc  # keep a reference so mc is not garbage-collected
def Eval(self, K, T, ip):
r"""Eval(MatrixRestrictedCoefficient self, DenseMatrix K, ElementTransformation T, IntegrationPoint ip)"""
return _coefficient.MatrixRestrictedCoefficient_Eval(self, K, T, ip)
Eval = _swig_new_instance_method(_coefficient.MatrixRestrictedCoefficient_Eval)
__swig_destroy__ = _coefficient.delete_MatrixRestrictedCoefficient
# Register MatrixRestrictedCoefficient in _coefficient:
_coefficient.MatrixRestrictedCoefficient_swigregister(MatrixRestrictedCoefficient)
class SumCoefficient(Coefficient):
r"""Proxy of C++ mfem::SumCoefficient class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, *args):
r"""
__init__(SumCoefficient self, double A, Coefficient B, double _alpha=1.0, double _beta=1.0) -> SumCoefficient
__init__(SumCoefficient self, Coefficient A, Coefficient B, double _alpha=1.0, double _beta=1.0) -> SumCoefficient
"""
_coefficient.SumCoefficient_swiginit(self, _coefficient.new_SumCoefficient(*args))
def SetAConst(self, A):
r"""SetAConst(SumCoefficient self, double A)"""
return _coefficient.SumCoefficient_SetAConst(self, A)
SetAConst = _swig_new_instance_method(_coefficient.SumCoefficient_SetAConst)
def GetAConst(self):
r"""GetAConst(SumCoefficient self) -> double"""
return _coefficient.SumCoefficient_GetAConst(self)
GetAConst = _swig_new_instance_method(_coefficient.SumCoefficient_GetAConst)
def SetACoef(self, A):
r"""SetACoef(SumCoefficient self, Coefficient A)"""
return _coefficient.SumCoefficient_SetACoef(self, A)
SetACoef = _swig_new_instance_method(_coefficient.SumCoefficient_SetACoef)
def GetACoef(self):
r"""GetACoef(SumCoefficient self) -> Coefficient"""
return _coefficient.SumCoefficient_GetACoef(self)
GetACoef = _swig_new_instance_method(_coefficient.SumCoefficient_GetACoef)
def SetBCoef(self, B):
r"""SetBCoef(SumCoefficient self, Coefficient B)"""
return _coefficient.SumCoefficient_SetBCoef(self, B)
SetBCoef = _swig_new_instance_method(_coefficient.SumCoefficient_SetBCoef)
def GetBCoef(self):
r"""GetBCoef(SumCoefficient self) -> Coefficient"""
return _coefficient.SumCoefficient_GetBCoef(self)
GetBCoef = _swig_new_instance_method(_coefficient.SumCoefficient_GetBCoef)
def SetAlpha(self, _alpha):
r"""SetAlpha(SumCoefficient self, double _alpha)"""
return _coefficient.SumCoefficient_SetAlpha(self, _alpha)
SetAlpha = _swig_new_instance_method(_coefficient.SumCoefficient_SetAlpha)
def GetAlpha(self):
r"""GetAlpha(SumCoefficient self) -> double"""
return _coefficient.SumCoefficient_GetAlpha(self)
GetAlpha = _swig_new_instance_method(_coefficient.SumCoefficient_GetAlpha)
def SetBeta(self, _beta):
r"""SetBeta(SumCoefficient self, double _beta)"""
return _coefficient.SumCoefficient_SetBeta(self, _beta)
SetBeta = _swig_new_instance_method(_coefficient.SumCoefficient_SetBeta)
def GetBeta(self):
r"""GetBeta(SumCoefficient self) -> double"""
return _coefficient.SumCoefficient_GetBeta(self)
GetBeta = _swig_new_instance_method(_coefficient.SumCoefficient_GetBeta)
def Eval(self, T, ip):
r"""Eval(SumCoefficient self, ElementTransformation T, IntegrationPoint ip) -> double"""
return _coefficient.SumCoefficient_Eval(self, T, ip)
Eval = _swig_new_instance_method(_coefficient.SumCoefficient_Eval)
__swig_destroy__ = _coefficient.delete_SumCoefficient
# Register SumCoefficient in _coefficient:
_coefficient.SumCoefficient_swigregister(SumCoefficient)
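# Usage sketch (illustrative): SumCoefficient evaluates alpha*A(x) + beta*B(x)
# at each integration point. Assuming ConstantCoefficient from earlier in this
# module:
#
#     a = ConstantCoefficient(1.0)
#     b = ConstantCoefficient(2.0)
#     s = SumCoefficient(a, b, 3.0, 0.5)   # 3.0*1.0 + 0.5*2.0 at every point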
class ProductCoefficient(Coefficient):
r"""Proxy of C++ mfem::ProductCoefficient class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, *args):
r"""
__init__(ProductCoefficient self, double A, Coefficient B) -> ProductCoefficient
__init__(ProductCoefficient self, Coefficient A, Coefficient B) -> ProductCoefficient
"""
_coefficient.ProductCoefficient_swiginit(self, _coefficient.new_ProductCoefficient(*args))
def SetAConst(self, A):
r"""SetAConst(ProductCoefficient self, double A)"""
return _coefficient.ProductCoefficient_SetAConst(self, A)
SetAConst = _swig_new_instance_method(_coefficient.ProductCoefficient_SetAConst)
def GetAConst(self):
r"""GetAConst(ProductCoefficient self) -> double"""
return _coefficient.ProductCoefficient_GetAConst(self)
GetAConst = _swig_new_instance_method(_coefficient.ProductCoefficient_GetAConst)
def SetACoef(self, A):
r"""SetACoef(ProductCoefficient self, Coefficient A)"""
return _coefficient.ProductCoefficient_SetACoef(self, A)
SetACoef = _swig_new_instance_method(_coefficient.ProductCoefficient_SetACoef)
def GetACoef(self):
r"""GetACoef(ProductCoefficient self) -> Coefficient"""
return _coefficient.ProductCoefficient_GetACoef(self)
GetACoef = _swig_new_instance_method(_coefficient.ProductCoefficient_GetACoef)
def SetBCoef(self, B):
r"""SetBCoef(ProductCoefficient self, Coefficient B)"""
return _coefficient.ProductCoefficient_SetBCoef(self, B)
SetBCoef = _swig_new_instance_method(_coefficient.ProductCoefficient_SetBCoef)
def GetBCoef(self):
r"""GetBCoef(ProductCoefficient self) -> Coefficient"""
return _coefficient.ProductCoefficient_GetBCoef(self)
GetBCoef = _swig_new_instance_method(_coefficient.ProductCoefficient_GetBCoef)
def Eval(self, T, ip):
r"""Eval(ProductCoefficient self, ElementTransformation T, IntegrationPoint ip) -> double"""
return _coefficient.ProductCoefficient_Eval(self, T, ip)
Eval = _swig_new_instance_method(_coefficient.ProductCoefficient_Eval)
__swig_destroy__ = _coefficient.delete_ProductCoefficient
# Register ProductCoefficient in _coefficient:
_coefficient.ProductCoefficient_swigregister(ProductCoefficient)
class RatioCoefficient(Coefficient):
r"""Proxy of C++ mfem::RatioCoefficient class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, *args):
r"""
__init__(RatioCoefficient self, double A, Coefficient B) -> RatioCoefficient
__init__(RatioCoefficient self, Coefficient A, Coefficient B) -> RatioCoefficient
__init__(RatioCoefficient self, Coefficient A, double B) -> RatioCoefficient
"""
_coefficient.RatioCoefficient_swiginit(self, _coefficient.new_RatioCoefficient(*args))
def SetAConst(self, A):
r"""SetAConst(RatioCoefficient self, double A)"""
return _coefficient.RatioCoefficient_SetAConst(self, A)
SetAConst = _swig_new_instance_method(_coefficient.RatioCoefficient_SetAConst)
def GetAConst(self):
r"""GetAConst(RatioCoefficient self) -> double"""
return _coefficient.RatioCoefficient_GetAConst(self)
GetAConst = _swig_new_instance_method(_coefficient.RatioCoefficient_GetAConst)
def SetBConst(self, B):
r"""SetBConst(RatioCoefficient self, double B)"""
return _coefficient.RatioCoefficient_SetBConst(self, B)
SetBConst = _swig_new_instance_method(_coefficient.RatioCoefficient_SetBConst)
def GetBConst(self):
r"""GetBConst(RatioCoefficient self) -> double"""
return _coefficient.RatioCoefficient_GetBConst(self)
GetBConst = _swig_new_instance_method(_coefficient.RatioCoefficient_GetBConst)
def SetACoef(self, A):
r"""SetACoef(RatioCoefficient self, Coefficient A)"""
return _coefficient.RatioCoefficient_SetACoef(self, A)
SetACoef = _swig_new_instance_method(_coefficient.RatioCoefficient_SetACoef)
def GetACoef(self):
r"""GetACoef(RatioCoefficient self) -> Coefficient"""
return _coefficient.RatioCoefficient_GetACoef(self)
GetACoef = _swig_new_instance_method(_coefficient.RatioCoefficient_GetACoef)
def SetBCoef(self, B):
r"""SetBCoef(RatioCoefficient self, Coefficient B)"""
return _coefficient.RatioCoefficient_SetBCoef(self, B)
SetBCoef = _swig_new_instance_method(_coefficient.RatioCoefficient_SetBCoef)
def GetBCoef(self):
r"""GetBCoef(RatioCoefficient self) -> Coefficient"""
return _coefficient.RatioCoefficient_GetBCoef(self)
GetBCoef = _swig_new_instance_method(_coefficient.RatioCoefficient_GetBCoef)
def Eval(self, T, ip):
r"""Eval(RatioCoefficient self, ElementTransformation T, IntegrationPoint ip) -> double"""
return _coefficient.RatioCoefficient_Eval(self, T, ip)
Eval = _swig_new_instance_method(_coefficient.RatioCoefficient_Eval)
__swig_destroy__ = _coefficient.delete_RatioCoefficient
# Register RatioCoefficient in _coefficient:
_coefficient.RatioCoefficient_swigregister(RatioCoefficient)
class PowerCoefficient(Coefficient):
r"""Proxy of C++ mfem::PowerCoefficient class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, A, _p):
r"""__init__(PowerCoefficient self, Coefficient A, double _p) -> PowerCoefficient"""
_coefficient.PowerCoefficient_swiginit(self, _coefficient.new_PowerCoefficient(A, _p))
def SetACoef(self, A):
r"""SetACoef(PowerCoefficient self, Coefficient A)"""
return _coefficient.PowerCoefficient_SetACoef(self, A)
SetACoef = _swig_new_instance_method(_coefficient.PowerCoefficient_SetACoef)
def GetACoef(self):
r"""GetACoef(PowerCoefficient self) -> Coefficient"""
return _coefficient.PowerCoefficient_GetACoef(self)
GetACoef = _swig_new_instance_method(_coefficient.PowerCoefficient_GetACoef)
def SetExponent(self, _p):
r"""SetExponent(PowerCoefficient self, double _p)"""
return _coefficient.PowerCoefficient_SetExponent(self, _p)
SetExponent = _swig_new_instance_method(_coefficient.PowerCoefficient_SetExponent)
def GetExponent(self):
r"""GetExponent(PowerCoefficient self) -> double"""
return _coefficient.PowerCoefficient_GetExponent(self)
GetExponent = _swig_new_instance_method(_coefficient.PowerCoefficient_GetExponent)
def Eval(self, T, ip):
r"""Eval(PowerCoefficient self, ElementTransformation T, IntegrationPoint ip) -> double"""
return _coefficient.PowerCoefficient_Eval(self, T, ip)
Eval = _swig_new_instance_method(_coefficient.PowerCoefficient_Eval)
__swig_destroy__ = _coefficient.delete_PowerCoefficient
# Register PowerCoefficient in _coefficient:
_coefficient.PowerCoefficient_swigregister(PowerCoefficient)
class InnerProductCoefficient(Coefficient):
r"""Proxy of C++ mfem::InnerProductCoefficient class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, A, B):
r"""__init__(InnerProductCoefficient self, VectorCoefficient A, VectorCoefficient B) -> InnerProductCoefficient"""
_coefficient.InnerProductCoefficient_swiginit(self, _coefficient.new_InnerProductCoefficient(A, B))
def SetACoef(self, A):
r"""SetACoef(InnerProductCoefficient self, VectorCoefficient A)"""
return _coefficient.InnerProductCoefficient_SetACoef(self, A)
SetACoef = _swig_new_instance_method(_coefficient.InnerProductCoefficient_SetACoef)
def GetACoef(self):
r"""GetACoef(InnerProductCoefficient self) -> VectorCoefficient"""
return _coefficient.InnerProductCoefficient_GetACoef(self)
GetACoef = _swig_new_instance_method(_coefficient.InnerProductCoefficient_GetACoef)
def SetBCoef(self, B):
r"""SetBCoef(InnerProductCoefficient self, VectorCoefficient B)"""
return _coefficient.InnerProductCoefficient_SetBCoef(self, B)
SetBCoef = _swig_new_instance_method(_coefficient.InnerProductCoefficient_SetBCoef)
def GetBCoef(self):
r"""GetBCoef(InnerProductCoefficient self) -> VectorCoefficient"""
return _coefficient.InnerProductCoefficient_GetBCoef(self)
GetBCoef = _swig_new_instance_method(_coefficient.InnerProductCoefficient_GetBCoef)
def Eval(self, T, ip):
r"""Eval(InnerProductCoefficient self, ElementTransformation T, IntegrationPoint ip) -> double"""
return _coefficient.InnerProductCoefficient_Eval(self, T, ip)
Eval = _swig_new_instance_method(_coefficient.InnerProductCoefficient_Eval)
__swig_destroy__ = _coefficient.delete_InnerProductCoefficient
# Register InnerProductCoefficient in _coefficient:
_coefficient.InnerProductCoefficient_swigregister(InnerProductCoefficient)
class VectorRotProductCoefficient(Coefficient):
r"""Proxy of C++ mfem::VectorRotProductCoefficient class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, A, B):
r"""__init__(VectorRotProductCoefficient self, VectorCoefficient A, VectorCoefficient B) -> VectorRotProductCoefficient"""
_coefficient.VectorRotProductCoefficient_swiginit(self, _coefficient.new_VectorRotProductCoefficient(A, B))
def SetACoef(self, A):
r"""SetACoef(VectorRotProductCoefficient self, VectorCoefficient A)"""
return _coefficient.VectorRotProductCoefficient_SetACoef(self, A)
SetACoef = _swig_new_instance_method(_coefficient.VectorRotProductCoefficient_SetACoef)
def GetACoef(self):
r"""GetACoef(VectorRotProductCoefficient self) -> VectorCoefficient"""
return _coefficient.VectorRotProductCoefficient_GetACoef(self)
GetACoef = _swig_new_instance_method(_coefficient.VectorRotProductCoefficient_GetACoef)
def SetBCoef(self, B):
r"""SetBCoef(VectorRotProductCoefficient self, VectorCoefficient B)"""
return _coefficient.VectorRotProductCoefficient_SetBCoef(self, B)
SetBCoef = _swig_new_instance_method(_coefficient.VectorRotProductCoefficient_SetBCoef)
def GetBCoef(self):
r"""GetBCoef(VectorRotProductCoefficient self) -> VectorCoefficient"""
return _coefficient.VectorRotProductCoefficient_GetBCoef(self)
GetBCoef = _swig_new_instance_method(_coefficient.VectorRotProductCoefficient_GetBCoef)
def Eval(self, T, ip):
r"""Eval(VectorRotProductCoefficient self, ElementTransformation T, IntegrationPoint ip) -> double"""
return _coefficient.VectorRotProductCoefficient_Eval(self, T, ip)
Eval = _swig_new_instance_method(_coefficient.VectorRotProductCoefficient_Eval)
__swig_destroy__ = _coefficient.delete_VectorRotProductCoefficient
# Register VectorRotProductCoefficient in _coefficient:
_coefficient.VectorRotProductCoefficient_swigregister(VectorRotProductCoefficient)
class DeterminantCoefficient(Coefficient):
r"""Proxy of C++ mfem::DeterminantCoefficient class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, A):
r"""__init__(DeterminantCoefficient self, MatrixCoefficient A) -> DeterminantCoefficient"""
_coefficient.DeterminantCoefficient_swiginit(self, _coefficient.new_DeterminantCoefficient(A))
def SetACoef(self, A):
r"""SetACoef(DeterminantCoefficient self, MatrixCoefficient A)"""
return _coefficient.DeterminantCoefficient_SetACoef(self, A)
SetACoef = _swig_new_instance_method(_coefficient.DeterminantCoefficient_SetACoef)
def GetACoef(self):
r"""GetACoef(DeterminantCoefficient self) -> MatrixCoefficient"""
return _coefficient.DeterminantCoefficient_GetACoef(self)
GetACoef = _swig_new_instance_method(_coefficient.DeterminantCoefficient_GetACoef)
def Eval(self, T, ip):
r"""Eval(DeterminantCoefficient self, ElementTransformation T, IntegrationPoint ip) -> double"""
return _coefficient.DeterminantCoefficient_Eval(self, T, ip)
Eval = _swig_new_instance_method(_coefficient.DeterminantCoefficient_Eval)
__swig_destroy__ = _coefficient.delete_DeterminantCoefficient
# Register DeterminantCoefficient in _coefficient:
_coefficient.DeterminantCoefficient_swigregister(DeterminantCoefficient)
class VectorSumCoefficient(VectorCoefficient):
r"""Proxy of C++ mfem::VectorSumCoefficient class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, *args):
r"""
__init__(VectorSumCoefficient self, int dim) -> VectorSumCoefficient
__init__(VectorSumCoefficient self, VectorCoefficient A, VectorCoefficient B, double _alpha=1.0, double _beta=1.0) -> VectorSumCoefficient
__init__(VectorSumCoefficient self, VectorCoefficient _A, VectorCoefficient _B, Coefficient _alpha, Coefficient _beta) -> VectorSumCoefficient
"""
_coefficient.VectorSumCoefficient_swiginit(self, _coefficient.new_VectorSumCoefficient(*args))
def SetACoef(self, A):
r"""SetACoef(VectorSumCoefficient self, VectorCoefficient A)"""
return _coefficient.VectorSumCoefficient_SetACoef(self, A)
SetACoef = _swig_new_instance_method(_coefficient.VectorSumCoefficient_SetACoef)
def GetACoef(self):
r"""GetACoef(VectorSumCoefficient self) -> VectorCoefficient"""
return _coefficient.VectorSumCoefficient_GetACoef(self)
GetACoef = _swig_new_instance_method(_coefficient.VectorSumCoefficient_GetACoef)
def SetBCoef(self, B):
r"""SetBCoef(VectorSumCoefficient self, VectorCoefficient B)"""
return _coefficient.VectorSumCoefficient_SetBCoef(self, B)
SetBCoef = _swig_new_instance_method(_coefficient.VectorSumCoefficient_SetBCoef)
def GetBCoef(self):
r"""GetBCoef(VectorSumCoefficient self) -> VectorCoefficient"""
return _coefficient.VectorSumCoefficient_GetBCoef(self)
GetBCoef = _swig_new_instance_method(_coefficient.VectorSumCoefficient_GetBCoef)
def SetAlphaCoef(self, A):
r"""SetAlphaCoef(VectorSumCoefficient self, Coefficient A)"""
return _coefficient.VectorSumCoefficient_SetAlphaCoef(self, A)
SetAlphaCoef = _swig_new_instance_method(_coefficient.VectorSumCoefficient_SetAlphaCoef)
def GetAlphaCoef(self):
r"""GetAlphaCoef(VectorSumCoefficient self) -> Coefficient"""
return _coefficient.VectorSumCoefficient_GetAlphaCoef(self)
GetAlphaCoef = _swig_new_instance_method(_coefficient.VectorSumCoefficient_GetAlphaCoef)
def SetBetaCoef(self, B):
r"""SetBetaCoef(VectorSumCoefficient self, Coefficient B)"""
return _coefficient.VectorSumCoefficient_SetBetaCoef(self, B)
SetBetaCoef = _swig_new_instance_method(_coefficient.VectorSumCoefficient_SetBetaCoef)
def GetBetaCoef(self):
r"""GetBetaCoef(VectorSumCoefficient self) -> Coefficient"""
return _coefficient.VectorSumCoefficient_GetBetaCoef(self)
GetBetaCoef = _swig_new_instance_method(_coefficient.VectorSumCoefficient_GetBetaCoef)
def SetA(self, _A):
r"""SetA(VectorSumCoefficient self, Vector _A)"""
return _coefficient.VectorSumCoefficient_SetA(self, _A)
SetA = _swig_new_instance_method(_coefficient.VectorSumCoefficient_SetA)
def GetA(self):
r"""GetA(VectorSumCoefficient self) -> Vector"""
return _coefficient.VectorSumCoefficient_GetA(self)
GetA = _swig_new_instance_method(_coefficient.VectorSumCoefficient_GetA)
def SetB(self, _B):
r"""SetB(VectorSumCoefficient self, Vector _B)"""
return _coefficient.VectorSumCoefficient_SetB(self, _B)
SetB = _swig_new_instance_method(_coefficient.VectorSumCoefficient_SetB)
def GetB(self):
r"""GetB(VectorSumCoefficient self) -> Vector"""
return _coefficient.VectorSumCoefficient_GetB(self)
GetB = _swig_new_instance_method(_coefficient.VectorSumCoefficient_GetB)
def SetAlpha(self, _alpha):
r"""SetAlpha(VectorSumCoefficient self, double _alpha)"""
return _coefficient.VectorSumCoefficient_SetAlpha(self, _alpha)
SetAlpha = _swig_new_instance_method(_coefficient.VectorSumCoefficient_SetAlpha)
def GetAlpha(self):
r"""GetAlpha(VectorSumCoefficient self) -> double"""
return _coefficient.VectorSumCoefficient_GetAlpha(self)
GetAlpha = _swig_new_instance_method(_coefficient.VectorSumCoefficient_GetAlpha)
def SetBeta(self, _beta):
r"""SetBeta(VectorSumCoefficient self, double _beta)"""
return _coefficient.VectorSumCoefficient_SetBeta(self, _beta)
SetBeta = _swig_new_instance_method(_coefficient.VectorSumCoefficient_SetBeta)
def GetBeta(self):
r"""GetBeta(VectorSumCoefficient self) -> double"""
return _coefficient.VectorSumCoefficient_GetBeta(self)
GetBeta = _swig_new_instance_method(_coefficient.VectorSumCoefficient_GetBeta)
def Eval(self, *args):
r"""
Eval(VectorSumCoefficient self, Vector V, ElementTransformation T, IntegrationPoint ip)
Eval(VectorSumCoefficient self, DenseMatrix M, ElementTransformation T, IntegrationRule ir)
"""
return _coefficient.VectorSumCoefficient_Eval(self, *args)
Eval = _swig_new_instance_method(_coefficient.VectorSumCoefficient_Eval)
__swig_destroy__ = _coefficient.delete_VectorSumCoefficient
# Register VectorSumCoefficient in _coefficient:
_coefficient.VectorSumCoefficient_swigregister(VectorSumCoefficient)
class ScalarVectorProductCoefficient(VectorCoefficient):
r"""Proxy of C++ mfem::ScalarVectorProductCoefficient class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, *args):
r"""
__init__(ScalarVectorProductCoefficient self, double A, VectorCoefficient B) -> ScalarVectorProductCoefficient
__init__(ScalarVectorProductCoefficient self, Coefficient A, VectorCoefficient B) -> ScalarVectorProductCoefficient
"""
_coefficient.ScalarVectorProductCoefficient_swiginit(self, _coefficient.new_ScalarVectorProductCoefficient(*args))
def SetAConst(self, A):
r"""SetAConst(ScalarVectorProductCoefficient self, double A)"""
return _coefficient.ScalarVectorProductCoefficient_SetAConst(self, A)
SetAConst = _swig_new_instance_method(_coefficient.ScalarVectorProductCoefficient_SetAConst)
def GetAConst(self):
r"""GetAConst(ScalarVectorProductCoefficient self) -> double"""
return _coefficient.ScalarVectorProductCoefficient_GetAConst(self)
GetAConst = _swig_new_instance_method(_coefficient.ScalarVectorProductCoefficient_GetAConst)
def SetACoef(self, A):
r"""SetACoef(ScalarVectorProductCoefficient self, Coefficient A)"""
return _coefficient.ScalarVectorProductCoefficient_SetACoef(self, A)
SetACoef = _swig_new_instance_method(_coefficient.ScalarVectorProductCoefficient_SetACoef)
def GetACoef(self):
r"""GetACoef(ScalarVectorProductCoefficient self) -> Coefficient"""
return _coefficient.ScalarVectorProductCoefficient_GetACoef(self)
GetACoef = _swig_new_instance_method(_coefficient.ScalarVectorProductCoefficient_GetACoef)
def SetBCoef(self, B):
r"""SetBCoef(ScalarVectorProductCoefficient self, VectorCoefficient B)"""
return _coefficient.ScalarVectorProductCoefficient_SetBCoef(self, B)
SetBCoef = _swig_new_instance_method(_coefficient.ScalarVectorProductCoefficient_SetBCoef)
def GetBCoef(self):
r"""GetBCoef(ScalarVectorProductCoefficient self) -> VectorCoefficient"""
return _coefficient.ScalarVectorProductCoefficient_GetBCoef(self)
GetBCoef = _swig_new_instance_method(_coefficient.ScalarVectorProductCoefficient_GetBCoef)
def Eval(self, *args):
r"""
Eval(ScalarVectorProductCoefficient self, Vector V, ElementTransformation T, IntegrationPoint ip)
Eval(ScalarVectorProductCoefficient self, DenseMatrix M, ElementTransformation T, IntegrationRule ir)
"""
return _coefficient.ScalarVectorProductCoefficient_Eval(self, *args)
Eval = _swig_new_instance_method(_coefficient.ScalarVectorProductCoefficient_Eval)
__swig_destroy__ = _coefficient.delete_ScalarVectorProductCoefficient
# Register ScalarVectorProductCoefficient in _coefficient:
_coefficient.ScalarVectorProductCoefficient_swigregister(ScalarVectorProductCoefficient)
class NormalizedVectorCoefficient(VectorCoefficient):
r"""Proxy of C++ mfem::NormalizedVectorCoefficient class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, A, tol=1e-6):
r"""__init__(NormalizedVectorCoefficient self, VectorCoefficient A, double tol=1e-6) -> NormalizedVectorCoefficient"""
_coefficient.NormalizedVectorCoefficient_swiginit(self, _coefficient.new_NormalizedVectorCoefficient(A, tol))
def SetACoef(self, A):
r"""SetACoef(NormalizedVectorCoefficient self, VectorCoefficient A)"""
return _coefficient.NormalizedVectorCoefficient_SetACoef(self, A)
SetACoef = _swig_new_instance_method(_coefficient.NormalizedVectorCoefficient_SetACoef)
def GetACoef(self):
r"""GetACoef(NormalizedVectorCoefficient self) -> VectorCoefficient"""
return _coefficient.NormalizedVectorCoefficient_GetACoef(self)
GetACoef = _swig_new_instance_method(_coefficient.NormalizedVectorCoefficient_GetACoef)
def Eval(self, *args):
r"""
Eval(NormalizedVectorCoefficient self, Vector V, ElementTransformation T, IntegrationPoint ip)
Eval(NormalizedVectorCoefficient self, DenseMatrix M, ElementTransformation T, IntegrationRule ir)
"""
return _coefficient.NormalizedVectorCoefficient_Eval(self, *args)
Eval = _swig_new_instance_method(_coefficient.NormalizedVectorCoefficient_Eval)
__swig_destroy__ = _coefficient.delete_NormalizedVectorCoefficient
# Register NormalizedVectorCoefficient in _coefficient:
_coefficient.NormalizedVectorCoefficient_swigregister(NormalizedVectorCoefficient)
class VectorCrossProductCoefficient(VectorCoefficient):
r"""Proxy of C++ mfem::VectorCrossProductCoefficient class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, A, B):
r"""__init__(VectorCrossProductCoefficient self, VectorCoefficient A, VectorCoefficient B) -> VectorCrossProductCoefficient"""
_coefficient.VectorCrossProductCoefficient_swiginit(self, _coefficient.new_VectorCrossProductCoefficient(A, B))
def SetACoef(self, A):
r"""SetACoef(VectorCrossProductCoefficient self, VectorCoefficient A)"""
return _coefficient.VectorCrossProductCoefficient_SetACoef(self, A)
SetACoef = _swig_new_instance_method(_coefficient.VectorCrossProductCoefficient_SetACoef)
def GetACoef(self):
r"""GetACoef(VectorCrossProductCoefficient self) -> VectorCoefficient"""
return _coefficient.VectorCrossProductCoefficient_GetACoef(self)
GetACoef = _swig_new_instance_method(_coefficient.VectorCrossProductCoefficient_GetACoef)
def SetBCoef(self, B):
r"""SetBCoef(VectorCrossProductCoefficient self, VectorCoefficient B)"""
return _coefficient.VectorCrossProductCoefficient_SetBCoef(self, B)
SetBCoef = _swig_new_instance_method(_coefficient.VectorCrossProductCoefficient_SetBCoef)
def GetBCoef(self):
r"""GetBCoef(VectorCrossProductCoefficient self) -> VectorCoefficient"""
return _coefficient.VectorCrossProductCoefficient_GetBCoef(self)
GetBCoef = _swig_new_instance_method(_coefficient.VectorCrossProductCoefficient_GetBCoef)
def Eval(self, *args):
r"""
Eval(VectorCrossProductCoefficient self, Vector V, ElementTransformation T, IntegrationPoint ip)
Eval(VectorCrossProductCoefficient self, DenseMatrix M, ElementTransformation T, IntegrationRule ir)
"""
return _coefficient.VectorCrossProductCoefficient_Eval(self, *args)
Eval = _swig_new_instance_method(_coefficient.VectorCrossProductCoefficient_Eval)
__swig_destroy__ = _coefficient.delete_VectorCrossProductCoefficient
# Register VectorCrossProductCoefficient in _coefficient:
_coefficient.VectorCrossProductCoefficient_swigregister(VectorCrossProductCoefficient)
class MatrixVectorProductCoefficient(VectorCoefficient):
r"""Proxy of C++ mfem::MatrixVectorProductCoefficient class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, A, B):
r"""__init__(MatrixVectorProductCoefficient self, MatrixCoefficient A, VectorCoefficient B) -> MatrixVectorProductCoefficient"""
_coefficient.MatrixVectorProductCoefficient_swiginit(self, _coefficient.new_MatrixVectorProductCoefficient(A, B))
def SetACoef(self, A):
r"""SetACoef(MatrixVectorProductCoefficient self, MatrixCoefficient A)"""
return _coefficient.MatrixVectorProductCoefficient_SetACoef(self, A)
SetACoef = _swig_new_instance_method(_coefficient.MatrixVectorProductCoefficient_SetACoef)
def GetACoef(self):
r"""GetACoef(MatrixVectorProductCoefficient self) -> MatrixCoefficient"""
return _coefficient.MatrixVectorProductCoefficient_GetACoef(self)
GetACoef = _swig_new_instance_method(_coefficient.MatrixVectorProductCoefficient_GetACoef)
def SetBCoef(self, B):
r"""SetBCoef(MatrixVectorProductCoefficient self, VectorCoefficient B)"""
return _coefficient.MatrixVectorProductCoefficient_SetBCoef(self, B)
SetBCoef = _swig_new_instance_method(_coefficient.MatrixVectorProductCoefficient_SetBCoef)
def GetBCoef(self):
r"""GetBCoef(MatrixVectorProductCoefficient self) -> VectorCoefficient"""
return _coefficient.MatrixVectorProductCoefficient_GetBCoef(self)
GetBCoef = _swig_new_instance_method(_coefficient.MatrixVectorProductCoefficient_GetBCoef)
def Eval(self, *args):
r"""
Eval(MatrixVectorProductCoefficient self, Vector V, ElementTransformation T, IntegrationPoint ip)
Eval(MatrixVectorProductCoefficient self, DenseMatrix M, ElementTransformation T, IntegrationRule ir)
"""
return _coefficient.MatrixVectorProductCoefficient_Eval(self, *args)
Eval = _swig_new_instance_method(_coefficient.MatrixVectorProductCoefficient_Eval)
__swig_destroy__ = _coefficient.delete_MatrixVectorProductCoefficient
# Register MatrixVectorProductCoefficient in _coefficient:
_coefficient.MatrixVectorProductCoefficient_swigregister(MatrixVectorProductCoefficient)
class IdentityMatrixCoefficient(MatrixCoefficient):
r"""Proxy of C++ mfem::IdentityMatrixCoefficient class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, d):
r"""__init__(IdentityMatrixCoefficient self, int d) -> IdentityMatrixCoefficient"""
_coefficient.IdentityMatrixCoefficient_swiginit(self, _coefficient.new_IdentityMatrixCoefficient(d))
def Eval(self, M, T, ip):
r"""Eval(IdentityMatrixCoefficient self, DenseMatrix M, ElementTransformation T, IntegrationPoint ip)"""
return _coefficient.IdentityMatrixCoefficient_Eval(self, M, T, ip)
Eval = _swig_new_instance_method(_coefficient.IdentityMatrixCoefficient_Eval)
__swig_destroy__ = _coefficient.delete_IdentityMatrixCoefficient
# Register IdentityMatrixCoefficient in _coefficient:
_coefficient.IdentityMatrixCoefficient_swigregister(IdentityMatrixCoefficient)
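# Usage sketch (illustrative): an isotropic tensor a*I can be built by scaling
# the identity with ScalarMatrixProductCoefficient (defined below):
#
#     Id = IdentityMatrixCoefficient(2)
#     aI = ScalarMatrixProductCoefficient(2.0, Id)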
class MatrixSumCoefficient(MatrixCoefficient):
r"""Proxy of C++ mfem::MatrixSumCoefficient class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, A, B, _alpha=1.0, _beta=1.0):
r"""__init__(MatrixSumCoefficient self, MatrixCoefficient A, MatrixCoefficient B, double _alpha=1.0, double _beta=1.0) -> MatrixSumCoefficient"""
_coefficient.MatrixSumCoefficient_swiginit(self, _coefficient.new_MatrixSumCoefficient(A, B, _alpha, _beta))
def SetACoef(self, A):
r"""SetACoef(MatrixSumCoefficient self, MatrixCoefficient A)"""
return _coefficient.MatrixSumCoefficient_SetACoef(self, A)
SetACoef = _swig_new_instance_method(_coefficient.MatrixSumCoefficient_SetACoef)
def GetACoef(self):
r"""GetACoef(MatrixSumCoefficient self) -> MatrixCoefficient"""
return _coefficient.MatrixSumCoefficient_GetACoef(self)
GetACoef = _swig_new_instance_method(_coefficient.MatrixSumCoefficient_GetACoef)
def SetBCoef(self, B):
r"""SetBCoef(MatrixSumCoefficient self, MatrixCoefficient B)"""
return _coefficient.MatrixSumCoefficient_SetBCoef(self, B)
SetBCoef = _swig_new_instance_method(_coefficient.MatrixSumCoefficient_SetBCoef)
def GetBCoef(self):
r"""GetBCoef(MatrixSumCoefficient self) -> MatrixCoefficient"""
return _coefficient.MatrixSumCoefficient_GetBCoef(self)
GetBCoef = _swig_new_instance_method(_coefficient.MatrixSumCoefficient_GetBCoef)
def SetAlpha(self, _alpha):
r"""SetAlpha(MatrixSumCoefficient self, double _alpha)"""
return _coefficient.MatrixSumCoefficient_SetAlpha(self, _alpha)
SetAlpha = _swig_new_instance_method(_coefficient.MatrixSumCoefficient_SetAlpha)
def GetAlpha(self):
r"""GetAlpha(MatrixSumCoefficient self) -> double"""
return _coefficient.MatrixSumCoefficient_GetAlpha(self)
GetAlpha = _swig_new_instance_method(_coefficient.MatrixSumCoefficient_GetAlpha)
def SetBeta(self, _beta):
r"""SetBeta(MatrixSumCoefficient self, double _beta)"""
return _coefficient.MatrixSumCoefficient_SetBeta(self, _beta)
SetBeta = _swig_new_instance_method(_coefficient.MatrixSumCoefficient_SetBeta)
def GetBeta(self):
r"""GetBeta(MatrixSumCoefficient self) -> double"""
return _coefficient.MatrixSumCoefficient_GetBeta(self)
GetBeta = _swig_new_instance_method(_coefficient.MatrixSumCoefficient_GetBeta)
def Eval(self, M, T, ip):
r"""Eval(MatrixSumCoefficient self, DenseMatrix M, ElementTransformation T, IntegrationPoint ip)"""
return _coefficient.MatrixSumCoefficient_Eval(self, M, T, ip)
Eval = _swig_new_instance_method(_coefficient.MatrixSumCoefficient_Eval)
__swig_destroy__ = _coefficient.delete_MatrixSumCoefficient
# Register MatrixSumCoefficient in _coefficient:
_coefficient.MatrixSumCoefficient_swigregister(MatrixSumCoefficient)
class ScalarMatrixProductCoefficient(MatrixCoefficient):
r"""Proxy of C++ mfem::ScalarMatrixProductCoefficient class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, *args):
r"""
__init__(ScalarMatrixProductCoefficient self, double A, MatrixCoefficient B) -> ScalarMatrixProductCoefficient
__init__(ScalarMatrixProductCoefficient self, Coefficient A, MatrixCoefficient B) -> ScalarMatrixProductCoefficient
"""
_coefficient.ScalarMatrixProductCoefficient_swiginit(self, _coefficient.new_ScalarMatrixProductCoefficient(*args))
def SetAConst(self, A):
r"""SetAConst(ScalarMatrixProductCoefficient self, double A)"""
return _coefficient.ScalarMatrixProductCoefficient_SetAConst(self, A)
SetAConst = _swig_new_instance_method(_coefficient.ScalarMatrixProductCoefficient_SetAConst)
def GetAConst(self):
r"""GetAConst(ScalarMatrixProductCoefficient self) -> double"""
return _coefficient.ScalarMatrixProductCoefficient_GetAConst(self)
GetAConst = _swig_new_instance_method(_coefficient.ScalarMatrixProductCoefficient_GetAConst)
def SetACoef(self, A):
r"""SetACoef(ScalarMatrixProductCoefficient self, Coefficient A)"""
return _coefficient.ScalarMatrixProductCoefficient_SetACoef(self, A)
SetACoef = _swig_new_instance_method(_coefficient.ScalarMatrixProductCoefficient_SetACoef)
def GetACoef(self):
r"""GetACoef(ScalarMatrixProductCoefficient self) -> Coefficient"""
return _coefficient.ScalarMatrixProductCoefficient_GetACoef(self)
GetACoef = _swig_new_instance_method(_coefficient.ScalarMatrixProductCoefficient_GetACoef)
def SetBCoef(self, B):
r"""SetBCoef(ScalarMatrixProductCoefficient self, MatrixCoefficient B)"""
return _coefficient.ScalarMatrixProductCoefficient_SetBCoef(self, B)
SetBCoef = _swig_new_instance_method(_coefficient.ScalarMatrixProductCoefficient_SetBCoef)
def GetBCoef(self):
r"""GetBCoef(ScalarMatrixProductCoefficient self) -> MatrixCoefficient"""
return _coefficient.ScalarMatrixProductCoefficient_GetBCoef(self)
GetBCoef = _swig_new_instance_method(_coefficient.ScalarMatrixProductCoefficient_GetBCoef)
def Eval(self, M, T, ip):
r"""Eval(ScalarMatrixProductCoefficient self, DenseMatrix M, ElementTransformation T, IntegrationPoint ip)"""
return _coefficient.ScalarMatrixProductCoefficient_Eval(self, M, T, ip)
Eval = _swig_new_instance_method(_coefficient.ScalarMatrixProductCoefficient_Eval)
__swig_destroy__ = _coefficient.delete_ScalarMatrixProductCoefficient
# Register ScalarMatrixProductCoefficient in _coefficient:
_coefficient.ScalarMatrixProductCoefficient_swigregister(ScalarMatrixProductCoefficient)
class TransposeMatrixCoefficient(MatrixCoefficient):
r"""Proxy of C++ mfem::TransposeMatrixCoefficient class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, A):
r"""__init__(TransposeMatrixCoefficient self, MatrixCoefficient A) -> TransposeMatrixCoefficient"""
_coefficient.TransposeMatrixCoefficient_swiginit(self, _coefficient.new_TransposeMatrixCoefficient(A))
def SetACoef(self, A):
r"""SetACoef(TransposeMatrixCoefficient self, MatrixCoefficient A)"""
return _coefficient.TransposeMatrixCoefficient_SetACoef(self, A)
SetACoef = _swig_new_instance_method(_coefficient.TransposeMatrixCoefficient_SetACoef)
def GetACoef(self):
r"""GetACoef(TransposeMatrixCoefficient self) -> MatrixCoefficient"""
return _coefficient.TransposeMatrixCoefficient_GetACoef(self)
GetACoef = _swig_new_instance_method(_coefficient.TransposeMatrixCoefficient_GetACoef)
def Eval(self, M, T, ip):
r"""Eval(TransposeMatrixCoefficient self, DenseMatrix M, ElementTransformation T, IntegrationPoint ip)"""
return _coefficient.TransposeMatrixCoefficient_Eval(self, M, T, ip)
Eval = _swig_new_instance_method(_coefficient.TransposeMatrixCoefficient_Eval)
__swig_destroy__ = _coefficient.delete_TransposeMatrixCoefficient
# Register TransposeMatrixCoefficient in _coefficient:
_coefficient.TransposeMatrixCoefficient_swigregister(TransposeMatrixCoefficient)
class InverseMatrixCoefficient(MatrixCoefficient):
r"""Proxy of C++ mfem::InverseMatrixCoefficient class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, A):
r"""__init__(InverseMatrixCoefficient self, MatrixCoefficient A) -> InverseMatrixCoefficient"""
_coefficient.InverseMatrixCoefficient_swiginit(self, _coefficient.new_InverseMatrixCoefficient(A))
def SetACoef(self, A):
r"""SetACoef(InverseMatrixCoefficient self, MatrixCoefficient A)"""
return _coefficient.InverseMatrixCoefficient_SetACoef(self, A)
SetACoef = _swig_new_instance_method(_coefficient.InverseMatrixCoefficient_SetACoef)
def GetACoef(self):
r"""GetACoef(InverseMatrixCoefficient self) -> MatrixCoefficient"""
return _coefficient.InverseMatrixCoefficient_GetACoef(self)
GetACoef = _swig_new_instance_method(_coefficient.InverseMatrixCoefficient_GetACoef)
def Eval(self, M, T, ip):
r"""Eval(InverseMatrixCoefficient self, DenseMatrix M, ElementTransformation T, IntegrationPoint ip)"""
return _coefficient.InverseMatrixCoefficient_Eval(self, M, T, ip)
Eval = _swig_new_instance_method(_coefficient.InverseMatrixCoefficient_Eval)
__swig_destroy__ = _coefficient.delete_InverseMatrixCoefficient
# Register InverseMatrixCoefficient in _coefficient:
_coefficient.InverseMatrixCoefficient_swigregister(InverseMatrixCoefficient)
class OuterProductCoefficient(MatrixCoefficient):
r"""Proxy of C++ mfem::OuterProductCoefficient class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, A, B):
r"""__init__(OuterProductCoefficient self, VectorCoefficient A, VectorCoefficient B) -> OuterProductCoefficient"""
_coefficient.OuterProductCoefficient_swiginit(self, _coefficient.new_OuterProductCoefficient(A, B))
def SetACoef(self, A):
r"""SetACoef(OuterProductCoefficient self, VectorCoefficient A)"""
return _coefficient.OuterProductCoefficient_SetACoef(self, A)
SetACoef = _swig_new_instance_method(_coefficient.OuterProductCoefficient_SetACoef)
def GetACoef(self):
r"""GetACoef(OuterProductCoefficient self) -> VectorCoefficient"""
return _coefficient.OuterProductCoefficient_GetACoef(self)
GetACoef = _swig_new_instance_method(_coefficient.OuterProductCoefficient_GetACoef)
def SetBCoef(self, B):
r"""SetBCoef(OuterProductCoefficient self, VectorCoefficient B)"""
return _coefficient.OuterProductCoefficient_SetBCoef(self, B)
SetBCoef = _swig_new_instance_method(_coefficient.OuterProductCoefficient_SetBCoef)
def GetBCoef(self):
r"""GetBCoef(OuterProductCoefficient self) -> VectorCoefficient"""
return _coefficient.OuterProductCoefficient_GetBCoef(self)
GetBCoef = _swig_new_instance_method(_coefficient.OuterProductCoefficient_GetBCoef)
def Eval(self, M, T, ip):
r"""Eval(OuterProductCoefficient self, DenseMatrix M, ElementTransformation T, IntegrationPoint ip)"""
return _coefficient.OuterProductCoefficient_Eval(self, M, T, ip)
Eval = _swig_new_instance_method(_coefficient.OuterProductCoefficient_Eval)
__swig_destroy__ = _coefficient.delete_OuterProductCoefficient
# Register OuterProductCoefficient in _coefficient:
_coefficient.OuterProductCoefficient_swigregister(OuterProductCoefficient)
class CrossCrossCoefficient(MatrixCoefficient):
r"""Proxy of C++ mfem::CrossCrossCoefficient class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, A, K):
r"""__init__(CrossCrossCoefficient self, Coefficient A, VectorCoefficient K) -> CrossCrossCoefficient"""
_coefficient.CrossCrossCoefficient_swiginit(self, _coefficient.new_CrossCrossCoefficient(A, K))
def SetAConst(self, A):
r"""SetAConst(CrossCrossCoefficient self, double A)"""
return _coefficient.CrossCrossCoefficient_SetAConst(self, A)
SetAConst = _swig_new_instance_method(_coefficient.CrossCrossCoefficient_SetAConst)
def GetAConst(self):
r"""GetAConst(CrossCrossCoefficient self) -> double"""
return _coefficient.CrossCrossCoefficient_GetAConst(self)
GetAConst = _swig_new_instance_method(_coefficient.CrossCrossCoefficient_GetAConst)
def SetACoef(self, A):
r"""SetACoef(CrossCrossCoefficient self, Coefficient A)"""
return _coefficient.CrossCrossCoefficient_SetACoef(self, A)
SetACoef = _swig_new_instance_method(_coefficient.CrossCrossCoefficient_SetACoef)
def GetACoef(self):
r"""GetACoef(CrossCrossCoefficient self) -> Coefficient"""
return _coefficient.CrossCrossCoefficient_GetACoef(self)
GetACoef = _swig_new_instance_method(_coefficient.CrossCrossCoefficient_GetACoef)
def SetKCoef(self, K):
r"""SetKCoef(CrossCrossCoefficient self, VectorCoefficient K)"""
return _coefficient.CrossCrossCoefficient_SetKCoef(self, K)
SetKCoef = _swig_new_instance_method(_coefficient.CrossCrossCoefficient_SetKCoef)
def GetKCoef(self):
r"""GetKCoef(CrossCrossCoefficient self) -> VectorCoefficient"""
return _coefficient.CrossCrossCoefficient_GetKCoef(self)
GetKCoef = _swig_new_instance_method(_coefficient.CrossCrossCoefficient_GetKCoef)
def Eval(self, M, T, ip):
r"""Eval(CrossCrossCoefficient self, DenseMatrix M, ElementTransformation T, IntegrationPoint ip)"""
return _coefficient.CrossCrossCoefficient_Eval(self, M, T, ip)
Eval = _swig_new_instance_method(_coefficient.CrossCrossCoefficient_Eval)
__swig_destroy__ = _coefficient.delete_CrossCrossCoefficient
# Register CrossCrossCoefficient in _coefficient:
_coefficient.CrossCrossCoefficient_swigregister(CrossCrossCoefficient)
class VectorQuadratureFunctionCoefficient(VectorCoefficient):
r"""Proxy of C++ mfem::VectorQuadratureFunctionCoefficient class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
def SetComponent(self, _index, _length):
r"""SetComponent(VectorQuadratureFunctionCoefficient self, int _index, int _length)"""
return _coefficient.VectorQuadratureFunctionCoefficient_SetComponent(self, _index, _length)
SetComponent = _swig_new_instance_method(_coefficient.VectorQuadratureFunctionCoefficient_SetComponent)
def GetQuadFunction(self):
r"""GetQuadFunction(VectorQuadratureFunctionCoefficient self) -> mfem::QuadratureFunction const &"""
return _coefficient.VectorQuadratureFunctionCoefficient_GetQuadFunction(self)
GetQuadFunction = _swig_new_instance_method(_coefficient.VectorQuadratureFunctionCoefficient_GetQuadFunction)
def Eval(self, *args):
r"""
Eval(VectorQuadratureFunctionCoefficient self, Vector V, ElementTransformation T, IntegrationPoint ip)
Eval(VectorQuadratureFunctionCoefficient self, DenseMatrix M, ElementTransformation T, IntegrationRule ir)
"""
return _coefficient.VectorQuadratureFunctionCoefficient_Eval(self, *args)
Eval = _swig_new_instance_method(_coefficient.VectorQuadratureFunctionCoefficient_Eval)
__swig_destroy__ = _coefficient.delete_VectorQuadratureFunctionCoefficient
# Register VectorQuadratureFunctionCoefficient in _coefficient:
_coefficient.VectorQuadratureFunctionCoefficient_swigregister(VectorQuadratureFunctionCoefficient)
class QuadratureFunctionCoefficient(Coefficient):
r"""Proxy of C++ mfem::QuadratureFunctionCoefficient class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, qf):
r"""__init__(QuadratureFunctionCoefficient self, mfem::QuadratureFunction & qf) -> QuadratureFunctionCoefficient"""
_coefficient.QuadratureFunctionCoefficient_swiginit(self, _coefficient.new_QuadratureFunctionCoefficient(qf))
def GetQuadFunction(self):
r"""GetQuadFunction(QuadratureFunctionCoefficient self) -> mfem::QuadratureFunction const &"""
return _coefficient.QuadratureFunctionCoefficient_GetQuadFunction(self)
GetQuadFunction = _swig_new_instance_method(_coefficient.QuadratureFunctionCoefficient_GetQuadFunction)
def Eval(self, T, ip):
r"""Eval(QuadratureFunctionCoefficient self, ElementTransformation T, IntegrationPoint ip) -> double"""
return _coefficient.QuadratureFunctionCoefficient_Eval(self, T, ip)
Eval = _swig_new_instance_method(_coefficient.QuadratureFunctionCoefficient_Eval)
__swig_destroy__ = _coefficient.delete_QuadratureFunctionCoefficient
# Register QuadratureFunctionCoefficient in _coefficient:
_coefficient.QuadratureFunctionCoefficient_swigregister(QuadratureFunctionCoefficient)
def ComputeLpNorm(*args):
r"""
ComputeLpNorm(double p, Coefficient coeff, mfem::Mesh & mesh, mfem::IntegrationRule const *[] irs) -> double
ComputeLpNorm(double p, VectorCoefficient coeff, mfem::Mesh & mesh, mfem::IntegrationRule const *[] irs) -> double
"""
return _coefficient.ComputeLpNorm(*args)
ComputeLpNorm = _coefficient.ComputeLpNorm
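# Usage sketch (illustrative): computes the Lp norm of a coefficient over a
# mesh; `irs` stands for the per-geometry array of IntegrationRule pointers
# expected by the typemap:
#
#     nrm = ComputeLpNorm(2.0, coeff, mesh, irs)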
class NumbaFunctionBase(object):
r"""Proxy of C++ NumbaFunctionBase class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, input, sdim, td):
r"""__init__(NumbaFunctionBase self, PyObject * input, int sdim, bool td) -> NumbaFunctionBase"""
_coefficient.NumbaFunctionBase_swiginit(self, _coefficient.new_NumbaFunctionBase(input, sdim, td))
def print_add(self):
r"""print_add(NumbaFunctionBase self) -> double"""
return _coefficient.NumbaFunctionBase_print_add(self)
print_add = _swig_new_instance_method(_coefficient.NumbaFunctionBase_print_add)
__swig_destroy__ = _coefficient.delete_NumbaFunctionBase
# Register NumbaFunctionBase in _coefficient:
_coefficient.NumbaFunctionBase_swigregister(NumbaFunctionBase)
class NumbaFunction(NumbaFunctionBase):
r"""Proxy of C++ NumbaFunction class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, *args):
r"""
__init__(NumbaFunction self, PyObject * input, int sdim) -> NumbaFunction
__init__(NumbaFunction self, PyObject * input, int sdim, bool td) -> NumbaFunction
"""
_coefficient.NumbaFunction_swiginit(self, _coefficient.new_NumbaFunction(*args))
def call0(self, x):
r"""call0(NumbaFunction self, Vector x) -> double"""
return _coefficient.NumbaFunction_call0(self, x)
call0 = _swig_new_instance_method(_coefficient.NumbaFunction_call0)
def call(self, x):
r"""call(NumbaFunction self, Vector x) -> double"""
return _coefficient.NumbaFunction_call(self, x)
call = _swig_new_instance_method(_coefficient.NumbaFunction_call)
def call0t(self, x, t):
r"""call0t(NumbaFunction self, Vector x, double t) -> double"""
return _coefficient.NumbaFunction_call0t(self, x, t)
call0t = _swig_new_instance_method(_coefficient.NumbaFunction_call0t)
def callt(self, x, t):
r"""callt(NumbaFunction self, Vector x, double t) -> double"""
return _coefficient.NumbaFunction_callt(self, x, t)
callt = _swig_new_instance_method(_coefficient.NumbaFunction_callt)
def GenerateCoefficient(self, use_0=0):
r"""GenerateCoefficient(NumbaFunction self, int use_0=0) -> FunctionCoefficient"""
val = _coefficient.NumbaFunction_GenerateCoefficient(self, use_0)
        val._link = self  # keep the compiled function alive with the coefficient
return val
__swig_destroy__ = _coefficient.delete_NumbaFunction
# Register NumbaFunction in _coefficient:
_coefficient.NumbaFunction_swigregister(NumbaFunction)
class VectorNumbaFunction(NumbaFunctionBase):
r"""Proxy of C++ VectorNumbaFunction class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, *args):
r"""
__init__(VectorNumbaFunction self, PyObject * input, int sdim, int vdim) -> VectorNumbaFunction
__init__(VectorNumbaFunction self, PyObject * input, int sdim, int vdim, bool td) -> VectorNumbaFunction
"""
_coefficient.VectorNumbaFunction_swiginit(self, _coefficient.new_VectorNumbaFunction(*args))
def call(self, x, out):
r"""call(VectorNumbaFunction self, Vector x, Vector out)"""
return _coefficient.VectorNumbaFunction_call(self, x, out)
call = _swig_new_instance_method(_coefficient.VectorNumbaFunction_call)
def callt(self, x, t, out):
r"""callt(VectorNumbaFunction self, Vector x, double t, Vector out)"""
return _coefficient.VectorNumbaFunction_callt(self, x, t, out)
callt = _swig_new_instance_method(_coefficient.VectorNumbaFunction_callt)
def call0(self, x, out):
r"""call0(VectorNumbaFunction self, Vector x, Vector out)"""
return _coefficient.VectorNumbaFunction_call0(self, x, out)
call0 = _swig_new_instance_method(_coefficient.VectorNumbaFunction_call0)
def call0t(self, x, t, out):
r"""call0t(VectorNumbaFunction self, Vector x, double t, Vector out)"""
return _coefficient.VectorNumbaFunction_call0t(self, x, t, out)
call0t = _swig_new_instance_method(_coefficient.VectorNumbaFunction_call0t)
def GenerateCoefficient(self, use_0=0):
r"""GenerateCoefficient(VectorNumbaFunction self, int use_0=0) -> VectorFunctionCoefficient"""
val = _coefficient.VectorNumbaFunction_GenerateCoefficient(self, use_0)
        val._link = self  # keep the compiled function alive with the coefficient
return val
__swig_destroy__ = _coefficient.delete_VectorNumbaFunction
# Register VectorNumbaFunction in _coefficient:
_coefficient.VectorNumbaFunction_swigregister(VectorNumbaFunction)
class MatrixNumbaFunction(object):
r"""Proxy of C++ MatrixNumbaFunction class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, *args):
r"""
__init__(MatrixNumbaFunction self, PyObject * input, int sdim, int vdim) -> MatrixNumbaFunction
__init__(MatrixNumbaFunction self, PyObject * input, int sdim, int vdim, bool td) -> MatrixNumbaFunction
"""
_coefficient.MatrixNumbaFunction_swiginit(self, _coefficient.new_MatrixNumbaFunction(*args))
def call(self, x, out):
r"""call(MatrixNumbaFunction self, Vector x, DenseMatrix out)"""
return _coefficient.MatrixNumbaFunction_call(self, x, out)
call = _swig_new_instance_method(_coefficient.MatrixNumbaFunction_call)
def callt(self, x, t, out):
r"""callt(MatrixNumbaFunction self, Vector x, double t, DenseMatrix out)"""
return _coefficient.MatrixNumbaFunction_callt(self, x, t, out)
callt = _swig_new_instance_method(_coefficient.MatrixNumbaFunction_callt)
def call0(self, x, out):
r"""call0(MatrixNumbaFunction self, Vector x, DenseMatrix out)"""
return _coefficient.MatrixNumbaFunction_call0(self, x, out)
call0 = _swig_new_instance_method(_coefficient.MatrixNumbaFunction_call0)
def call0t(self, x, t, out):
r"""call0t(MatrixNumbaFunction self, Vector x, double t, DenseMatrix out)"""
return _coefficient.MatrixNumbaFunction_call0t(self, x, t, out)
call0t = _swig_new_instance_method(_coefficient.MatrixNumbaFunction_call0t)
def GenerateCoefficient(self, use_0=0):
r"""GenerateCoefficient(MatrixNumbaFunction self, int use_0=0) -> MatrixFunctionCoefficient"""
val = _coefficient.MatrixNumbaFunction_GenerateCoefficient(self, use_0)
        val._link = self  # keep the compiled function alive with the coefficient
return val
__swig_destroy__ = _coefficient.delete_MatrixNumbaFunction
# Register MatrixNumbaFunction in _coefficient:
_coefficient.MatrixNumbaFunction_swigregister(MatrixNumbaFunction)
try:
from numba import cfunc, types, carray
scalar_sig = types.double(types.CPointer(types.double),
types.intc)
scalar_sig_t = types.double(types.CPointer(types.double),
types.double,
types.intc)
vector_sig = types.void(types.CPointer(types.double),
types.CPointer(types.double),
types.intc,
types.intc)
vector_sig_t = types.void(types.CPointer(types.double),
types.double,
types.CPointer(types.double),
types.intc,
types.intc)
matrix_sig = vector_sig
matrix_sig_t = vector_sig_t
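    # The cfunc signatures above define the numba entry points: scalar kernels
    # receive (double *x, [double t,] int sdim) and return a double; vector and
    # matrix kernels receive (double *x, [double t,] double *out, int sdim,
    # int vdim) and write the result into `out` (matrices flattened).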
from inspect import signature
class _JIT(object):
def scalar(self, sdim=3, td=False):
def dec(func):
l = len(signature(func).parameters)
if l == 1 and not td:
sig = types.double(types.CPointer(types.double))
use_0 = 1
elif l == 2 and not td:
sig = scalar_sig
use_0 = 0
elif l == 2 and td:
sig = types.double(types.CPointer(types.double),
types.double)
use_0 = 1
                elif l == 3 and td:
                    sig = scalar_sig_t
                    use_0 = 0
                else:
                    assert False, "Unsupported signature type"
from numba import cfunc
ff = cfunc(sig)(func)
coeff = NumbaFunction(ff, sdim, td).GenerateCoefficient(use_0)
return coeff
return dec
def vector(self, sdim=3, vdim=-1, td=False):
vdim = sdim if vdim==-1 else vdim
def dec(func):
l = len(signature(func).parameters)
if l == 2 and not td:
sig = types.void(types.CPointer(types.double),
types.CPointer(types.double),)
use_0 = 1
elif l == 4 and not td:
sig = vector_sig
use_0 = 0
elif l == 3 and td:
sig = types.void(types.CPointer(types.double),
types.CPointer(types.double),
types.double)
use_0 = 1
elif l == 5 and td:
sig = vector_sig_t
use_0 = 0
else:
assert False, "Unsupported signature type"
from numba import cfunc
ff = cfunc(sig)(func)
coeff = VectorNumbaFunction(ff, sdim, vdim, td).GenerateCoefficient(use_0)
return coeff
return dec
        def matrix(self, sdim=3, vdim=-1, td=False):
vdim = sdim if vdim==-1 else vdim
def dec(func):
l = len(signature(func).parameters)
if l == 2 and not td:
sig = types.void(types.CPointer(types.double),
types.CPointer(types.double),)
use_0 = 1
elif l == 4 and not td:
sig = matrix_sig
use_0 = 0
elif l == 3 and td:
sig = types.void(types.CPointer(types.double),
types.CPointer(types.double),
types.double)
use_0 = 1
elif l == 5 and td:
sig = matrix_sig_t
use_0 = 0
else:
assert False, "Unsupported signature type"
from numba import cfunc
ff = cfunc(sig)(func)
                coeff = MatrixNumbaFunction(ff, sdim, vdim, td).GenerateCoefficient(use_0)
return coeff
return dec
jit = _JIT()
except ImportError:
pass
except Exception:
    assert False, "Failed setting Numba signatures by an error other than ImportError"
def fake_func(x):
r"""fake_func(Vector x) -> double"""
return _coefficient.fake_func(x)
fake_func = _coefficient.fake_func
def fake_func_vec(x, Ht):
r"""fake_func_vec(Vector x, Vector Ht)"""
return _coefficient.fake_func_vec(x, Ht)
fake_func_vec = _coefficient.fake_func_vec
def fake_func_mat(x, Kt):
r"""fake_func_mat(Vector x, DenseMatrix Kt)"""
return _coefficient.fake_func_mat(x, Kt)
fake_func_mat = _coefficient.fake_func_mat
class PyCoefficientBase(FunctionCoefficient):
r"""Proxy of C++ mfem::PyCoefficientBase class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, tdep):
r"""__init__(PyCoefficientBase self, int tdep) -> PyCoefficientBase"""
if self.__class__ == PyCoefficientBase:
_self = None
else:
_self = self
_coefficient.PyCoefficientBase_swiginit(self, _coefficient.new_PyCoefficientBase(_self, tdep))
def Eval(self, T, ip):
r"""Eval(PyCoefficientBase self, ElementTransformation T, IntegrationPoint ip) -> double"""
return _coefficient.PyCoefficientBase_Eval(self, T, ip)
Eval = _swig_new_instance_method(_coefficient.PyCoefficientBase_Eval)
def _EvalPy(self, arg0):
r"""_EvalPy(PyCoefficientBase self, Vector arg0) -> double"""
return _coefficient.PyCoefficientBase__EvalPy(self, arg0)
_EvalPy = _swig_new_instance_method(_coefficient.PyCoefficientBase__EvalPy)
def _EvalPyT(self, arg0, arg1):
r"""_EvalPyT(PyCoefficientBase self, Vector arg0, double arg1) -> double"""
return _coefficient.PyCoefficientBase__EvalPyT(self, arg0, arg1)
_EvalPyT = _swig_new_instance_method(_coefficient.PyCoefficientBase__EvalPyT)
__swig_destroy__ = _coefficient.delete_PyCoefficientBase
def __disown__(self):
self.this.disown()
_coefficient.disown_PyCoefficientBase(self)
return weakref.proxy(self)
# Register PyCoefficientBase in _coefficient:
_coefficient.PyCoefficientBase_swigregister(PyCoefficientBase)
class VectorPyCoefficientBase(VectorFunctionCoefficient):
r"""Proxy of C++ mfem::VectorPyCoefficientBase class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, dim, tdep, q=None):
r"""__init__(VectorPyCoefficientBase self, int dim, int tdep, Coefficient q=None) -> VectorPyCoefficientBase"""
if self.__class__ == VectorPyCoefficientBase:
_self = None
else:
_self = self
_coefficient.VectorPyCoefficientBase_swiginit(self, _coefficient.new_VectorPyCoefficientBase(_self, dim, tdep, q))
def Eval(self, *args):
r"""
Eval(VectorPyCoefficientBase self, DenseMatrix M, ElementTransformation T, IntegrationRule ir)
Eval(VectorPyCoefficientBase self, Vector V, ElementTransformation T, IntegrationPoint ip)
"""
return _coefficient.VectorPyCoefficientBase_Eval(self, *args)
Eval = _swig_new_instance_method(_coefficient.VectorPyCoefficientBase_Eval)
def _EvalPy(self, arg0, arg1):
r"""_EvalPy(VectorPyCoefficientBase self, Vector arg0, Vector arg1)"""
return _coefficient.VectorPyCoefficientBase__EvalPy(self, arg0, arg1)
_EvalPy = _swig_new_instance_method(_coefficient.VectorPyCoefficientBase__EvalPy)
def _EvalPyT(self, arg0, arg1, arg2):
r"""_EvalPyT(VectorPyCoefficientBase self, Vector arg0, double arg1, Vector arg2)"""
return _coefficient.VectorPyCoefficientBase__EvalPyT(self, arg0, arg1, arg2)
_EvalPyT = _swig_new_instance_method(_coefficient.VectorPyCoefficientBase__EvalPyT)
__swig_destroy__ = _coefficient.delete_VectorPyCoefficientBase
def __disown__(self):
self.this.disown()
_coefficient.disown_VectorPyCoefficientBase(self)
return weakref.proxy(self)
# Register VectorPyCoefficientBase in _coefficient:
_coefficient.VectorPyCoefficientBase_swigregister(VectorPyCoefficientBase)
class MatrixPyCoefficientBase(MatrixFunctionCoefficient):
r"""Proxy of C++ mfem::MatrixPyCoefficientBase class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, dim, tdep):
r"""__init__(MatrixPyCoefficientBase self, int dim, int tdep) -> MatrixPyCoefficientBase"""
if self.__class__ == MatrixPyCoefficientBase:
_self = None
else:
_self = self
_coefficient.MatrixPyCoefficientBase_swiginit(self, _coefficient.new_MatrixPyCoefficientBase(_self, dim, tdep))
def Eval(self, K, T, ip):
r"""Eval(MatrixPyCoefficientBase self, DenseMatrix K, ElementTransformation T, IntegrationPoint ip)"""
return _coefficient.MatrixPyCoefficientBase_Eval(self, K, T, ip)
Eval = _swig_new_instance_method(_coefficient.MatrixPyCoefficientBase_Eval)
def _EvalPy(self, arg0, arg1):
r"""_EvalPy(MatrixPyCoefficientBase self, Vector arg0, DenseMatrix arg1)"""
return _coefficient.MatrixPyCoefficientBase__EvalPy(self, arg0, arg1)
_EvalPy = _swig_new_instance_method(_coefficient.MatrixPyCoefficientBase__EvalPy)
def _EvalPyT(self, arg0, arg1, arg2):
r"""_EvalPyT(MatrixPyCoefficientBase self, Vector arg0, double arg1, DenseMatrix arg2)"""
return _coefficient.MatrixPyCoefficientBase__EvalPyT(self, arg0, arg1, arg2)
_EvalPyT = _swig_new_instance_method(_coefficient.MatrixPyCoefficientBase__EvalPyT)
__swig_destroy__ = _coefficient.delete_MatrixPyCoefficientBase
def __disown__(self):
self.this.disown()
_coefficient.disown_MatrixPyCoefficientBase(self)
return weakref.proxy(self)
# Register MatrixPyCoefficientBase in _coefficient:
_coefficient.MatrixPyCoefficientBase_swigregister(MatrixPyCoefficientBase)
class PyCoefficient(PyCoefficientBase):
def __init__(self):
PyCoefficientBase.__init__(self, 0)
def _EvalPy(self, x):
return self.EvalValue(x.GetDataArray())
def EvalValue(self, x):
return 0.0
class PyCoefficientT(PyCoefficientBase):
def __init__(self):
PyCoefficientBase.__init__(self, 1)
def _EvalPyT(self, x, t):
return self.EvalValue(x.GetDataArray(), t)
def EvalValue(self, x, t):
return 0.0
class VectorPyCoefficient(VectorPyCoefficientBase):
def __init__(self, dim):
self.vdim = dim
VectorPyCoefficientBase.__init__(self, dim, 0)
def _EvalPy(self, x, V):
v = self.EvalValue(x.GetDataArray())
V.Assign(v)
def _EvalPyT(self, x, t, V):
v = self.EvalValue(x.GetDataArray())
V.Assign(v)
def EvalValue(self, x):
return [0,0,0]
class VectorPyCoefficientT(VectorPyCoefficientBase):
def __init__(self, dim):
self.vdim = dim
VectorPyCoefficientBase.__init__(self, dim, 1)
def _EvalPy(self, x, V):
v = self.EvalValue(x.GetDataArray(), 0)
V.Assign(v)
def _EvalPyT(self, x, t, V):
v = self.EvalValue(x.GetDataArray(), t)
V.Assign(v)
def EvalValue(self, x, t):
return [0,0,0]
class MatrixPyCoefficient(MatrixPyCoefficientBase):
def __init__(self, dim):
self.vdim = dim
MatrixPyCoefficientBase.__init__(self, dim, 0)
def _EvalPy(self, x, K):
k = self.EvalValue(x.GetDataArray())
K.Assign(k)
def EvalValue(self, x):
return np.array([[0,0,0], [0,0,0], [0,0,0]])
class MatrixPyCoefficientT(MatrixPyCoefficientBase):
def __init__(self, dim):
self.vdim = dim
MatrixPyCoefficientBase.__init__(self, dim, 1)
def _EvalPyT(self, x, t, K):
k = self.EvalValue(x.GetDataArray(), t)
K.Assign(k)
def EvalValue(self, x, t):
return np.array([[0,0,0], [0,0,0], [0,0,0]])
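# A hedged usage sketch: subclassing PyCoefficient and overriding EvalValue is
# the intended extension pattern for these wrapper classes; the sine
# coefficient below is illustrative and assumes a built _coefficient module.
if __name__ == "__main__":
    import math

    class SineX(PyCoefficient):
        def EvalValue(self, x):
            # x arrives as a numpy array of point coordinates
            return math.sin(x[0])

    c = SineX()  # usable wherever mfem expects a scalar Coefficient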
| 47.528726
| 183
| 0.745249
|
4dfc18b9572c1c96bf9b7ee69b281b16456b1aa6
| 2,936
|
py
|
Python
|
exchange_server/datadog_checks/exchange_server/check.py
|
flowcommerce/integrations-core
|
c562b0d423ec1a5dd4073b703d6a8d3a9ab23c72
|
[
"BSD-3-Clause"
] | null | null | null |
exchange_server/datadog_checks/exchange_server/check.py
|
flowcommerce/integrations-core
|
c562b0d423ec1a5dd4073b703d6a8d3a9ab23c72
|
[
"BSD-3-Clause"
] | null | null | null |
exchange_server/datadog_checks/exchange_server/check.py
|
flowcommerce/integrations-core
|
c562b0d423ec1a5dd4073b703d6a8d3a9ab23c72
|
[
"BSD-3-Clause"
] | null | null | null |
# (C) Datadog, Inc. 2021-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from datadog_checks.base.checks.windows.perf_counters.base import PerfCountersBaseCheckWithLegacySupport
from datadog_checks.base.checks.windows.perf_counters.counter import PerfObject
from datadog_checks.base.checks.windows.perf_counters.transform import NATIVE_TRANSFORMERS
from .metrics import METRICS_CONFIG
class ExchangeCheckV2(PerfCountersBaseCheckWithLegacySupport):
__NAMESPACE__ = 'exchange'
def get_default_config(self):
return {'metrics': METRICS_CONFIG}
def get_perf_object(self, connection, object_name, object_config, use_localized_counters, tags):
if object_name == 'Processor':
return CompatibilityPerfObject(
self,
connection,
object_name,
object_config,
use_localized_counters,
tags,
{'% Processor Time': 'cpu_time', '% User Time': 'cpu_user', '% Privileged Time': 'cpu_privileged'},
)
elif object_name == 'MSExchange Active Manager':
return CompatibilityPerfObject(
self,
connection,
object_name,
object_config,
use_localized_counters,
tags,
{'Database Mounted': 'database_mounted'},
)
elif object_name == 'Web Service':
return CompatibilityPerfObject(
self,
connection,
object_name,
object_config,
use_localized_counters,
tags,
{
'Current Connections': 'current_connections_total',
'Connection Attempts/sec': 'connection_attempts',
'Other Request Methods/sec': 'other_attempts',
},
)
else:
return super().get_perf_object(connection, object_name, object_config, use_localized_counters, tags)
class CompatibilityPerfObject(PerfObject):
def __init__(
self,
check,
connection,
object_name,
object_config,
use_localized_counters,
tags,
aggregate_names,
):
super().__init__(check, connection, object_name, object_config, use_localized_counters, tags)
self._aggregate_names = aggregate_names
def _configure_counters(self, available_counters, available_instances):
super()._configure_counters(available_counters, available_instances)
for counter in self.counters:
if counter.name not in self._aggregate_names:
continue
counter.aggregate_transformer = NATIVE_TRANSFORMERS[counter.metric_type](
self.check, f'{self.metric_prefix}.{self._aggregate_names[counter.name]}', {}
)
| 36.246914
| 115
| 0.616485
|
2b72f711154f8823c19eb2dd6047a99b87cf86ae
| 2,664
|
py
|
Python
|
service/generated_flatbuffers/tflite/MirrorPadOptions.py
|
lcrh/falken
|
7545431c7bfa34a9b45c2243cae40dbb58adefaa
|
[
"Apache-2.0"
] | 213
|
2021-06-11T01:15:16.000Z
|
2022-02-25T16:18:57.000Z
|
service/generated_flatbuffers/tflite/MirrorPadOptions.py
|
lcrh/falken
|
7545431c7bfa34a9b45c2243cae40dbb58adefaa
|
[
"Apache-2.0"
] | 32
|
2021-06-17T17:58:54.000Z
|
2022-02-02T05:58:10.000Z
|
service/generated_flatbuffers/tflite/MirrorPadOptions.py
|
lcrh/falken
|
7545431c7bfa34a9b45c2243cae40dbb58adefaa
|
[
"Apache-2.0"
] | 28
|
2021-06-17T17:34:21.000Z
|
2022-03-24T14:05:20.000Z
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: tflite
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class MirrorPadOptions(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsMirrorPadOptions(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = MirrorPadOptions()
x.Init(buf, n + offset)
return x
@classmethod
def MirrorPadOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
# MirrorPadOptions
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# MirrorPadOptions
def Mode(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
return 0
def MirrorPadOptionsStart(builder): builder.StartObject(1)
def MirrorPadOptionsAddMode(builder, mode): builder.PrependInt8Slot(0, mode, 0)
def MirrorPadOptionsEnd(builder): return builder.EndObject()
class MirrorPadOptionsT(object):
# MirrorPadOptionsT
def __init__(self):
self.mode = 0 # type: int
@classmethod
def InitFromBuf(cls, buf, pos):
mirrorPadOptions = MirrorPadOptions()
mirrorPadOptions.Init(buf, pos)
return cls.InitFromObj(mirrorPadOptions)
@classmethod
def InitFromObj(cls, mirrorPadOptions):
x = MirrorPadOptionsT()
x._UnPack(mirrorPadOptions)
return x
# MirrorPadOptionsT
def _UnPack(self, mirrorPadOptions):
if mirrorPadOptions is None:
return
self.mode = mirrorPadOptions.Mode()
# MirrorPadOptionsT
def Pack(self, builder):
MirrorPadOptionsStart(builder)
MirrorPadOptionsAddMode(builder, self.mode)
mirrorPadOptions = MirrorPadOptionsEnd(builder)
return mirrorPadOptions
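# A hedged round-trip sketch using the object API defined above; the mode
# value 1 is arbitrary and only demonstrates pack/unpack symmetry.
if __name__ == "__main__":
    builder = flatbuffers.Builder(0)
    opts = MirrorPadOptionsT()
    opts.mode = 1
    builder.Finish(opts.Pack(builder))  # serialize the table
    buf = builder.Output()
    decoded = MirrorPadOptions.GetRootAsMirrorPadOptions(buf, 0)
    assert decoded.Mode() == 1  # round trip preserved the mode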
| 32.096386
| 114
| 0.709459
|
8ed0be66810d38945d2d44a1f072eba182e43410
| 14,392
|
py
|
Python
|
apteco_api/models/endpoint_details.py
|
Apteco/apteco-api
|
7440c98ab10ea6d8a5997187f6fc739ce1c75d2b
|
[
"Apache-2.0"
] | 2
|
2020-05-21T14:24:16.000Z
|
2020-12-03T19:56:34.000Z
|
apteco_api/models/endpoint_details.py
|
Apteco/apteco-api
|
7440c98ab10ea6d8a5997187f6fc739ce1c75d2b
|
[
"Apache-2.0"
] | null | null | null |
apteco_api/models/endpoint_details.py
|
Apteco/apteco-api
|
7440c98ab10ea6d8a5997187f6fc739ce1c75d2b
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Apteco API
An API to allow access to Apteco Marketing Suite resources # noqa: E501
The version of the OpenAPI document: v2
Contact: support@apteco.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class EndpointDetails(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'name': 'str',
'group_name': 'str',
'method': 'str',
'url_template': 'str',
'allows_anonymous_access': 'bool',
'is_experimental': 'bool',
'is_under_development': 'bool',
'requires_licence_flags': 'list[str]',
'optionally_requires_licence_flags': 'list[str]',
'requires_roles': 'list[str]'
}
attribute_map = {
'name': 'name',
'group_name': 'groupName',
'method': 'method',
'url_template': 'urlTemplate',
'allows_anonymous_access': 'allowsAnonymousAccess',
'is_experimental': 'isExperimental',
'is_under_development': 'isUnderDevelopment',
'requires_licence_flags': 'requiresLicenceFlags',
'optionally_requires_licence_flags': 'optionallyRequiresLicenceFlags',
'requires_roles': 'requiresRoles'
}
def __init__(self, name=None, group_name=None, method=None, url_template=None, allows_anonymous_access=None, is_experimental=None, is_under_development=None, requires_licence_flags=None, optionally_requires_licence_flags=None, requires_roles=None): # noqa: E501
"""EndpointDetails - a model defined in OpenAPI""" # noqa: E501
self._name = None
self._group_name = None
self._method = None
self._url_template = None
self._allows_anonymous_access = None
self._is_experimental = None
self._is_under_development = None
self._requires_licence_flags = None
self._optionally_requires_licence_flags = None
self._requires_roles = None
self.discriminator = None
self.name = name
self.group_name = group_name
self.method = method
self.url_template = url_template
self.allows_anonymous_access = allows_anonymous_access
self.is_experimental = is_experimental
self.is_under_development = is_under_development
self.requires_licence_flags = requires_licence_flags
self.optionally_requires_licence_flags = optionally_requires_licence_flags
self.requires_roles = requires_roles
@property
def name(self):
"""Gets the name of this EndpointDetails. # noqa: E501
The name of the endpoint # noqa: E501
:return: The name of this EndpointDetails. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this EndpointDetails.
The name of the endpoint # noqa: E501
:param name: The name of this EndpointDetails. # noqa: E501
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def group_name(self):
"""Gets the group_name of this EndpointDetails. # noqa: E501
The name of the group this endpoint belongs to # noqa: E501
:return: The group_name of this EndpointDetails. # noqa: E501
:rtype: str
"""
return self._group_name
@group_name.setter
def group_name(self, group_name):
"""Sets the group_name of this EndpointDetails.
The name of the group this endpoint belongs to # noqa: E501
:param group_name: The group_name of this EndpointDetails. # noqa: E501
:type: str
"""
if group_name is None:
raise ValueError("Invalid value for `group_name`, must not be `None`") # noqa: E501
self._group_name = group_name
@property
def method(self):
"""Gets the method of this EndpointDetails. # noqa: E501
The HTTP method used for calling this endpoint # noqa: E501
:return: The method of this EndpointDetails. # noqa: E501
:rtype: str
"""
return self._method
@method.setter
def method(self, method):
"""Sets the method of this EndpointDetails.
The HTTP method used for calling this endpoint # noqa: E501
:param method: The method of this EndpointDetails. # noqa: E501
:type: str
"""
if method is None:
raise ValueError("Invalid value for `method`, must not be `None`") # noqa: E501
self._method = method
@property
def url_template(self):
"""Gets the url_template of this EndpointDetails. # noqa: E501
The URL template of this endpoint # noqa: E501
:return: The url_template of this EndpointDetails. # noqa: E501
:rtype: str
"""
return self._url_template
@url_template.setter
def url_template(self, url_template):
"""Sets the url_template of this EndpointDetails.
The URL template of this endpoint # noqa: E501
:param url_template: The url_template of this EndpointDetails. # noqa: E501
:type: str
"""
if url_template is None:
raise ValueError("Invalid value for `url_template`, must not be `None`") # noqa: E501
self._url_template = url_template
@property
def allows_anonymous_access(self):
"""Gets the allows_anonymous_access of this EndpointDetails. # noqa: E501
Whether this endpoint can be accessed without authentication details # noqa: E501
:return: The allows_anonymous_access of this EndpointDetails. # noqa: E501
:rtype: bool
"""
return self._allows_anonymous_access
@allows_anonymous_access.setter
def allows_anonymous_access(self, allows_anonymous_access):
"""Sets the allows_anonymous_access of this EndpointDetails.
Whether this endpoint can be accessed without authentication details # noqa: E501
:param allows_anonymous_access: The allows_anonymous_access of this EndpointDetails. # noqa: E501
:type: bool
"""
if allows_anonymous_access is None:
raise ValueError("Invalid value for `allows_anonymous_access`, must not be `None`") # noqa: E501
self._allows_anonymous_access = allows_anonymous_access
@property
def is_experimental(self):
"""Gets the is_experimental of this EndpointDetails. # noqa: E501
Whether this endpoint has been marked as experimental # noqa: E501
:return: The is_experimental of this EndpointDetails. # noqa: E501
:rtype: bool
"""
return self._is_experimental
@is_experimental.setter
def is_experimental(self, is_experimental):
"""Sets the is_experimental of this EndpointDetails.
Whether this endpoint has been marked as experimental # noqa: E501
:param is_experimental: The is_experimental of this EndpointDetails. # noqa: E501
:type: bool
"""
if is_experimental is None:
raise ValueError("Invalid value for `is_experimental`, must not be `None`") # noqa: E501
self._is_experimental = is_experimental
@property
def is_under_development(self):
"""Gets the is_under_development of this EndpointDetails. # noqa: E501
Whether this endpoint has been marked as under development # noqa: E501
:return: The is_under_development of this EndpointDetails. # noqa: E501
:rtype: bool
"""
return self._is_under_development
@is_under_development.setter
def is_under_development(self, is_under_development):
"""Sets the is_under_development of this EndpointDetails.
Whether this endpoint has been marked as under development # noqa: E501
:param is_under_development: The is_under_development of this EndpointDetails. # noqa: E501
:type: bool
"""
if is_under_development is None:
raise ValueError("Invalid value for `is_under_development`, must not be `None`") # noqa: E501
self._is_under_development = is_under_development
@property
def requires_licence_flags(self):
"""Gets the requires_licence_flags of this EndpointDetails. # noqa: E501
The set of licence flags that the user must have in order to be able to call the endpoint # noqa: E501
:return: The requires_licence_flags of this EndpointDetails. # noqa: E501
:rtype: list[str]
"""
return self._requires_licence_flags
@requires_licence_flags.setter
def requires_licence_flags(self, requires_licence_flags):
"""Sets the requires_licence_flags of this EndpointDetails.
The set of licence flags that the user must have in order to be able to call the endpoint # noqa: E501
:param requires_licence_flags: The requires_licence_flags of this EndpointDetails. # noqa: E501
:type: list[str]
"""
if requires_licence_flags is None:
raise ValueError("Invalid value for `requires_licence_flags`, must not be `None`") # noqa: E501
allowed_values = ["AudienceSelection", "AudiencePreview", "Export", "AdvancedQuery", "Cube", "Profile"] # noqa: E501
if not set(requires_licence_flags).issubset(set(allowed_values)):
raise ValueError(
"Invalid values for `requires_licence_flags` [{0}], must be a subset of [{1}]" # noqa: E501
.format(", ".join(map(str, set(requires_licence_flags) - set(allowed_values))), # noqa: E501
", ".join(map(str, allowed_values)))
)
self._requires_licence_flags = requires_licence_flags
@property
def optionally_requires_licence_flags(self):
"""Gets the optionally_requires_licence_flags of this EndpointDetails. # noqa: E501
The set of licence flags that the user might need to have in order to be able to call the endpoint, depending on the type of request sent to the endpoint # noqa: E501
:return: The optionally_requires_licence_flags of this EndpointDetails. # noqa: E501
:rtype: list[str]
"""
return self._optionally_requires_licence_flags
@optionally_requires_licence_flags.setter
def optionally_requires_licence_flags(self, optionally_requires_licence_flags):
"""Sets the optionally_requires_licence_flags of this EndpointDetails.
The set of licence flags that the user might need to have in order to be able to call the endpoint, depending on the type of request sent to the endpoint # noqa: E501
:param optionally_requires_licence_flags: The optionally_requires_licence_flags of this EndpointDetails. # noqa: E501
:type: list[str]
"""
if optionally_requires_licence_flags is None:
raise ValueError("Invalid value for `optionally_requires_licence_flags`, must not be `None`") # noqa: E501
allowed_values = ["AudienceSelection", "AudiencePreview", "Export", "AdvancedQuery", "Cube", "Profile"] # noqa: E501
if not set(optionally_requires_licence_flags).issubset(set(allowed_values)):
raise ValueError(
"Invalid values for `optionally_requires_licence_flags` [{0}], must be a subset of [{1}]" # noqa: E501
.format(", ".join(map(str, set(optionally_requires_licence_flags) - set(allowed_values))), # noqa: E501
", ".join(map(str, allowed_values)))
)
self._optionally_requires_licence_flags = optionally_requires_licence_flags
@property
def requires_roles(self):
"""Gets the requires_roles of this EndpointDetails. # noqa: E501
Any roles that the user must have to access this endpoint # noqa: E501
:return: The requires_roles of this EndpointDetails. # noqa: E501
:rtype: list[str]
"""
return self._requires_roles
@requires_roles.setter
def requires_roles(self, requires_roles):
"""Sets the requires_roles of this EndpointDetails.
Any roles that the user must have to access this endpoint # noqa: E501
:param requires_roles: The requires_roles of this EndpointDetails. # noqa: E501
:type: list[str]
"""
if requires_roles is None:
raise ValueError("Invalid value for `requires_roles`, must not be `None`") # noqa: E501
self._requires_roles = requires_roles
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, EndpointDetails):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
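# A hedged usage sketch: every field value below is invented, purely to show
# construction and dict serialisation of the generated model.
if __name__ == "__main__":
    details = EndpointDetails(
        name='GetSessions',
        group_name='Sessions',
        method='GET',
        url_template='/sessions/{id}',
        allows_anonymous_access=False,
        is_experimental=False,
        is_under_development=False,
        requires_licence_flags=['AudienceSelection'],
        optionally_requires_licence_flags=[],
        requires_roles=[],
    )
    print(details.to_dict())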
| 36.714286
| 266
| 0.644803
|
059615f702a40aafe050d709894a6572935b06b2
| 958
|
py
|
Python
|
main.py
|
camoredo/spaceapps
|
fc6b8900046d8b5e00b7b9a3b910128594d4b99f
|
[
"MIT"
] | null | null | null |
main.py
|
camoredo/spaceapps
|
fc6b8900046d8b5e00b7b9a3b910128594d4b99f
|
[
"MIT"
] | null | null | null |
main.py
|
camoredo/spaceapps
|
fc6b8900046d8b5e00b7b9a3b910128594d4b99f
|
[
"MIT"
] | null | null | null |
import pandas as pd
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
names = os.listdir(os.path.join(dir_path, 'inputs'))
scatter_dfs = []
heatmap_dfs = []
count = [0, 0]
for name in names:
if not name.endswith('.txt'):
continue
inp_path = os.path.join(dir_path, 'inputs', name)
df = pd.read_csv(inp_path, delim_whitespace=True)
df = df[['Longitude', 'Latitude']]
df.columns = ['lng', 'lat']
scatter_dfs.append(df)
heatmap_dfs.append(df.groupby(df.columns.tolist()).size().reset_index().rename(columns={0:'count'}))
scatter_df = pd.concat(scatter_dfs)
heatmap_df = pd.concat(heatmap_dfs)
json_scatter_data = scatter_df.to_json(orient="records")
with open(os.path.join(dir_path, 'scatter_data.json'), 'w') as f:
f.write(json_scatter_data)
json_heatmap_data = heatmap_df.to_json(orient="records")
with open(os.path.join(dir_path, 'heatmap_data.json'), 'w') as f:
f.write(json_heatmap_data)
| 27.371429
| 104
| 0.701461
|
2b7b64a30550946c3b169ce0d4edb0dc310483b6
| 488
|
py
|
Python
|
merge_180329.py
|
Jianyang-Hu/numpypractice
|
f4d4a3e28f5dd10f9722f83b1ac66f0f2ccef8b9
|
[
"Apache-2.0"
] | null | null | null |
merge_180329.py
|
Jianyang-Hu/numpypractice
|
f4d4a3e28f5dd10f9722f83b1ac66f0f2ccef8b9
|
[
"Apache-2.0"
] | null | null | null |
merge_180329.py
|
Jianyang-Hu/numpypractice
|
f4d4a3e28f5dd10f9722f83b1ac66f0f2ccef8b9
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# @version : Python3.6
# @Time : 2018/3/29 10:38
# @Author : Jianyang-Hu
# @contact : jianyang1993@163.com
# @File : merge_180329.py
# @Software: PyCharm
import pandas as pd
df1 = pd.DataFrame({'key':['b','b','a','c','a','a','b'],'data1':range(7)})
df2 = pd.DataFrame({'key':['a','b','d'],'data2':range(3)})
# the default join is 'inner'
# print(pd.merge(df1,df2))
# 'outer' takes the union of the keys
# print(pd.merge(df1,df2,how='outer'))
# merge using the keys from the left frame
e = pd.merge(df1,df2,on='key',how='left')
print(e)
| 22.181818
| 74
| 0.602459
|
76f828c474fe00bf1c58a06653f04676802c1af0
| 6,351
|
py
|
Python
|
src/main.py
|
artem-smotrakov/esp8266-watering
|
8240d89dac7c361015648a7376e639d1504c9197
|
[
"MIT"
] | 5
|
2019-09-23T00:56:43.000Z
|
2021-04-17T14:32:44.000Z
|
src/main.py
|
artem-smotrakov/esp8266-watering
|
8240d89dac7c361015648a7376e639d1504c9197
|
[
"MIT"
] | null | null | null |
src/main.py
|
artem-smotrakov/esp8266-watering
|
8240d89dac7c361015648a7376e639d1504c9197
|
[
"MIT"
] | 1
|
2021-03-30T12:48:06.000Z
|
2021-03-30T12:48:06.000Z
|
# ssid and password for the access point
# make sure that the password is not too short
# otherwise, an OSError occurs while setting up a wi-fi access point
ACCESS_POINT_SSID = 'esp8266-watering'
ACCESS_POINT_PASSWORD = 'helloesp8266'
# template for an HTTP response which is sent by the server
HTTP_RESPONSE = b"""\
HTTP/1.0 200 OK
Content-Length: %d
%s
"""
# HTTP redirect response which is sent by the server after processing
# data from the form below
HTTP_REDIRECT = b"""\
HTTP/1.0 301 Moved Permanently
Location: /
"""
# an HTML form for configuring the device
FORM_TEMPLATE = """\
<html>
<head>
<title>Watering system configuration</title>
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<script type="text/javascript">
function init() {
var s = document.getElementById('error_handling_options');
for (var i = 0; i < s.options.length; i++) {
if (s.options[i].value == '%error_handling%') {
s.options[i].selected = true;
}
}
}
</script>
</head>
<body>
<h2 style="font-size:10vw">Watering system configuration</h2>
<form method="post">
<h3 style="font-size:5vw">Wi-Fi settings</h3>
<div style="width: 100%;">
<p style="width: 100%;">SSID: <input name="ssid" type="text" value="%ssid%"/></p>
<p style="width: 100%;">Password: <input name="password" type="password"/></p>
</div>
<h3 style="font-size:5vw">Watering settings</h3>
<div style="width: 100%;">
<p style="width: 100%;">Interval: <input name="watering_interval" type="text" value="%watering_interval%"/></p>
<p style="width: 100%;">Duration: <input name="watering_duration" type="text" value="%watering_duration%"/></p>
</div>
<h3 style="font-size:5vw">Error handling</h3>
<div style="width: 100%;">
<p style="width: 100%;">
<select name="error_handling" id="error_handling_options">
<option value="stop">Stop</option>
<option value="reboot">Reboot</option>
<option value="ignore">Ignore</option>
</select>
</p>
</div>
<div>
<p style="width: 100%;"><input type="submit" value="Update"></p>
</div>
</form>
</body>
</html>
"""
# returns an HTTP response with the form above
# the fields in the form contain the current configuration
# (except the password for wi-fi network)
def get_form(config):
form = FORM_TEMPLATE
form = form.replace('%ssid%',
str(config.get('ssid')))
form = form.replace('%watering_interval%',
str(config.get('watering_interval')))
form = form.replace('%watering_duration%',
str(config.get('watering_duration')))
form = form.replace('%error_handling%',
str(config.get('error_handling')))
return HTTP_RESPONSE % (len(form), form)
# a handler for incoming HTTP connections
# it prints out the HTML form above,
# and processes the values from the form submitted by a user
# the values are then stored to the configuration
class ConnectionHandler:
def __init__(self, config):
self.config = config
def handle(self, client_s, status_line, headers, data):
# process data from the web form if a POST request received
# otherwise, print out the form
if status_line.startswith('POST') and data:
# update the config with the data from the form
params = data.split('&')
for param in params:
parts = param.split('=')
name = parts[0].strip()
value = parts[1].strip()
# don't update the password if the password field is empty
if name == 'password' and not value:
continue
config.set(name, value)
# store the config
config.store()
# redirect the client to avoid resubmitting the form
client_s.write(HTTP_REDIRECT)
else:
client_s.write(get_form(config))
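# For example (hypothetical request): a POST body such as
#   'ssid=mynet&password=&watering_interval=3600'
# updates 'ssid' and 'watering_interval' but leaves the stored password
# untouched, because empty password fields are skipped above.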
# entry point
from pump import Pumps
from config import Config
from machine import Pin
import util
import time
# load a config from a file
config = Config('main.conf')
# initialize the pumps and the switch which turns them on and off
pumps = Pumps(config.get('first_pump_pin'), config.get('second_pump_pin'),
config.get('pump_switch_pin'),
config.get('watering_interval'), config.get('watering_duration'))
# initialize the switch which enables the configuration mode
# if the switch changes its state, then the board is going to reboot immediately
# in order to turn on/off the configuration mode
config_mode_switch = Pin(config.get('config_mode_switch_pin'), Pin.IN)
config_mode_switch.irq(lambda pin: util.reboot())
# first, check if the configuration mode is enabled
# if so, set up an access point, and then start an HTTP server
# the server provides a web form which updates the configuration of the device
# the server runs on http://192.168.4.1:80
if config_mode_switch.value() == 1:
from http.server import HttpServer
print('enabled configuration mode')
access_point = util.start_access_point(ACCESS_POINT_SSID, ACCESS_POINT_PASSWORD)
handler = ConnectionHandler(config)
ip = access_point.ifconfig()[0]
HttpServer(ip, 80, handler).start()
util.reboot()
# try to connect to wi-fi if the configuration mode is disabled
util.connect_to_wifi(config.get('ssid'), config.get('password'))
# then, start the main loop
# in the loop, the board is going to check temperature and humidity
# and also turn on the pumps according to the schedule specified by a user
while True:
try:
pumps.check()
except:
if config.get('error_handling') == 'reboot':
util.reboot()
elif config.get('error_handling') == 'stop':
raise
else:
            print('warning! something went wrong!')
time.sleep(1) # in seconds
| 36.291429
| 132
| 0.61203
|
1ccd9c1f2ed7a2bff625925281e4e3b276aa6c95
| 9,612
|
py
|
Python
|
debrisdiskfm/fm_klip.py
|
seawander/DebisDiskFM
|
717c09c914c53a5b5a7d18afe78307db79cd1c3e
|
[
"BSD-3-Clause"
] | 3
|
2018-07-27T22:06:04.000Z
|
2019-07-19T13:41:51.000Z
|
debrisdiskfm/fm_klip.py
|
seawander/DebisDiskFM
|
717c09c914c53a5b5a7d18afe78307db79cd1c3e
|
[
"BSD-3-Clause"
] | null | null | null |
debrisdiskfm/fm_klip.py
|
seawander/DebisDiskFM
|
717c09c914c53a5b5a7d18afe78307db79cd1c3e
|
[
"BSD-3-Clause"
] | 3
|
2018-07-24T17:52:21.000Z
|
2021-04-11T23:27:39.000Z
|
from astropy.io import fits
import numpy as np
from . import dependencies
import image_registration
# returns the KLIPped model
def flattenAndNormalize(image, mask = None, onlyMasked = True):
"""Flattend and Normalize the image (for KLIP).
Input:
image: image;
mask: 0-1 mask;
onlyMasked: True, then only pixels with maskvalue 1 will be outputed.
Output: result, std
if onlyMasked == True:
only the mask==1 values, and the standard deviation
else:
all the values, and the standard deviation
"""
if np.size(image.shape) == 2:
if mask is None:
mask = np.ones(image.shape, dtype = 'int')
mask_flat = mask.flatten()
result = np.zeros(np.where(mask_flat == 1)[0].shape[0])
result = image.flatten()[np.where(mask_flat == 1)]*1.0 # multiply by 1.0 to convert possible integers to floats
result -= np.nanmean(result)
std = np.nanstd(result)
result /= std
if onlyMasked == True:
return result, std
else:
mask_flat[np.where(mask_flat==1)] = result
return mask_flat, std
elif np.size(image.shape) == 3:
if mask is None:
mask = np.ones(image[0].shape, dtype = 'int')
mask_flat = mask.flatten()
images = np.copy(image)
result = np.zeros((images.shape[0], np.where(mask_flat == 1)[0].shape[0]))
std = np.zeros(images.shape[0])
for i in range(images.shape[0]):
image_slice = images[i]
result[i], std[i] = flattenAndNormalize(image_slice, mask = mask, onlyMasked = onlyMasked)
return result, std
def pcaImageCube(ref, mask = None, pcNum = None, cube=True, ref3D=True, outputEval = False):
"""Principal Component Analysis,
Input:
ref: Cube of references, 3D;
            if ref3D==False, 2D (Flattened and Normalized, with masked region excluded.)
mask: mask, 2D or 1D;
pcNum: how many principal components are needed;
cube: output as a cube? Otherwise a flattend 2D component array will be returned.
        ref3D: True by default.
outputEval: whether to return the eigen values, False by default.
Output:
The principal components, either cube (3D) or flattend (2D)."""
if mask is None:
mask = np.ones(ref[0].shape)
if pcNum is None:
pcNum = ref.shape[0]
if ref3D:
mask_flat = mask.flatten()
ref_flat = np.zeros((ref.shape[0], np.where(mask_flat == 1)[0].shape[0]))
for i in range(ref_flat.shape[0]):
ref_flat[i], std = flattenAndNormalize(ref[i], mask)
else:
ref_flat = ref
if np.shape(mask.shape)[0] == 1: #1D mask, already flattened
mask_flat = mask
elif np.shape(mask.shape)[0] == 2: #2D mask, need flatten
mask_flat = mask.flatten()
covMatrix = np.dot(ref_flat, np.transpose(ref_flat))
eVal, eVec = np.linalg.eig(covMatrix)
index = (-eVal).argsort()[:pcNum]
eVec = eVec[:,index]
components_flatten = np.dot(np.transpose(eVec), ref_flat)
pc_flat = np.zeros((pcNum, mask_flat.shape[0]))
for i in range(pc_flat.shape[0]):
pc_flat[i][np.where(mask_flat==1)] = components_flatten[i]/np.sqrt(np.dot(components_flatten[i], np.transpose(components_flatten[i])))
if cube == False:
return pc_flat
pc_cube = np.zeros((pcNum, mask.shape[0], mask.shape[1]))
width = mask.shape[0]
for i in range(pc_flat.shape[0]):
pc_cube[i] = np.array(np.split(pc_flat[i], width))
if not outputEval:
return pc_cube
else:
return pc_cube, eVal[index]
def klip(trg, pcs, mask = None, klipK = None, cube = True, trg2D=True):
"""KLIP Algorithm.
Input:
trg: target image, 2D;
if trg2D==False, then it is 1D
            (Flattened and Normalized. std=1 and the result should be multiplied by the original std!)
pcs: principal components from PCA, 3D cube or 2D cube;
Requirement: For the 2D cube, components are on rows.
klipK: the truncation value.
trg2D: is the target a 2D image?
Output:
Image, if cube == False;
Cube Image of all the slices, if cube == True.
"""
if mask is None:
mask = np.ones(trg.shape)
if klipK is None:
klipK = pcs.shape[0]
mask[np.isnan(trg)] = 0
width = mask.shape[0] # Number of rows, width_y
width_x = mask.shape[1] # The above two lines are used to reconstruct a 1D image back to 2D.
mask_flat = mask.flatten()
if trg2D is True:
trg_flat, std = flattenAndNormalize(trg, mask, onlyMasked = False)
else:
trg_flat = np.zeros(mask_flat.shape)
trg_flat[np.where(mask_flat == 1)] = trg
std = 1
if np.array(pcs.shape).shape[0] == 3:
#3D cube, convert to 2D
pcs_flat = np.zeros((klipK, mask_flat.shape[0]))#Masked region included
for i in range(klipK):
pcs_flat[i] = pcs[i].flatten() #[np.where(mask_flat == 1)]
else:
#2D cube
pcs_flat = pcs[:klipK]
coef = np.transpose(np.dot(pcs_flat, np.transpose(trg_flat))[np.newaxis])
if cube == False:
result_flat = trg_flat - np.sum(coef * pcs_flat, axis = 0)
temp_result = result_flat
return temp_result.reshape(width, width_x) * std
else:
result_flat = np.dot(np.ones((klipK, 1)),
trg_flat[np.newaxis]) - np.dot(np.tril(np.ones((klipK, klipK))),
coef * pcs_flat)
result = np.zeros((klipK, width, width_x))
for i in range(klipK):
temp_result = result_flat[i]
result[i] = temp_result.reshape(width, width_x)
return result*std
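# A hedged synthetic-data sketch: build principal components from random
# reference frames, then KLIP-subtract a random target. All shapes and the
# component count are arbitrary illustration values.
if __name__ == "__main__":
    refs = np.random.rand(10, 32, 32)   # 10 reference frames
    target = np.random.rand(32, 32)     # frame to be PSF-subtracted
    pcs = pcaImageCube(refs, pcNum=5)   # (5, 32, 32) component cube
    residual = klip(target, pcs, klipK=5, cube=False)
    print(residual.shape)               # -> (32, 32)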
def klip_fm_main(path = './test/', path_obs = None, angles = None, psf = None, pipeline_input = 'ALICE', alice_size = None):
disk_model = fits.getdata(path + 'data_1.12/RT.fits.gz')[0, 0, 0]
disk_model[int((disk_model.shape[0]-1)/2)-2:int((disk_model.shape[0]-1)/2)+3, int((disk_model.shape[0]-1)/2)-2:int((disk_model.shape[0]-1)/2)+3] = 0
# Exclude the star in the above line
if psf is not None:
if len(psf.shape) != 2:
            raise ValueError('The input PSF is not 2D, please pass a 2D one here!')
        psf /= np.nansum(psf)  # normalize the (planet) PSF in case its sum is not equal to 1
convolved0 = image_registration.fft_tools.convolve_nd.convolvend(disk_model, psf)
disk_model = convolved0
if path_obs is None:
path_obs = './data_observation/'
components = fits.getdata(path_obs + 'NICMOS/HD-191089_NICMOS_F110W_Lib-84_KL-19_KLmodes.fits')
mask = fits.getdata(path_obs + 'NICMOS/HD-191089_NICMOS_F110W_Lib-84_KL-19_Mask.fits')
if angles is None:
        angles = np.concatenate([[19.5699]*4, [49.5699]*4]) # The values are hard coded for HD 191089 NICMOS observations, please change them for other targets.
disk_rotated = dependencies.rotateCube(disk_model, angle = angles, maskedNaN=True, outputMask=False)
masks_rotated = np.ones(disk_rotated.shape)
masks_rotated[np.where(np.isnan(disk_rotated))] = 0
if pipeline_input == 'ALICE':
mask = mask[1:, 1:]
masks_rotated *= mask
if pipeline_input == 'ALICE':
if alice_size is None:
alice_size = 140
        # The ALICE pipeline has images of even size, and the center of the star is at the center of the image with the 1st row and 1st column cropped
# Solution as follows (create 140*140 or 80*80 images, with the 1st row and 1st column set to be all 0's)
disk_rotated_140 = np.zeros((disk_rotated.shape[0], alice_size, alice_size)) # make size = 140x140 images for KLIP
disk_rotated_140[:, 1:, 1:] = disk_rotated
mask_rotated_140 = np.zeros(disk_rotated_140.shape)
mask_rotated_140[:, 1:, 1:] = masks_rotated
disk_rotated = disk_rotated_140
del disk_rotated_140
masks_rotated = mask_rotated_140
del mask_rotated_140
results_rotated = np.zeros(disk_rotated.shape)
for i, data_slice in enumerate(disk_rotated):
results_rotated[i] = klip(data_slice, pcs = components[i], mask = masks_rotated[i], cube=False)
mask_rotated_nan = np.ones(masks_rotated.shape)
mask_rotated_nan[np.where(masks_rotated==0)] = np.nan
if pipeline_input == 'ALICE':
results_rotated_old = results_rotated[:, 1:, 1:]
results_rotated = results_rotated_old
del results_rotated_old
mask_rotated_nan_old = mask_rotated_nan[:, 1:, 1:]
mask_rotated_nan = mask_rotated_nan_old
del mask_rotated_nan_old
results = dependencies.rotateCube(results_rotated*mask_rotated_nan, mask = None, angle = -angles, maskedNaN=True, outputMask=False)
    mask_derotated_nan = np.ones(results.shape)
    mask_derotated_nan[np.where(np.isnan(results))] = np.nan
    result_klip = np.nansum(results, axis = 0)/np.nansum(mask_derotated_nan, axis = 0)
if pipeline_input == 'ALICE':
if alice_size is None:
alice_size = 140
result_klip_alice = np.zeros((alice_size, alice_size))
result_klip_alice[1:, 1:] = result_klip
result_klip = result_klip_alice
del result_klip_alice
return result_klip
# test = klip_fm_main()
| 39.555556
| 158
| 0.616833
|
566a9db59b76ffb4bc3d5b11fd2b520425fc2044
| 4,158
|
py
|
Python
|
indico/web/http_api/metadata/xml.py
|
nicolas-harraudeau-sonarsource/indico
|
419b6152529bb70a5a01471be6bb6d91b29bd43e
|
[
"MIT"
] | 1
|
2021-08-11T19:13:18.000Z
|
2021-08-11T19:13:18.000Z
|
indico/web/http_api/metadata/xml.py
|
vintas/indico
|
e45f80db156557bb09e9b579683b42c210c31653
|
[
"MIT"
] | null | null | null |
indico/web/http_api/metadata/xml.py
|
vintas/indico
|
e45f80db156557bb09e9b579683b42c210c31653
|
[
"MIT"
] | null | null | null |
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import re
from datetime import datetime
import dateutil.parser
from lxml import etree
from pytz import timezone, utc
from indico.core.logger import Logger
from indico.util.string import to_unicode
from indico.web.http_api.metadata.serializer import Serializer
def _deserialize_date(date_dict):
dt = datetime.combine(dateutil.parser.parse(date_dict['date']).date(),
dateutil.parser.parse(date_dict['time']).time())
return timezone(date_dict['tz']).localize(dt).astimezone(utc)
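# For example (illustrative values):
#   _deserialize_date({'date': '2020-01-15', 'time': '09:30:00', 'tz': 'Europe/Zurich'})
# returns 2020-01-15 08:30:00+00:00, since CET is UTC+1 in January.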
class XMLSerializer(Serializer):
"""
    Receives a fossil (or a collection of them) and converts it to XML.
"""
_mime = 'text/xml'
def __init__(self, query_params, pretty=False, **kwargs):
self._typeMap = kwargs.pop('typeMap', {})
super(XMLSerializer, self).__init__(query_params, pretty, **kwargs)
def _convert(self, value, _control_char_re=re.compile(r'[\x00-\x08\x0b\x0c\x0e-\x1f]')):
if isinstance(value, datetime):
return value.isoformat()
elif isinstance(value, (int, long, float, bool)):
return str(value)
else:
value = to_unicode(value) if isinstance(value, str) else value
if isinstance(value, basestring):
# Get rid of control chars breaking XML conversion
value = _control_char_re.sub(u'', value)
return value
def _xmlForFossil(self, fossil, doc=None):
attribs = {}
id = None
if '_fossil' in fossil:
attribs['fossil'] = fossil['_fossil']
if 'id' in fossil:
id = attribs['id'] = str(fossil['id'])
if '_type' in fossil:
typeName = self._typeMap.get(fossil['_type'], fossil['_type'])
else:
typeName = 'collection'
felement = etree.Element(typeName.lower(),
attrib=attribs)
if doc:
doc.getroot().append(felement)
for k, v in fossil.iteritems():
if k in ['_fossil', '_type', 'id']:
continue
if isinstance(k, (int, float)) or (isinstance(k, basestring) and k.isdigit()):
elem = etree.SubElement(felement, 'entry', {'key': unicode(k)})
else:
elem = etree.SubElement(felement, k)
if isinstance(v, dict) and set(v.viewkeys()) == {'date', 'time', 'tz'}:
v = _deserialize_date(v)
if isinstance(v, (list, tuple)):
onlyDicts = all(isinstance(subv, dict) for subv in v)
if onlyDicts:
for subv in v:
elem.append(self._xmlForFossil(subv))
else:
for subv in v:
if isinstance(subv, dict):
elem.append(self._xmlForFossil(subv))
else:
subelem = etree.SubElement(elem, 'item')
subelem.text = self._convert(subv)
elif isinstance(v, dict):
elem.append(self._xmlForFossil(v))
else:
txt = self._convert(v)
try:
elem.text = txt
except Exception:
Logger.get('xmlSerializer').exception('Setting XML text value failed (id: %s, value %r)', id, txt)
return felement
def _execute(self, fossil, xml_declaration=True):
if isinstance(fossil, list):
# collection of fossils
doc = etree.ElementTree(etree.Element("collection"))
for elem in fossil:
self._xmlForFossil(elem, doc)
result = doc
else:
result = self._xmlForFossil(fossil)
return etree.tostring(result, pretty_print=self.pretty,
xml_declaration=xml_declaration, encoding='utf-8')
Serializer.register('xml', XMLSerializer)
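# A hedged usage sketch (Python 2, matching the iteritems/unicode idioms
# above); the fossil fields are invented and the base Serializer is assumed
# to accept an empty query-params dict:
#
#     serializer = XMLSerializer({})
#     print(serializer._execute({'_type': 'conference',
#                                '_fossil': 'conferenceMetadata',
#                                'id': 42,
#                                'title': 'Example meeting'}))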
| 36.79646
| 118
| 0.564695
|
b451d4c575757bc850106e980f87d086af148699
| 10,712
|
py
|
Python
|
argparse/argparse_galaxy_translation.py
|
bernt-matthias/argparse2tool
|
cca59571ad18f0bcc7b863ebbdd42a3480801aaf
|
[
"Apache-2.0"
] | null | null | null |
argparse/argparse_galaxy_translation.py
|
bernt-matthias/argparse2tool
|
cca59571ad18f0bcc7b863ebbdd42a3480801aaf
|
[
"Apache-2.0"
] | null | null | null |
argparse/argparse_galaxy_translation.py
|
bernt-matthias/argparse2tool
|
cca59571ad18f0bcc7b863ebbdd42a3480801aaf
|
[
"Apache-2.0"
] | null | null | null |
import galaxyxml.tool.parameters as gxtp
from collections import Counter
from pydoc import locate
class ArgparseGalaxyTranslation(object):
def __gxtp_param_from_type(self, param, flag, label, num_dashes, gxparam_extra_kwargs, default=None):
        """Based on a type, convert to appropriate gxtp class
        """
        from argparse import FileType
if default is None and (param.type in (int, float)):
default = 0
if param.type == int:
mn = None
mx = None
if param.choices is not None:
mn = min(param.choices)
mx = max(param.choices)
gxparam = gxtp.IntegerParam(flag, default, label=label, min=mn, max=mx, num_dashes=num_dashes, **gxparam_extra_kwargs)
elif param.choices is not None:
choices = {k: k for k in param.choices}
gxparam = gxtp.SelectParam(flag, default=default, label=label, num_dashes=num_dashes, options=choices, **gxparam_extra_kwargs)
elif param.type == float:
gxparam = gxtp.FloatParam(flag, default, label=label, num_dashes=num_dashes, **gxparam_extra_kwargs)
elif param.type is None or param.type == str:
gxparam = gxtp.TextParam(flag, value=default, label=label, num_dashes=num_dashes, **gxparam_extra_kwargs)
elif param.type == locate('file'):
gxparam = gxtp.DataParam(flag, label=label, num_dashes=num_dashes, **gxparam_extra_kwargs)
elif isinstance(param.type, FileType):
if 'w' in param.type._mode:
gxparam = gxtp.OutputParameter(
flag, format='data', default=default, label=label,
num_dashes=num_dashes, **gxparam_extra_kwargs
)
else:
gxparam = gxtp.DataParam(
flag, default=default, label=label, num_dashes=num_dashes,
**gxparam_extra_kwargs
)
else:
gxparam = None
return gxparam
def __args_from_nargs(self, param, repeat_name, repeat_var_name, positional, flag):
"""Based on param.nargs, return the appropriate overrides
"""
gxrepeat_args = []
gxrepeat_kwargs = {}
gxrepeat_cli_after = None
gxrepeat_cli_before = None
gxrepeat_cli_actual = None
gxparam_cli_before = None
gxparam_cli_after = None
if positional:
gxrepeat_cli_actual = '"$%s"' % (repeat_var_name)
else:
gxrepeat_cli_actual = '%s "$%s"' % (param.option_strings[0], repeat_var_name)
if isinstance(param.nargs, int):
# N (an integer). N arguments from the command line will be
# gathered together into a list. For example:
if param.nargs > 1:
gxrepeat_args = [repeat_name, 'repeat_title']
gxrepeat_kwargs = {
'min': param.nargs,
'max': param.nargs,
}
else:
# If we have only one, we don't want a gxrepeat, so we leave well
# enough alone
gxrepeat_args = None
elif param.nargs == '?':
# '?'. One argument will be consumed from the command line if
# possible, and produced as a single item. If no command-line
# argument is present, the value from default will be produced.
# Note that for optional arguments, there is an additional case -
# the option string is present but not followed by a command-line
# argument. In this case the value from const will be produced
# This does NOT provide a way to access the value in const, but
# that seems like a HORRIBLE idea anyway. Seriously, who does that.
gxparam_cli_before = """\n#if $%s and $%s is not None:""" % (flag, flag)
gxparam_cli_after = '#end if'
gxrepeat_args = None
elif param.nargs is None:
# Very similar to '?' but without the case of "optional + specified
            # without an argument" being OK
#
# This has changed over time, we're (probably) going overboard here.
gxparam_cli_before = """\n#if $%s and $%s is not None:""" % (flag, flag)
gxparam_cli_after = '#end if'
gxrepeat_args = None
elif param.nargs == '*':
# '*'. All command-line arguments present are gathered into a list.
# Note that it generally doesn't make much sense to have more than
# one positional argument with nargs='*', but multiple optional
# arguments with nargs='*' is possible. For example:
# This needs to be handled with a
# set files = '" "'.join( [ str( $file ) for $file in $inputB ] )
gxrepeat_args = [repeat_name, 'repeat_title']
# gxrepeat_cli_after = '#end if\n'
gxrepeat_cli_after = ''
gxrepeat_cli_before = """\n#set %s = '" "'.join([ str($var.%s) for $var in $%s ])""" % (repeat_var_name, flag, repeat_name)
elif param.nargs == '+':
# '+'. Just like '*', all command-line args present are gathered
# into a list. Additionally, an error message will be generated if
# there wasn't at least one command-line argument present. For
# example:
gxrepeat_args = [repeat_name, 'repeat_title']
gxrepeat_kwargs = {'min': 1}
gxrepeat_cli_after = ''
gxrepeat_cli_before = """\n#set %s = '" "'.join([ str($var.%s) for $var in $%s ])""" % (repeat_var_name, flag, repeat_name)
else:
raise Exception("TODO: Handle argparse.REMAINDER")
return (gxrepeat_args, gxrepeat_kwargs, gxrepeat_cli_after,
gxrepeat_cli_before, gxrepeat_cli_actual, gxparam_cli_before, gxparam_cli_after)
def __init__(self):
self.repeat_count = 0
self.positional_count = Counter()
def _VersionAction(self, param, tool=None):
# passing tool is TERRIBLE, I know.
# TODO handle their templating of version
# This is kinda ugly but meh.
tool.root.attrib['version'] = param.version
# Count the repeats for unique names
# TODO improve
def _StoreAction(self, param, tool=None):
"""
Parse argparse arguments action type of "store", the default.
param: argparse.Action
"""
gxparam = None
gxrepeat = None
self.repeat_count += 1
gxparam_extra_kwargs = {}
if not param.required:
gxparam_extra_kwargs['optional'] = True
# Positional arguments don't have an option strings
positional = len(param.option_strings) == 0
if not positional:
flag = max(param.option_strings, key=len) # Pick the longest of the options strings
else:
flag = ''
            self.positional_count[param.dest] += 1
repeat_name = 'repeat_%s' % self.repeat_count
repeat_var_name = 'repeat_var_%s' % self.repeat_count
# TODO: Replace with logic supporting characters other than -
flag_wo_dashes = flag.lstrip('-')
num_dashes = len(flag) - len(flag_wo_dashes)
# Moved because needed in developing repeat CLI
if positional:
v = self.positional_count[param.dest]
flag_wo_dashes = '%s%s' % (param.dest, '_' + str(v) if v > 1 else '')
# SO unclean
gxparam_extra_kwargs['positional'] = True
# Figure out parameters and overrides from param.nargs, mainly.
# This is really unpleasant.
(gxrepeat_args, gxrepeat_kwargs, gxrepeat_cli_after,
gxrepeat_cli_before, gxrepeat_cli_actual, gxparam_cli_before,
gxparam_cli_after) = \
self.__args_from_nargs(param, repeat_name, repeat_var_name, positional, flag_wo_dashes)
# Build the gxrepeat if it's needed
if gxrepeat_args is not None:
gxrepeat = gxtp.Repeat(*gxrepeat_args, **gxrepeat_kwargs)
if gxrepeat_cli_before is not None:
gxrepeat.command_line_before_override = gxrepeat_cli_before
if gxrepeat_cli_after is not None:
gxrepeat.command_line_after_override = gxrepeat_cli_after
if gxrepeat_cli_actual is not None:
gxrepeat.command_line_override = gxrepeat_cli_actual
else:
gxrepeat = None
gxparam = self.__gxtp_param_from_type(
param, flag_wo_dashes, param.help, num_dashes,
gxparam_extra_kwargs, default=param.default
)
# Not really happy with this way of doing this
if gxparam_cli_before is not None:
gxparam.command_line_before_override = gxparam_cli_before
if gxparam_cli_after is not None:
gxparam.command_line_after_override = gxparam_cli_after
# if positional argument, wipe out the CLI flag that's usually present
if positional:
gxparam.command_line_override = '$%s' % flag_wo_dashes
if gxrepeat is not None and gxparam is not None:
gxrepeat.append(gxparam)
return gxrepeat
elif gxrepeat is None and gxparam is not None:
return gxparam
else:
raise Exception("huh")
return None
def _StoreTrueAction(self, param, **kwargs):
return self._StoreConstAction(param, **kwargs)
def _StoreFalseAction(self, param, **kwargs):
return self._StoreConstAction(param, **kwargs)
def _AppendAction(self, param, **kwargs):
self.repeat_count += 1
repeat_name = 'repeat_%s' % self.repeat_count
# TODO: Replace with logic supporting characters other than -
flag = max(param.option_strings, key=len) # Pick one of the options strings
flag_wo_dashes = flag.lstrip('-')
num_dashes = len(flag) - len(flag_wo_dashes)
gxparam = self.__gxtp_param_from_type(param, flag_wo_dashes, param.help, num_dashes, {})
gxrepeat = gxtp.Repeat(repeat_name, 'Repeated Variable')
gxrepeat.command_line_override = '%s $%s.%s' % (param.option_strings[0], 'i', flag_wo_dashes)
gxrepeat.append(gxparam)
return gxrepeat
def _StoreConstAction(self, param, **kwargs):
flag = max(param.option_strings, key=len) # Pick one of the options strings
flag_wo_dashes = flag.lstrip('-')
num_dashes = len(flag) - len(flag_wo_dashes)
gxparam = gxtp.BooleanParam(flag_wo_dashes, label=param.help, num_dashes=num_dashes)
return gxparam
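# A hedged usage sketch: translate one (invented) argparse option into a
# galaxyxml parameter via the store-action path above; assumes galaxyxml is
# installed.
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    action = parser.add_argument('--threads', type=int, default=4, help='Worker count')
    gxparam = ArgparseGalaxyTranslation()._StoreAction(action)
    print(type(gxparam).__name__)  # expected: IntegerParam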
| 42.848
| 138
| 0.609503
|
8c501bf7bab37d1dd11b1925cba121a0b2929deb
| 3,777
|
py
|
Python
|
contrib/macdeploy/custom_dsstore.py
|
FOScoin/FossilCoin
|
4f0ca695a66ca8afc61ff44c1ef90b3e17c93ff5
|
[
"MIT"
] | null | null | null |
contrib/macdeploy/custom_dsstore.py
|
FOScoin/FossilCoin
|
4f0ca695a66ca8afc61ff44c1ef90b3e17c93ff5
|
[
"MIT"
] | null | null | null |
contrib/macdeploy/custom_dsstore.py
|
FOScoin/FossilCoin
|
4f0ca695a66ca8afc61ff44c1ef90b3e17c93ff5
|
[
"MIT"
] | 3
|
2018-09-16T03:32:08.000Z
|
2019-03-02T11:37:28.000Z
|
#!/usr/bin/env python
# Copyright (c) 2013-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from __future__ import division,print_function,unicode_literals
import biplist
from ds_store import DSStore
from mac_alias import Alias
import sys
output_file = sys.argv[1]
package_name_ns = sys.argv[2]
ds = DSStore.open(output_file, 'w+')
ds['.']['bwsp'] = {
'ShowStatusBar': False,
'WindowBounds': b'{{300, 280}, {500, 343}}',
'ContainerShowSidebar': False,
'SidebarWidth': 0,
'ShowTabView': False,
'PreviewPaneVisibility': False,
'ShowToolbar': False,
'ShowSidebar': False,
'ShowPathbar': True
}
icvp = {
'gridOffsetX': 0.0,
'textSize': 12.0,
'viewOptionsVersion': 1,
'backgroundImageAlias': b'\x00\x00\x00\x00\x02\x1e\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x94\\\xb0H+\x00\x05\x00\x00\x00\x98\x0fbackground.tiff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x99\xd19\xb0\xf8\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\r\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b.background\x00\x00\x10\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x11\x00\x08\x00\x00\xd19\xb0\xf8\x00\x00\x00\x01\x00\x04\x00\x00\x00\x98\x00\x0e\x00 \x00\x0f\x00b\x00a\x00c\x00k\x00g\x00r\x00o\x00u\x00n\x00d\x00.\x00t\x00i\x00f\x00f\x00\x0f\x00\x02\x00\x00\x00\x12\x00\x1c/.background/background.tiff\x00\x14\x01\x06\x00\x00\x00\x00\x01\x06\x00\x02\x00\x00\x0cMacintosh HD\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xce\x97\xab\xc3H+\x00\x00\x01\x88[\x88\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02u\xab\x8d\xd1\x94\\\xb0devrddsk\xff\xff\xff\xff\x00\x00\t \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07bitcoin\x00\x00\x10\x00\x08\x00\x00\xce\x97\xab\xc3\x00\x00\x00\x11\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x01\x00\x14\x01\x88[\x88\x00\x16\xa9\t\x00\x08\xfaR\x00\x08\xfaQ\x00\x02d\x8e\x00\x0e\x00\x02\x00\x00\x00\x0f\x00\x1a\x00\x0c\x00M\x00a\x00c\x00i\x00n\x00t\x00o\x00s\x00h\x00 \x00H\x00D\x00\x13\x00\x01/\x00\x00\x15\x00\x02\x00\x14\xff\xff\x00\x00\xff\xff\x00\x00',
'backgroundColorBlue': 1.0,
'iconSize': 96.0,
'backgroundColorGreen': 1.0,
'arrangeBy': 'none',
'showIconPreview': True,
'gridSpacing': 100.0,
'gridOffsetY': 0.0,
'showItemInfo': False,
'labelOnBottom': True,
'backgroundType': 2,
'backgroundColorRed': 1.0
}
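# Rewrite the serialized Finder alias so the background image resolves inside the renamed volume.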
alias = Alias.from_bytes(icvp['backgroundImageAlias'])
alias.volume.name = package_name_ns
alias.volume.posix_path = '/Volumes/' + package_name_ns
alias.volume.disk_image_alias.target.filename = package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.carbon_path = 'Macintosh HD:Users:\x00bitcoinuser:\x00Documents:\x00bitcoin:\x00bitcoin:\x00' + package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.posix_path = 'Users/bitcoinuser/Documents/bitcoin/bitcoin/' + package_name_ns + '.temp.dmg'
alias.target.carbon_path = package_name_ns + ':.background:\x00background.tiff'
icvp['backgroundImageAlias'] = biplist.Data(alias.to_bytes())
ds['.']['icvp'] = icvp
ds['.']['vSrn'] = ('long', 1)
ds['Applications']['Iloc'] = (370, 156)
ds['FossilCoin-Qt.app']['Iloc'] = (128, 156)
ds.flush()
ds.close()
| 61.918033
| 1,817
| 0.727562
|
33c99fb8cb75ea538c6603a6dda6d34aabda47cb
| 399
|
py
|
Python
|
app/core/migrations/0009_notification_value.py
|
pnsn/squac_api
|
7b1741cd32dbc32972f75ac53958468fdf7d04ee
|
[
"MIT"
] | 6
|
2019-11-07T09:32:43.000Z
|
2021-09-02T22:37:14.000Z
|
app/core/migrations/0009_notification_value.py
|
pnsn/squacapi
|
e330a925aff38937656983841e495ba3824c11e1
|
[
"MIT"
] | 196
|
2019-10-04T17:03:36.000Z
|
2022-03-31T17:54:59.000Z
|
app/core/migrations/0009_notification_value.py
|
pnsn/squac_api
|
7b1741cd32dbc32972f75ac53958468fdf7d04ee
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.2 on 2020-12-04 23:56
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0008_auto_20201030_1517'),
]
operations = [
migrations.AddField(
model_name='notification',
name='value',
field=models.CharField(default='', max_length=255),
),
]
| 21
| 63
| 0.598997
|
b05fed6fa4fe62050c7eb104ab074622d34d0c9f
| 5,472
|
py
|
Python
|
flexget/plugins/input/kitsu.py
|
andocromn/Flexget
|
853d9e0d6bdcf5c675afd50b8604b9e5805d9b11
|
[
"MIT"
] | null | null | null |
flexget/plugins/input/kitsu.py
|
andocromn/Flexget
|
853d9e0d6bdcf5c675afd50b8604b9e5805d9b11
|
[
"MIT"
] | 1
|
2017-10-09T23:06:44.000Z
|
2017-10-09T23:06:44.000Z
|
flexget/plugins/input/kitsu.py
|
andocromn/Flexget
|
853d9e0d6bdcf5c675afd50b8604b9e5805d9b11
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals, division, absolute_import
import logging
from flexget import plugin
from flexget.config_schema import one_or_more
from flexget.entry import Entry
from flexget.event import event
from flexget.utils.cached_input import cached
from flexget.utils.requests import RequestException
log = logging.getLogger('kitsu')
class KitsuAnime(object):
"""
Creates an entry for each item in your kitsu.io list.
Syntax:
kitsu:
username: <value>
lists:
- <current|planned|completed|on_hold|dropped>
- <current|planned|completed|on_hold|dropped>
status: <airing|finished>
latest: <yes|no>
"""
schema = {
'type': 'object',
'properties': {
'username': {'type': 'string'},
'lists': one_or_more(
{
'type': 'string',
'enum': ['current', 'planned', 'completed', 'on_hold', 'dropped'],
}
),
'latest': {'type': 'boolean', 'default': False},
'status': {'type': 'string', 'enum': ['airing', 'finished']},
},
'required': ['username'],
'additionalProperties': False,
}
@cached('kitsu', persist='2 hours')
def on_task_input(self, task, config):
entries = []
user_payload = {'filter[name]': config['username']}
try:
user_response = task.requests.get(
'https://kitsu.io/api/edge/users', params=user_payload
)
except RequestException as e:
error_message = 'Error finding User url: {url}'.format(url=e.request.url)
if hasattr(e, 'response'):
error_message += ' status: {status}'.format(status=e.response.status_code)
log.debug(error_message, exc_info=True)
raise plugin.PluginError(error_message)
user = user_response.json()
if not len(user['data']):
raise plugin.PluginError(
'no such username found "{name}"'.format(name=config['username'])
)
next_url = 'https://kitsu.io/api/edge/users/{id}/library-entries'.format(
id=user['data'][0]['id']
)
payload = {
'filter[status]': ','.join(config['lists']),
'filter[media_type]': 'Anime',
'include': 'media',
'page[limit]': 20,
}
try:
response = task.requests.get(next_url, params=payload)
except RequestException as e:
error_message = 'Error getting list from {url}'.format(url=e.request.url)
if hasattr(e, 'response'):
error_message += ' status: {status}'.format(status=e.response.status_code)
log.debug(error_message, exc_info=True)
raise plugin.PluginError(error_message)
json_data = response.json()
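        # Kitsu paginates library entries; keep consuming pages until links['next'] is absent.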
while json_data:
for item, anime in zip(json_data['data'], json_data['included']):
if item['relationships']['media']['data']['id'] != anime['id']:
raise ValueError(
'Anime IDs {id1} and {id2} do not match'.format(
id1=item['relationships']['media']['data']['id'], id2=anime['id']
)
)
status = config.get('status')
if status is not None:
if status == 'airing' and anime['attributes']['endDate'] is not None:
continue
if status == 'finished' and anime['attributes']['endDate'] is None:
continue
entry = Entry()
entry['title'] = anime['attributes']['canonicalTitle']
titles_en = anime['attributes']['titles'].get('en')
if titles_en:
entry['kitsu_title_en'] = titles_en
titles_en_jp = anime['attributes']['titles'].get('en_jp')
if titles_en_jp:
entry['kitsu_title_en_jp'] = titles_en_jp
titles_ja_jp = anime['attributes']['titles'].get('ja_jp')
if titles_ja_jp:
entry['kitsu_title_ja_jp'] = titles_ja_jp
entry['url'] = anime['links']['self']
if entry.isvalid():
if config.get('latest'):
entry['series_episode'] = item['progress']
entry['series_id_type'] = 'sequence'
                        entry['title'] += ' ' + str(entry['series_episode'])
entries.append(entry)
next_url = json_data['links'].get('next')
if next_url:
try:
response = task.requests.get(next_url)
except RequestException as e:
error_message = 'Error getting list from next page url: {url}'.format(
url=e.request.url
)
if hasattr(e, 'response'):
error_message += ' status: {status}'.format(status=e.response.status_code)
log.debug(error_message, exc_info=True)
raise plugin.PluginError(error_message)
json_data = response.json()
else:
break
return entries
@event('plugin.register')
def register_plugin():
plugin.register(KitsuAnime, 'kitsu', api_ver=2)
| 38
| 98
| 0.523575
|
c2f0cdd602cb1be0f3da693ff473dbfe2fa21451
| 2,212
|
py
|
Python
|
biblion/templatetags/biblion_tags.py
|
alexissantos/biblion
|
012c3ca230f264f044b6e5f35027c817eafd1de5
|
[
"BSD-3-Clause"
] | null | null | null |
biblion/templatetags/biblion_tags.py
|
alexissantos/biblion
|
012c3ca230f264f044b6e5f35027c817eafd1de5
|
[
"BSD-3-Clause"
] | null | null | null |
biblion/templatetags/biblion_tags.py
|
alexissantos/biblion
|
012c3ca230f264f044b6e5f35027c817eafd1de5
|
[
"BSD-3-Clause"
] | null | null | null |
from django import template
from biblion.models import Post
from biblion.conf import settings
register = template.Library()
class LatestBlogPostsNode(template.Node):
def __init__(self, context_var):
self.context_var = context_var
def render(self, context):
latest_posts = Post.objects.current()[:5]
context[self.context_var] = latest_posts
return ""
@register.tag
def latest_blog_posts(parser, token):
bits = token.split_contents()
return LatestBlogPostsNode(bits[2])
class LatestBlogPostNode(template.Node):
def __init__(self, context_var):
self.context_var = context_var
def render(self, context):
try:
latest_post = Post.objects.current()[0]
except IndexError:
latest_post = None
context[self.context_var] = latest_post
return ""
@register.tag
def latest_blog_post(parser, token):
bits = token.split_contents()
return LatestBlogPostNode(bits[2])
class LatestSectionPostNode(template.Node):
def __init__(self, section, context_var):
self.section = template.Variable(section)
self.context_var = context_var
def render(self, context):
section = self.section.resolve(context)
post = Post.objects.section(section, queryset=Post.objects.current())
try:
post = post[0]
except IndexError:
post = None
context[self.context_var] = post
return ""
@register.tag
def latest_section_post(parser, token):
"""
{% latest_section_post "articles" as latest_article_post %}
"""
bits = token.split_contents()
return LatestSectionPostNode(bits[1], bits[3])
class BlogSectionsNode(template.Node):
def __init__(self, context_var):
self.context_var = context_var
def render(self, context):
sections = [(settings.BIBLION_ALL_SECTION_NAME, "All")]
sections += settings.BIBLION_SECTIONS
context[self.context_var] = sections
return ""
@register.tag
def blog_sections(parser, token):
"""
{% blog_sections as blog_sections %}
"""
bits = token.split_contents()
return BlogSectionsNode(bits[2])
| 24.043478
| 77
| 0.664557
|
c423c112db151ac71d8e6a6bc13b77f548cf62bc
| 535
|
py
|
Python
|
setup.py
|
wyfo/contextclass
|
4655574b5b76909b46fc8db7588ffd30cecc599c
|
[
"MIT"
] | null | null | null |
setup.py
|
wyfo/contextclass
|
4655574b5b76909b46fc8db7588ffd30cecc599c
|
[
"MIT"
] | null | null | null |
setup.py
|
wyfo/contextclass
|
4655574b5b76909b46fc8db7588ffd30cecc599c
|
[
"MIT"
] | null | null | null |
from setuptools import find_packages, setup
with open("README.md") as f:
README = f.read()
setup(
name="contextclass",
url="https://github.com/wyfo/contextclass",
author="Joseph Perez",
author_email="joperez@hotmail.fr",
description="Typed class wrapper for context variables",
long_description=README,
long_description_content_type="text/markdown",
version="0.2",
packages=find_packages(include=["contextclasses"]),
classifiers=[
"Programming Language :: Python :: 3.7",
],
)
| 26.75
| 60
| 0.684112
|
b8ac098454d2c1aaeb576a225911d0a2d9146c9a
| 10,473
|
py
|
Python
|
scripts/mesh_lpt_benchmark.py
|
santiagocasas/flowpm
|
9145d0c846d886f2cb34955f2267aa8cc9da31da
|
[
"MIT"
] | null | null | null |
scripts/mesh_lpt_benchmark.py
|
santiagocasas/flowpm
|
9145d0c846d886f2cb34955f2267aa8cc9da31da
|
[
"MIT"
] | null | null | null |
scripts/mesh_lpt_benchmark.py
|
santiagocasas/flowpm
|
9145d0c846d886f2cb34955f2267aa8cc9da31da
|
[
"MIT"
] | null | null | null |
import numpy as np
import os
import math
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import mesh_tensorflow as mtf
import time
import sys
sys.path.append('../')
sys.path.append('../flowpm/')
import flowpm.mesh_ops as mpm
import flowpm.mtfpm as mtfpm
import flowpm.mesh_utils as mesh_utils
import flowpm
from astropy.cosmology import Planck15
from flowpm.tfpm import PerturbationGrowth
from flowpm import linear_field, lpt_init, nbody, cic_paint
from scipy.interpolate import InterpolatedUnivariateSpline as iuspline
from matplotlib import pyplot as plt
cosmology = Planck15
tf.flags.DEFINE_integer("gpus_per_node", 8, "Number of GPU on each node")
tf.flags.DEFINE_integer("gpus_per_task", 8, "Number of GPU in each task")
tf.flags.DEFINE_integer("tasks_per_node", 1, "Number of task in each node")
tf.flags.DEFINE_integer("nc", 128, "Size of the cube")
tf.flags.DEFINE_integer("batch_size", 1, "Batch Size")
tf.flags.DEFINE_float("box_size", 500, "Batch Size")
tf.flags.DEFINE_float("a0", 0.1, "initial scale factor")
tf.flags.DEFINE_float("af", 1.0, "final scale factor")
tf.flags.DEFINE_integer("nsteps", 5, "Number of time steps")
#pyramid flags
tf.flags.DEFINE_integer("dsample", 0, "downsampling factor")
tf.flags.DEFINE_integer("hsize", 0, "halo size")
#mesh flags
tf.flags.DEFINE_integer("nx", 4, "# blocks along x")
tf.flags.DEFINE_integer("ny", 2, "# blocks along y")
tf.flags.DEFINE_string("mesh_shape", "row:16", "mesh shape")
#tf.flags.DEFINE_string("layout", "nx:b1", "layout rules")
tf.flags.DEFINE_string("output_file", "timeline",
"Name of the output timeline file")
FLAGS = tf.flags.FLAGS
def lpt_prototype(mesh,
nc=FLAGS.nc,
bs=FLAGS.box_size,
batch_size=FLAGS.batch_size,
a0=FLAGS.a0,
a=FLAGS.af,
nsteps=FLAGS.nsteps):
"""
Prototype of function computing LPT deplacement.
Returns output tensorflow and mesh tensorflow tensors
"""
klin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[0]
plin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[1]
ipklin = iuspline(klin, plin)
stages = np.linspace(a0, a, nsteps, endpoint=True)
# Define the named dimensions
# Parameters of the small scales decomposition
n_block_x = FLAGS.nx
n_block_y = FLAGS.ny
n_block_z = 1
halo_size = FLAGS.hsize
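  # Cap the halo below half of the smallest block edge, presumably so halo regions of neighbouring blocks cannot overlap.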
if halo_size >= 0.5 * min(nc // n_block_x, nc // n_block_y, nc // n_block_z):
new_size = int(0.5 *
min(nc // n_block_x, nc // n_block_y, nc // n_block_z))
print('WARNING: REDUCING HALO SIZE from %d to %d' % (halo_size, new_size))
halo_size = new_size
# Parameters of the large scales decomposition
downsampling_factor = 0
lnc = nc // 2**downsampling_factor
#
fx_dim = mtf.Dimension("nx", nc)
fy_dim = mtf.Dimension("ny", nc)
fz_dim = mtf.Dimension("nz", nc)
tfx_dim = mtf.Dimension("tx", nc)
tfy_dim = mtf.Dimension("ty", nc)
tfz_dim = mtf.Dimension("tz", nc)
tx_dim = mtf.Dimension("tx_lr", nc)
ty_dim = mtf.Dimension("ty_lr", nc)
tz_dim = mtf.Dimension("tz_lr", nc)
nx_dim = mtf.Dimension('nx_block', n_block_x)
ny_dim = mtf.Dimension('ny_block', n_block_y)
nz_dim = mtf.Dimension('nz_block', n_block_z)
sx_dim = mtf.Dimension('sx_block', nc // n_block_x)
sy_dim = mtf.Dimension('sy_block', nc // n_block_y)
sz_dim = mtf.Dimension('sz_block', nc // n_block_z)
k_dims = [tx_dim, ty_dim, tz_dim]
batch_dim = mtf.Dimension("batch", batch_size)
pk_dim = mtf.Dimension("npk", len(plin))
pk = mtf.import_tf_tensor(mesh, plin.astype('float32'), shape=[pk_dim])
# Compute necessary Fourier kernels
kvec = flowpm.kernels.fftk((nc, nc, nc), symmetric=False)
kx = mtf.import_tf_tensor(mesh,
kvec[0].squeeze().astype('float32'),
shape=[tfx_dim])
ky = mtf.import_tf_tensor(mesh,
kvec[1].squeeze().astype('float32'),
shape=[tfy_dim])
kz = mtf.import_tf_tensor(mesh,
kvec[2].squeeze().astype('float32'),
shape=[tfz_dim])
kv = [ky, kz, kx]
# kvec for low resolution grid
kvec_lr = flowpm.kernels.fftk([nc, nc, nc], symmetric=False)
kx_lr = mtf.import_tf_tensor(mesh,
kvec_lr[0].squeeze().astype('float32'),
shape=[tx_dim])
ky_lr = mtf.import_tf_tensor(mesh,
kvec_lr[1].squeeze().astype('float32'),
shape=[ty_dim])
kz_lr = mtf.import_tf_tensor(mesh,
kvec_lr[2].squeeze().astype('float32'),
shape=[tz_dim])
kv_lr = [ky_lr, kz_lr, kx_lr]
shape = [batch_dim, fx_dim, fy_dim, fz_dim]
lr_shape = [batch_dim, fx_dim, fy_dim, fz_dim]
hr_shape = [batch_dim, nx_dim, ny_dim, nz_dim, sx_dim, sy_dim, sz_dim]
part_shape = [batch_dim, fx_dim, fy_dim, fz_dim]
# Begin simulation
initc = mtfpm.linear_field(mesh, shape, bs, nc, pk, kv)
# # Reshaping array into high resolution mesh
# field = mtf.slicewise(lambda x:tf.expand_dims(tf.expand_dims(tf.expand_dims(x, axis=1),axis=1),axis=1),
# [initc],
# output_dtype=tf.float32,
# output_shape=hr_shape,
# name='my_reshape',
# splittable_dims=lr_shape[:-1]+hr_shape[1:4]+part_shape[1:3])
#
state = mtfpm.lpt_init_single(
initc,
a0,
kv_lr,
halo_size,
lr_shape,
hr_shape,
part_shape[1:],
antialias=True,
)
# Here we can run our nbody
final_state = state #mtfpm.nbody(state, stages, lr_shape, hr_shape, k_dims, kv_lr, kv_hr, halo_size, downsampling_factor=downsampling_factor)
# paint the field
final_field = mtf.zeros(mesh, shape=hr_shape)
for block_size_dim in hr_shape[-3:]:
final_field = mtf.pad(final_field, [halo_size, halo_size],
block_size_dim.name)
final_field = mesh_utils.cic_paint(final_field, final_state[0], halo_size)
# Halo exchange
for blocks_dim, block_size_dim in zip(hr_shape[1:4], final_field.shape[-3:]):
final_field = mpm.halo_reduce(final_field, blocks_dim, block_size_dim,
halo_size)
# Remove borders
for block_size_dim in hr_shape[-3:]:
final_field = mtf.slice(final_field, halo_size, block_size_dim.size,
block_size_dim.name)
#final_field = mtf.reshape(final_field, [batch_dim, fx_dim, fy_dim, fz_dim])
  # Hack using a custom reshape because mesh is pretty dumb
final_field = mtf.slicewise(lambda x: x[:, 0, 0, 0], [final_field],
output_dtype=tf.float32,
output_shape=[batch_dim, fx_dim, fy_dim, fz_dim],
name='my_dumb_reshape',
splittable_dims=part_shape[:-1] + hr_shape[:4])
return initc, final_field
##
def main(_):
mesh_shape = mtf.convert_to_shape(FLAGS.mesh_shape)
print(mesh_shape)
#layout_rules = mtf.convert_to_layout_rules(FLAGS.layout)
#mesh_shape = [("row", FLAGS.nx), ("col", FLAGS.ny)]
layout_rules = [("nx_lr", "row"), ("ny_lr", "col"), ("nx", "row"),
("ny", "col"), ("ty", "row"), ("tz", "col"),
("ty_lr", "row"), ("tz_lr", "col"), ("nx_block", "row"),
("ny_block", "col")]
# Resolve the cluster from SLURM environment
cluster = tf.distribute.cluster_resolver.SlurmClusterResolver(
{"mesh": mesh_shape.size // FLAGS.gpus_per_task},
port_base=8822,
gpus_per_node=FLAGS.gpus_per_node,
gpus_per_task=FLAGS.gpus_per_task,
tasks_per_node=FLAGS.tasks_per_node)
cluster_spec = cluster.cluster_spec()
# Create a server for all mesh members
server = tf.distribute.Server(cluster_spec, "mesh", cluster.task_id)
  # Only the master job takes care of the graph building,
# everyone else can just chill for now
if cluster.task_id > 0:
server.join()
# Otherwise we are the main task, let's define the devices
mesh_devices = [
"/job:mesh/task:%d/device:GPU:%d" % (i, j)
for i in range(cluster_spec.num_tasks("mesh"))
for j in range(FLAGS.gpus_per_task)
]
print("List of devices", mesh_devices)
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
mesh_shape, layout_rules, mesh_devices)
# Build the model
# Create computational graphs and some initializations
graph = mtf.Graph()
mesh = mtf.Mesh(graph, "nbody_mesh")
initial_conditions, mesh_final_field = lpt_prototype(
mesh, bs=FLAGS.box_size, nc=FLAGS.nc, batch_size=FLAGS.batch_size)
# Lower mesh computation
lowering = mtf.Lowering(graph, {mesh: mesh_impl})
# Retrieve output of computation
initc = lowering.export_to_tf_tensor(initial_conditions)
result = lowering.export_to_tf_tensor(mesh_final_field)
with tf.Session(server.target,
config=tf.ConfigProto(allow_soft_placement=True,
log_device_placement=False)) as sess:
a, c = sess.run([initc, result])
plt.figure(figsize=(9, 3))
plt.subplot(121)
plt.imshow(a[0].sum(axis=2))
plt.title('Initial Conditions')
plt.subplot(122)
plt.imshow(c[0].sum(axis=2))
plt.title('Mesh TensorFlow')
plt.colorbar()
plt.savefig("figs/mesh_lpt_%d-b1%d-b2%d.png" %
(FLAGS.nc, FLAGS.nx, FLAGS.ny))
with tf.Session(server.target) as sess:
start = time.time()
err = sess.run(result)
end = time.time()
niter = 10
start = time.time()
for i in range(niter):
err = sess.run(result)
end = time.time()
ttime = (end - start) / niter
# profiler = tf.profiler.Profiler(sess.graph)
#
# run_meta = tf.RunMetadata()
# err = sess.run(result,
# options=tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE),
# run_metadata=run_meta)
#
# profiler.add_step(0, run_meta)
#
# opts = (tf.profiler.ProfileOptionBuilder(
# tf.profiler.ProfileOptionBuilder.time_and_memory())
# .with_step(0)
# .with_timeline_output(FLAGS.output_file).build())
# profiler.profile_graph(options=opts)
#
print('Time for ', mesh_shape, ' is : ', ttime)
exit(-1)
if __name__ == "__main__":
tf.app.run(main=main)
| 34.003247
| 144
| 0.636112
|
7904640078814a724b5f91b401860607bf111587
| 3,568
|
py
|
Python
|
2021/day20/day20.py
|
tcmitchell/AdventOfCode
|
caaac1aa37c999d4804f9f4154bf7033a06e98af
|
[
"MIT"
] | null | null | null |
2021/day20/day20.py
|
tcmitchell/AdventOfCode
|
caaac1aa37c999d4804f9f4154bf7033a06e98af
|
[
"MIT"
] | null | null | null |
2021/day20/day20.py
|
tcmitchell/AdventOfCode
|
caaac1aa37c999d4804f9f4154bf7033a06e98af
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
import argparse
import logging
from typing import TextIO
def parse_args(args=None):
parser = argparse.ArgumentParser()
parser.add_argument("input", type=argparse.FileType('r'),
metavar="PUZZLE_INPUT")
parser.add_argument('-d', '--debug', action='store_true')
args = parser.parse_args(args)
return args
def init_logging(debug=False):
msg_format = '%(asctime)s %(levelname)s %(message)s'
date_format = '%m/%d/%Y %H:%M:%S'
level = logging.INFO
if debug:
level = logging.DEBUG
logging.basicConfig(format=msg_format, datefmt=date_format, level=level)
class Image:
def __init__(self, pixels: dict[tuple[int, int], str], void_pixel: str):
self.pixels = pixels
self.void_pixel = void_pixel
def __getitem__(self, key: tuple[int, int]) -> str:
try:
return self.pixels[key]
except KeyError:
return self.void_pixel
@staticmethod
def from_grid(grid: list[list[str]]) -> Image:
pixels = Image.grid2pixel(grid)
return Image(pixels, '.')
@staticmethod
def grid2pixel(grid: list[list[str]]) -> dict[tuple[int, int], str]:
image = {}
for y in range(len(grid)):
for x in range(len(grid[0])):
image[(x, y)] = grid[y][x]
return image
@staticmethod
def neighbors(pixel: tuple[int, int]) -> list[tuple[int, int]]:
x = pixel[0]
y = pixel[1]
return [(x - 1, y - 1), (x, y - 1), (x + 1, y - 1),
(x - 1, y), (x, y), (x + 1, y),
(x - 1, y + 1), (x, y + 1), (x + 1, y + 1)]
def pixel2idx(self, pixel: str) -> int:
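        # '#' and '.' encode 1 and 0; the nine-pixel neighbourhood reads as a binary index into the enhancement string.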
bin_rep = pixel.replace('#', '1').replace('.', '0')
return int(bin_rep, base=2)
def enhance_pixel(self, iea: str, pixel: tuple[int, int]) -> str:
surround = [self[n] for n in self.neighbors(pixel)]
idx = self.pixel2idx(''.join(surround))
return iea[idx]
def bounds(self) -> tuple[int, ...]:
x_values = [p[0] for p in self.pixels]
y_values = [p[1] for p in self.pixels]
return min(x_values), min(y_values), max(x_values), max(y_values)
def enhance(self, iea: str) -> Image:
new_pixels = {}
min_x, min_y, max_x, max_y = self.bounds()
for x in range(min_x - 2, max_x + 2):
for y in range(min_y - 2, max_y + 2):
new_pixels[(x, y)] = self.enhance_pixel(iea, (x, y))
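        # The infinite background is uniform, so its next value is one IEA lookup of nine copies of the current void pixel.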
void_pixel = iea[self.pixel2idx(self.void_pixel * 9)]
return Image(new_pixels, void_pixel)
def lit_count(self):
return len([v for v in self.pixels.values() if v == '#'])
def load_input(fp: TextIO):
data = fp.read().strip().split('\n\n')
iea = data[0]
assert len(iea) == 512
grid = []
for line in data[1].strip().split('\n'):
grid.append(list(line))
image = Image.from_grid(grid)
return iea, image
def puzzle1(iea: str, image: Image) -> int:
for i in range(2):
image = image.enhance(iea)
return image.lit_count()
def puzzle2(iea, image) -> int:
for i in range(50):
image = image.enhance(iea)
return image.lit_count()
def main(argv=None):
args = parse_args(argv)
# Init logging
init_logging(args.debug)
iea, image = load_input(args.input)
answer = puzzle1(iea, image)
logging.info('Puzzle 1: %d', answer)
answer = puzzle2(iea, image)
logging.info('Puzzle 2: %d', answer)
if __name__ == '__main__':
main()
| 29.00813
| 76
| 0.575673
|
1c23ab98f06128ce61ec705ee0ddc9f007fd5b24
| 649
|
py
|
Python
|
var/spack/repos/builtin/packages/py-fisher/package.py
|
jeanbez/spack
|
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/py-fisher/package.py
|
jeanbez/spack
|
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 8
|
2021-11-09T20:28:40.000Z
|
2022-03-15T03:26:33.000Z
|
var/spack/repos/builtin/packages/py-fisher/package.py
|
jeanbez/spack
|
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2019-02-08T20:37:20.000Z
|
2019-03-31T15:19:26.000Z
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class PyFisher(PythonPackage):
"""Fisher's Exact Test.
Simple, fast implementation of Fisher's exact test."""
homepage = "https://github.com/brentp/fishers_exact_test"
pypi = "fisher/fisher-0.1.9.tar.gz"
version('0.1.9', sha256='d378b3f7e488e2a679c6d0e5ea1bce17bc931c2bfe8ec8424ee47a74f251968d')
depends_on('py-setuptools', type='build')
depends_on('py-numpy', type=('build', 'run'))
| 30.904762
| 95
| 0.724191
|
5463c4445114b390f454d8339b2e930973daace3
| 5,090
|
py
|
Python
|
src/kumparan_test/kumparanapi.py
|
teguhcf/python-flask
|
abe5136241943514038c41668ca2d814809bc1a3
|
[
"MIT"
] | null | null | null |
src/kumparan_test/kumparanapi.py
|
teguhcf/python-flask
|
abe5136241943514038c41668ca2d814809bc1a3
|
[
"MIT"
] | null | null | null |
src/kumparan_test/kumparanapi.py
|
teguhcf/python-flask
|
abe5136241943514038c41668ca2d814809bc1a3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This is a skeleton file that can serve as a starting point for a Python
console script. To run this script uncomment the following line in the
entry_points section in setup.py:
[console_scripts]
fibonacci = kumparan_test.skeleton:run
Then run `python setup.py install` which will install the command `fibonacci`
inside your current environment.
Besides console scripts, the header (i.e. until _logger...) of this file can
also be used as template for Python modules.
Note: This skeleton file can be safely removed if not needed!
"""
from __future__ import division, print_function, absolute_import
import argparse
import sys
import logging
from pymongo import MongoClient,ASCENDING
from bson.objectid import ObjectId
from flask import Flask, json, request,jsonify
from kumparan_test.model import Model
from src.kumparan_test import __version__
__author__ = "teguhcf"
__copyright__ = "teguhcf"
__license__ = "mit"
_logger = logging.getLogger(__name__)
# from flask import Flask, jsonify, abort, make_response, request, Response
# from flask.ext.pymongo import PyMongo
# from bson.json_util import dumps
# from bson.objectid import ObjectId
application = Flask(__name__)
client = MongoClient('localhost', 27017)
db = client.kumparan
model = Model()
@application.route("/api/v1/news/add", methods=['POST'])
def addNews():
try:
json_data = request.json['data']
model.addNews(json_data)
        return jsonify(status='OK', message='News inserted successfully')
except Exception as e:
return jsonify(status='ERROR', message=str(e))
@application.route("/api/v1/news/getlist", methods=['GET'])
def getNewsList():
try:
status = request.args.get('status')
topic_id = request.args.get('topic_id')
data = model.getNewsList(status,topic_id)
except Exception as e:
return str(e)
return json.dumps(data)
# return mongo_to_jsonResponse(machines)
@application.route('/api/v1/news/get', methods=['GET'])
def getNews():
try:
id = request.args.get('id')
data = model.getNews(id)
return json.dumps(data)
# return mongo_to_jsonResponse(machine)
except Exception as e:
return str(e)
@application.route("/api/v1/news/delete", methods=['DELETE'])
def deleteNews():
try:
id = request.args.get('id')
# machineId = request.json['id']
res = db.news.find_one({'_id': ObjectId(id)})
if res is None: return jsonify(status='ERROR', message="Data not exist")
db.news.remove({'_id': ObjectId(id)})
return jsonify(status='OK', message='deletion successful')
except Exception as e:
return jsonify(status='ERROR', message=str(e))
@application.route('/api/v1/news/update', methods=['POST'])
def updateNews():
try:
json_data = request.json['data']
res = model.updateNews(json_data)
if res is False: return jsonify(status='ERROR', message="Data not exist")
return jsonify(status='OK', message='updated successfully')
except Exception as e:
return jsonify(status='ERROR', message=str(e))
@application.route("/api/v1/topic/add", methods=['POST'])
def addTopic():
try:
json_data = request.json['data']
model.addTopic(json_data)
print("hhhhhhhhh")
return jsonify(status='OK', message='Topic inserted successfully')
except Exception as e:
return jsonify(status='ERROR', message=str(e))
@application.route('/api/v1/topic/update', methods=['PUT'])
def updateTopic():
try:
json_data = request.json['data']
res = model.updateTopic(json_data)
if res is False: return jsonify(status='ERROR', message="Data not exist")
return jsonify(status='OK', message='updated successfully')
except Exception as e:
return jsonify(status='ERROR', message=str(e))
@application.route("/api/v1/topic/getlist", methods=['GET'])
def getTopicList():
try:
topics = db.topic.find().sort([("topic_id", ASCENDING)])
except Exception as e:
return str(e)
# return json.dumps(machines)
return model.mongo_to_jsonResponse(topics)
@application.route("/api/v1/topic/get", methods=['GET'])
def getTopic():
try:
id = request.args.get('id')
data = db.topic.find_one({'topic_id': int(id)})
except Exception as e:
return str(e)
# return json.dumps(machines)
return model.mongo_to_jsonResponse(data)
@application.route("/api/v1/topic/delete", methods=['DELETE'])
def deleteTopic():
try:
id = request.args.get('id')
# machineId = request.json['id']
res = db.topic.find_one({'topic_id': int(id)})
if res is None: return jsonify(status='ERROR', message="Data not exist")
db.topic.remove({'topic_id': int(id)})
return jsonify(status='OK', message='deletion successful')
except Exception as e:
return jsonify(status='ERROR', message=str(e))
if __name__ == "__main__":
application.run(host='0.0.0.0')
| 28.277778
| 81
| 0.671513
|
0b64050553c13a36b70f3a7862d4cc5caf77a4ff
| 4,606
|
py
|
Python
|
pgmpy/tests/test_factors/test_FactorSet.py
|
echoyi/pgmpy
|
c37dda4401f23ec73fc5d17d957867cd62e588d3
|
[
"MIT"
] | 2,144
|
2015-01-05T21:25:04.000Z
|
2022-03-31T08:24:15.000Z
|
pgmpy/tests/test_factors/test_FactorSet.py
|
echoyi/pgmpy
|
c37dda4401f23ec73fc5d17d957867cd62e588d3
|
[
"MIT"
] | 1,181
|
2015-01-04T18:19:44.000Z
|
2022-03-30T17:21:19.000Z
|
pgmpy/tests/test_factors/test_FactorSet.py
|
echoyi/pgmpy
|
c37dda4401f23ec73fc5d17d957867cd62e588d3
|
[
"MIT"
] | 777
|
2015-01-01T11:13:27.000Z
|
2022-03-28T12:31:57.000Z
|
import unittest
from pgmpy.factors import FactorSet
from pgmpy.factors.discrete import DiscreteFactor
class TestFactorSet(unittest.TestCase):
def setUp(self):
self.phi1 = DiscreteFactor(["x1", "x2", "x3"], [2, 3, 2], range(12))
self.phi2 = DiscreteFactor(["x3", "x4", "x1"], [2, 2, 2], range(8))
self.phi3 = DiscreteFactor(["x5", "x6", "x7"], [2, 2, 2], range(8))
self.phi4 = DiscreteFactor(["x5", "x7", "x8"], [2, 2, 2], range(8))
def test_class_init(self):
phi1 = DiscreteFactor(["x1", "x2", "x3"], [2, 3, 2], range(12))
phi2 = DiscreteFactor(["x3", "x4", "x1"], [2, 2, 2], range(8))
factor_set1 = FactorSet(phi1, phi2)
self.assertEqual({phi1, phi2}, factor_set1.get_factors())
def test_factorset_add_remove_factors(self):
self.factor_set1 = FactorSet()
self.factor_set1.add_factors(self.phi1, self.phi2)
self.assertEqual({self.phi1, self.phi2}, self.factor_set1.get_factors())
self.factor_set1.remove_factors(self.phi2)
self.assertEqual({self.phi1}, self.factor_set1.get_factors())
def test_factorset_product(self):
factor_set1 = FactorSet(self.phi1, self.phi2)
factor_set2 = FactorSet(self.phi3, self.phi4)
factor_set3 = factor_set2.product(factor_set1, inplace=False)
self.assertEqual(
{self.phi1, self.phi2, self.phi3, self.phi4}, factor_set3.factors
)
def test_factorset_divide(self):
phi1 = DiscreteFactor(["x1", "x2", "x3"], [2, 3, 2], range(1, 13))
phi2 = DiscreteFactor(["x3", "x4", "x1"], [2, 2, 2], range(1, 9))
factor_set1 = FactorSet(phi1, phi2)
phi3 = DiscreteFactor(["x5", "x6", "x7"], [2, 2, 2], range(1, 9))
phi4 = DiscreteFactor(["x5", "x7", "x8"], [2, 2, 2], range(1, 9))
factor_set2 = FactorSet(phi3, phi4)
factor_set3 = factor_set2.divide(factor_set1, inplace=False)
self.assertEqual(
{phi3, phi4, phi1.identity_factor() / phi1, phi2.identity_factor() / phi2},
factor_set3.factors,
)
def test_factorset_marginalize_inplace(self):
factor_set = FactorSet(self.phi1, self.phi2, self.phi3, self.phi4)
factor_set.marginalize(["x1", "x5"], inplace=True)
phi1_equivalent_in_factor_set = list(
filter(lambda x: set(x.scope()) == {"x2", "x3"}, factor_set.factors)
)[0]
self.assertEqual(
self.phi1.marginalize(["x1"], inplace=False), phi1_equivalent_in_factor_set
)
phi2_equivalent_in_factor_set = list(
filter(lambda x: set(x.scope()) == {"x4", "x3"}, factor_set.factors)
)[0]
self.assertEqual(
self.phi2.marginalize(["x1"], inplace=False), phi2_equivalent_in_factor_set
)
phi3_equivalent_in_factor_set = list(
filter(lambda x: set(x.scope()) == {"x6", "x7"}, factor_set.factors)
)[0]
self.assertEqual(
self.phi3.marginalize(["x5"], inplace=False), phi3_equivalent_in_factor_set
)
phi4_equivalent_in_factor_set = list(
filter(lambda x: set(x.scope()) == {"x8", "x7"}, factor_set.factors)
)[0]
self.assertEqual(
self.phi4.marginalize(["x5"], inplace=False), phi4_equivalent_in_factor_set
)
def test_factorset_marginalize_not_inplace(self):
factor_set = FactorSet(self.phi1, self.phi2, self.phi3, self.phi4)
new_factor_set = factor_set.marginalize(["x1", "x5"], inplace=False)
phi1_equivalent_in_factor_set = list(
filter(lambda x: set(x.scope()) == {"x2", "x3"}, new_factor_set.factors)
)[0]
self.assertEqual(
self.phi1.marginalize(["x1"], inplace=False), phi1_equivalent_in_factor_set
)
phi2_equivalent_in_factor_set = list(
filter(lambda x: set(x.scope()) == {"x4", "x3"}, new_factor_set.factors)
)[0]
self.assertEqual(
self.phi2.marginalize(["x1"], inplace=False), phi2_equivalent_in_factor_set
)
phi3_equivalent_in_factor_set = list(
filter(lambda x: set(x.scope()) == {"x6", "x7"}, new_factor_set.factors)
)[0]
self.assertEqual(
self.phi3.marginalize(["x5"], inplace=False), phi3_equivalent_in_factor_set
)
phi4_equivalent_in_factor_set = list(
filter(lambda x: set(x.scope()) == {"x8", "x7"}, new_factor_set.factors)
)[0]
self.assertEqual(
self.phi4.marginalize(["x5"], inplace=False), phi4_equivalent_in_factor_set
)
| 44.718447
| 87
| 0.606817
|
b84c808393ac6a17ecdcd72afe131ec0360b1fb5
| 3,889
|
py
|
Python
|
src/initialization.py
|
farzana0/EvoNRL
|
993560755a6b63b1941fe6d964521ae88a067b47
|
[
"MIT"
] | 3
|
2021-02-16T05:20:57.000Z
|
2021-06-10T09:27:54.000Z
|
src/initialization.py
|
farzana0/EvoNRL
|
993560755a6b63b1941fe6d964521ae88a067b47
|
[
"MIT"
] | 1
|
2021-02-03T07:18:45.000Z
|
2021-02-03T07:18:45.000Z
|
src/initialization.py
|
farzana0/EvoNRL
|
993560755a6b63b1941fe6d964521ae88a067b47
|
[
"MIT"
] | 1
|
2021-06-10T09:27:55.000Z
|
2021-06-10T09:27:55.000Z
|
import argparse
import numpy as np
import networkx as nx
from gensim.models import Word2Vec
from elasticsearch import Elasticsearch
from elasticsearch import helpers
global args
import random
import cPickle
class parse_args():
def __init__(self, input, output, walkfile, vecinput , num_walks, walk_length):
self.weighted=False
self.input = input
self.output = output
        self.walkfile = walkfile
self.vecinput = vecinput
self.num_walks = num_walks
self.walk_length = walk_length
#building the randomwalk corpus
def random_walk(G, path_length, alpha=0, rand=random.Random(), start=None):
""" Returns a truncated random walk.
path_length: Length of the random walk.
alpha: probability of restarts.
start: the start node of the random walk.
"""
if start:
path = [start]
else:
path = [rand.choice(list(G.nodes()))]
while len(path) < path_length:
cur = path[-1]
if len(list(G[(cur)])) > 0:
if rand.random() >= alpha:
path.append(int(rand.choice(list(G[(cur)]))))
else:
path.append(path[0])
else:
break
return [str(node) for node in path]
def build_random_walk_set(G, num_paths, path_length, alpha=0,
rand=random.Random(0)):
walks = []
nodes = list(G.nodes())
for cnt in range(num_paths):
rand.shuffle(nodes)
for node in nodes:
walks.append(random_walk(G, path_length, rand=rand, alpha=alpha, start=node))
return walks
def elastic_init(walks, ind):
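    # Index each walk as one whitespace-tokenized document, presumably so node IDs can be matched as exact terms later.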
es_init = Elasticsearch(retry_on_timeout=True)
mapp = {
"walk": {
"properties": {
"wlks": {
"type": "text",
"store": True,
"analyzer" : "fulltext_analyzer"
}
}
}
}
sett = {"settings" : {
"index" : {
"blocks.read_only_allow_delete": False,
"number_of_shards" : 5,
"number_of_replicas" : 1
},
"analysis": {
"analyzer": {
"fulltext_analyzer": {
"type": "custom",
"tokenizer": "whitespace",
}
}
}
}
}
try:
es_init.indices.delete(index=ind)
except:
pass
es_init.indices.create(index=ind, body=sett, request_timeout=30)
es_init.indices.put_mapping(index=ind, doc_type='walk', body=mapp)
for j in range(0, len(walks)):
op_dict = {"wlks": " ".join(walks[j])}
es_init.index(index=ind, doc_type='walk', id=j, body=op_dict)
es_init.indices.refresh(index=ind)
return es_init
def learn_embeddings(walks, dimensions, window_size, workers, iteration, output, outputvec, simulatewalks):
'''
Learn embeddings by optimizing the Skipgram objective using SGD.
'''
if simulatewalks:
model = Word2Vec(walks, size=dimensions, window=window_size, min_count=0, sg=1, workers=workers, iter=iteration)
model.train(walks, total_examples=model.corpus_count, epochs=model.iter)
model.save(outputvec)
model.wv.save_word2vec_format(output + 'main')
else:
model = Word2Vec(walks, size=dimensions, window=10, min_count=0, sg=1, workers=1, iter=1)
model.save(outputvec)
model.wv.save_word2vec_format(output + 'main')
model.train(walks, total_examples=model.corpus_count, epochs=model.iter)
vocab =[]
keys = []
for key in sorted(model.wv.vocab, key=lambda x: int(x)):
keys.append(key)
vocab.append(np.array(model[key], dtype=float))
return np.array(vocab), keys
def main(g, indexx, num_walks, walk_length, outputvec, output, dimensions, window_size, workers, iteration, simulatewalks, walkfile):
if simulatewalks:
walks = build_random_walk_set(g, num_walks, walk_length, alpha=0, rand=random.Random(0))
with open(walkfile, 'wb') as pf:
cPickle.dump(walks, pf)
else:
with open(walkfile, 'rb') as pf:
walks = cPickle.load(pf)
walks = [map(str, walk) for walk in walks]
lr, keys = learn_embeddings(walks, dimensions, window_size, workers, iteration, output, outputvec, simulatewalks)
es_init = elastic_init(walks, indexx)
return walks, es_init, lr, keys
| 27.58156
| 133
| 0.696066
|
8bcdc80d4a70b9baa7ee62c7586b5cd76acce39d
| 2,979
|
py
|
Python
|
AutoAcademicCV/textfile.py
|
german-arroyo-moreno/AutoAcademia
|
3f51eb8b4c6706ed5ccc71bb57bf2d478e7cae17
|
[
"MIT"
] | null | null | null |
AutoAcademicCV/textfile.py
|
german-arroyo-moreno/AutoAcademia
|
3f51eb8b4c6706ed5ccc71bb57bf2d478e7cae17
|
[
"MIT"
] | null | null | null |
AutoAcademicCV/textfile.py
|
german-arroyo-moreno/AutoAcademia
|
3f51eb8b4c6706ed5ccc71bb57bf2d478e7cae17
|
[
"MIT"
] | null | null | null |
# encoding: utf-8
"""
Copyright 2018 (c) Germán Arroyo
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import sys
def openFile(fileName):
"""
" Open a file or exit of the program,
" return the handler of the file
"""
try:
finput = open(fileName, 'r')
except IOError:
print("Error loading file '" + fileName + "'. ABORT.")
sys.exit(-1)
return finput
def openTxtFile(fileName):
"""
" Open a file or exit of the program,
" return the text of the file
"""
try:
finput = open(fileName, 'r')
except IOError:
print("Error loading text file '" + fileName + "'. ABORT.")
sys.exit(-1)
text = finput.read()
finput.close()
return text
def openLinesTxtFile(fileName):
"""
" Open a file or exit of the program,
" return a list of lines of text of the file
"""
try:
finput = open(fileName, 'r')
except IOError:
print("Error loading text file '" + fileName + "'. ABORT.")
sys.exit(-1)
text = finput.readlines()
finput.close()
return text
def saveTxtFile(fileName, text, append=False):
"""
" Open a file or exit of the program,
" return the text of the file
"""
try:
if append:
foutput = open(fileName, 'a')
else:
foutput = open(fileName, 'w')
except IOError:
print("Error loading text file '" + fileName + "'. ABORT.")
sys.exit(-1)
foutput.write(text)
foutput.close()
return
def openListFile(fileName, delim=','):
"""
" Open a file or exit of the program,
" return a list separated by delim
"""
try:
finput = open(fileName, 'r')
except IOError:
print("Error loading text file '" + fileName + "'. ABORT.")
sys.exit(-1)
text = finput.read()
listT = text.split(delim)
listT = [item.replace('\n', '').replace('\r','').strip() for item in listT]
finput.close()
return listT
| 31.691489
| 460
| 0.645854
|
d057fe577b58506a6453e541206b6b84fe3b0fce
| 5,940
|
py
|
Python
|
pex/tools/commands/graph.py
|
zmanji/pex
|
e43b9da471a79ab11d905956a40583a420992b28
|
[
"Apache-2.0"
] | null | null | null |
pex/tools/commands/graph.py
|
zmanji/pex
|
e43b9da471a79ab11d905956a40583a420992b28
|
[
"Apache-2.0"
] | null | null | null |
pex/tools/commands/graph.py
|
zmanji/pex
|
e43b9da471a79ab11d905956a40583a420992b28
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import
import logging
import os
import tempfile
import threading
from argparse import ArgumentParser, Namespace
from contextlib import contextmanager
from pex.common import safe_mkdir
from pex.dist_metadata import requires_dists
from pex.pex import PEX
from pex.tools.command import Command, Ok, OutputMixin, Result, try_open_file, try_run_program
from pex.tools.commands.digraph import DiGraph
from pex.typing import TYPE_CHECKING
from pex.variables import ENV
if TYPE_CHECKING:
from typing import Iterator, IO, Tuple
logger = logging.getLogger(__name__)
class Graph(OutputMixin, Command):
"""Generates a dot graph of the dependencies contained in a PEX file."""
@staticmethod
def _create_dependency_graph(pex):
# type: (PEX) -> DiGraph
graph = DiGraph(
pex.path(),
fontsize="14",
labelloc="t",
label="Dependency graph of {} for interpreter {} ({})".format(
pex.path(), pex.interpreter.binary, pex.interpreter.identity.requirement
),
)
marker_environment = pex.interpreter.identity.env_markers.copy()
marker_environment["extra"] = ""
present_dists = frozenset(dist.project_name for dist in pex.resolve())
for dist in pex.resolve():
graph.add_node(
name=dist.project_name,
label="{name} {version}".format(name=dist.project_name, version=dist.version),
URL="https://pypi.org/project/{name}/{version}".format(
name=dist.project_name, version=dist.version
),
target="_blank",
)
for req in requires_dists(dist):
if (
req.project_name not in present_dists
and req.marker
and not req.marker.evaluate(environment=marker_environment)
):
graph.add_node(
name=req.project_name,
color="lightgrey",
style="filled",
tooltip="inactive requirement",
URL="https://pypi.org/project/{name}".format(name=req.project_name),
target="_blank",
)
graph.add_edge(
start=dist.project_name,
end=req.project_name,
label="{specifier}{marker}".format(
specifier=req.specifier if req.specifier else "",
marker="; {}".format(req.marker) if req.marker else "",
)
if (req.specifier or req.marker)
else None,
fontsize="10",
)
return graph
def add_arguments(self, parser):
# type: (ArgumentParser) -> None
self.add_output_option(parser, entity="dot graph")
parser.add_argument(
"-r",
"--render",
action="store_true",
help="Attempt to render the graph.",
)
parser.add_argument(
"-f",
"--format",
default="svg",
help="The format to render the graph in.",
)
parser.add_argument(
"--open",
action="store_true",
help="Attempt to open the graph in the system viewer (implies --render).",
)
@staticmethod
def _dot(
options, # type: Namespace
graph, # type: DiGraph
render_fp, # type: IO
):
# type: (...) -> Result
read_fd, write_fd = os.pipe()
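        # Stream the DOT source through a pipe: a daemon thread writes it while the dot subprocess reads it.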
def emit():
with os.fdopen(write_fd, "w") as fp:
graph.emit(fp)
emit_thread = threading.Thread(name="{} Emitter".format(__name__), target=emit)
emit_thread.daemon = True
emit_thread.start()
try:
return try_run_program(
"dot",
url="https://graphviz.org/",
error="Failed to render dependency graph for {}.".format(graph.name),
args=["-T", options.format],
stdin=read_fd,
stdout=render_fp,
)
finally:
emit_thread.join()
@contextmanager
def _output_for_open(self, options):
# type: (Namespace) -> Iterator[Tuple[IO, str]]
if self.is_stdout(options):
tmpdir = os.path.join(ENV.PEX_ROOT, "tmp")
safe_mkdir(tmpdir)
with tempfile.NamedTemporaryFile(
prefix="{}.".format(__name__),
suffix=".deps.{}".format(options.format),
dir=tmpdir,
delete=False,
) as tmp_out:
yield tmp_out, tmp_out.name
return
with self.output(options, binary=True) as out:
yield out, out.name
def run(
self,
pex, # type: PEX
options, # type: Namespace
):
# type: (...) -> Result
graph = self._create_dependency_graph(pex)
if not (options.render or options.open):
with self.output(options) as out:
graph.emit(out)
return Ok()
if not options.open:
with self.output(options, binary=True) as out:
return self._dot(options, graph, out)
with self._output_for_open(options) as (out, open_path):
result = self._dot(options, graph, out)
if result.is_error:
return result
return try_open_file(
open_path,
error="Failed to open dependency graph of {} rendered in {} for viewing.".format(
pex.path(), open_path
),
)
| 33.942857
| 94
| 0.538384
|
2c416affd035c63233f5388582acbacf9bab9c21
| 3,762
|
py
|
Python
|
web_features/support/screen.py
|
alexgarzao/beeweb
|
c67d024c2d43c6bb2da7ba6877c1648e8760f036
|
[
"MIT"
] | 5
|
2017-11-10T12:48:10.000Z
|
2018-02-21T21:29:52.000Z
|
web_features/support/screen.py
|
alexgarzao/beeweb
|
c67d024c2d43c6bb2da7ba6877c1648e8760f036
|
[
"MIT"
] | 1
|
2018-04-22T00:08:16.000Z
|
2018-04-22T00:08:16.000Z
|
web_features/support/screen.py
|
alexgarzao/beeweb
|
c67d024c2d43c6bb2da7ba6877c1648e8760f036
|
[
"MIT"
] | 2
|
2017-11-10T17:06:34.000Z
|
2017-11-11T03:27:46.000Z
|
from selenium.webdriver.support.ui import WebDriverWait
from .element import IdElement, TextElement, NameElement, XpathElement, AutomationIdElement, ClassNameElement, \
CssSelectorElement
from features.support.actions import Actions
from features.support.definition import Definition
class Screen(Definition):
def __init__(self, driver, name):
super().__init__()
self.driver = driver
self.name = name
self.url = ""
self.elements = {}
self.actions = Actions()
def add_id_element(self, name, internal_id, ignore_displayed=False):
self.__add_element(name, IdElement(self.driver, self.name, name, internal_id, ignore_displayed))
def add_text_element(self, name, internal_id, ignore_displayed=False):
self.__add_element(name, TextElement(self.driver, self.name, name, internal_id, ignore_displayed))
def add_name_element(self, name, internal_id, ignore_displayed=False):
self.__add_element(name, NameElement(self.driver, self.name, name, internal_id, ignore_displayed))
def add_xpath_element(self, name, internal_id, ignore_displayed=False):
self.__add_element(name, XpathElement(self.driver, self.name, name, internal_id, ignore_displayed))
def add_automation_id_element(self, name, internal_id, ignore_displayed=False):
self.__add_element(name, AutomationIdElement(self.driver, self.name, name, internal_id, ignore_displayed))
def add_class_name_element(self, name, class_name, ignore_displayed=False):
self.__add_element(name, ClassNameElement(self.driver, self.name, name, class_name, ignore_displayed))
def add_css_selector_element(self, name, css, ignore_displayed=False):
self.__add_element(name, CssSelectorElement(self.driver, self.name, name, css, ignore_displayed))
def add_action(self, name):
self.actions.add_action(name)
def add_event_in_action(self, action_name, event):
self.actions.add_event(action_name, event)
def get_steps_to_execute(self, action_name):
return self.actions.get_steps_to_execute(action_name)
def find_element(self, name, parameter=None):
name = name.lower()
element = self.elements.get(name)
if element is None:
possible = ','.join(list(self.elements))
raise ElementNotFoundException("Element {} not found. Possible values: {}".format(name, possible))
element.inc_uses_number()
self.__wait_for_ajax()
return element.find_element(parameter)
def get_element(self, name):
name = name.lower()
return self.elements.get(name)
def set_url(self, url):
self.url = url
def get_url(self):
return self.url
def get_name(self):
return self.name
def get_unused_elements(self):
unused_elements = [element for key, element in self.elements.items() if element.get_uses_number() == 0]
return unused_elements
def get_unused_actions(self):
return self.actions.get_unused_actions()
def __add_element(self, name, new_element):
name = name.lower()
if self.elements.get(name) is not None:
raise DuplicatedElementException("Element {} already exists".format(name))
self.elements[name] = new_element
def __wait_for_ajax(self):
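        # Wait up to 10 s for jQuery to report no active requests and for the document to finish loading.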
wait = WebDriverWait(self.driver, 10)
try:
wait.until(lambda driver: self.driver.execute_script("return jQuery.active == 0"))
wait.until(lambda driver: self.driver.execute_script('return document.readyState == "complete"'))
        except Exception:
            # The page may not use jQuery or the wait may time out; ignore and continue.
            pass
class DuplicatedElementException(Exception):
pass
class ElementNotFoundException(Exception):
pass
| 36.882353
| 114
| 0.704413
|
38a95b7759eeba5bc378847f907bb5b52f9ef4ad
| 3,595
|
py
|
Python
|
Graph/MapGraph.py
|
King-ofwolf/PACOS
|
730a6d341640d6f94a8b3d24c0336d71f3b1689e
|
[
"MIT"
] | 2
|
2019-06-03T12:52:05.000Z
|
2019-12-12T02:17:41.000Z
|
Graph/MapGraph.py
|
King-ofwolf/PACOS
|
730a6d341640d6f94a8b3d24c0336d71f3b1689e
|
[
"MIT"
] | 1
|
2019-06-01T16:47:57.000Z
|
2019-06-03T07:42:26.000Z
|
Graph/MapGraph.py
|
King-ofwolf/PACOS
|
730a6d341640d6f94a8b3d24c0336d71f3b1689e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Author: kingofwolf
# @Date: 2019-04-04 16:55:08
# @Last Modified by: kingofwolf
# @Last Modified time: 2019-05-24 16:17:34
# @Email: wangshenglingQQ@163.com
'Info: a Python file '
__author__ = 'Wang'
__version__= '2019-04-04 16:55:08'
from pyecharts import Scatter
from pyecharts import Tree
import json
import codecs
import os
def GenerateScatter(ST,outpath='./mapgraph.html'):
taskNo = [i for i in range(len(ST))]
netNo=ST
x=taskNo
y=netNo
symbol_size=(10-len(x)/100) if len(x)<1000 else 1
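    # Presumably shrinks the markers as the number of tasks grows so large mappings stay readable.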
scatter = Scatter(title="Topology Mapping Scatter",subtitle="x:task No.\ny:core No.")
scatter.add("core No.", x, y,
symbol_size=symbol_size,
is_toolbox_show=False,
)
scatter.render(outpath)
def GenerateTree_json(ST,Net,outpath='./tree.json'):
mapjson={'treedics':[]}
treedic={'children':[],'name':''}
mapjson['treedics'].append(treedic)
tmp_dic_list=mapjson['treedics'] #all children nodes
nodes=0
depath=0
if Net==[]:
treedic['name']='No Graph to show'
nodes=5
elif Net[0] == 'tleaf':
netdepth=Net[1]
for nodes in Net[2:netdepth+1]:
tmp_list=[] #new children nodes
for j in tmp_dic_list:
for i in range(int(nodes)):
tmp_dic={'children':[],'name':''}
j['children'].append(tmp_dic)
tmp_list.append(tmp_dic)
tmp_dic_list=tmp_list
nodes=Net[netdepth+1]
clk=0
for j in tmp_dic_list:
for i in range(int(nodes)):
j['name']+=str(ST.index(clk))+','
clk+=1
j['name']=j['name'][0:-1]
nodes=len(tmp_dic_list)
depath=netdepth
elif Net[0] == 'matrix':
tcores=Net[1]
nodes=Net[2]
cores=Net[3]
depath=2
clk=0
for i in range(nodes):
treedic['children'].append({'children':[],'name':''})
for i in treedic['children']:
for core in range(cores):
# i['children'].append({'children':[],'name':str(ST.index(clk)),'vaule':clk})
i['name']+=str(ST.index(clk))+','
clk+=1
if clk >=tcores:
break
i['name']=i['name'][0:-1]
with codecs.open(outpath,"w",encoding="utf-8") as f:
json.dump(mapjson,f,ensure_ascii=False)
return nodes,depath
def GenerateTree(ST,Net,outpath='./treegraph.html'):
outpathdir=os.path.dirname(outpath)
nodes,depath=GenerateTree_json(ST,Net,outpath=os.path.join(outpathdir,'tree.json'))
with codecs.open(os.path.join(outpathdir,'tree.json'), "r", encoding="utf-8") as f:
j = json.load(f)
data=j['treedics']
tree = Tree(width=600, height=nodes*20)
tree.add("Topology Mapping result", data,
tree_collapse_interval=0,
tree_right="60%",
tree_symbol_size=10,
is_toolbox_show=False)
tree.render(outpath)
if __name__ == '__main__':
# st=[23,17,15,9,19,5,14,13,12,10,6,3,21,7,16,2,4,18,0,22,20,1,8,11,85,76,95,89,78,81,91,86,74,88,77,92,90,84,79,83,73,72,93,75,87,94,82,80,59,48,68,55,49,58,56,50,71,67,61,66,65,69,63,57,54,70,60,64,52,53,51,62,40,44,45,37,27,38,25,34,43,41,35,31,32,39,26,28,30,47,33,36,29,46,42,24,115,97,106,116,98,112,99,96,103,100,105,113,119,102,114,118,110,109,111,117,104,101,108,107,127,122,121,125,123,120,124,126]
# GenerateScatter(st)
st=[23,17,15,9,19,5,14,13,12,10,6,3,21,7,16,2,4,18,0,22,20,1,8,11,85,76,95,89,78,81,91,86,74,88,77,92,90,84,79,83,73,72,93,75,87,94,82,80,59,48,68,55,49,58,56,50,71,67,61,66,65,69,63,57,54,70,60,64,52,53,51,62,40,44,45,37,27,38,25,34,43,41,35,31,32,39,26,28,30,47,33,36,29,46,42,24,115,97,106,116,98,112,99,96,103,100,105,113,119,102,114,118,110,109,111,117,104,101,108,107,127,122,121,125,123,120,124,126]
net=['tleaf',4,16,2,2,2]
net=['matrix',128,7,20]
# st=[i for i in range(32)]
# net=['tleaf',4,4,2,2,2]
GenerateTree(st,net)
| 32.981651
| 409
| 0.668428
|
051e260b17c01fcd9847f9a58d73a36c0669462f
| 33,369
|
py
|
Python
|
pytorch/pytorchcv/model_provider.py
|
HyperGAN/imgclsmob
|
88b9776a5a927dc9a54e85e31978c4a9ec5ecbf3
|
[
"MIT"
] | null | null | null |
pytorch/pytorchcv/model_provider.py
|
HyperGAN/imgclsmob
|
88b9776a5a927dc9a54e85e31978c4a9ec5ecbf3
|
[
"MIT"
] | null | null | null |
pytorch/pytorchcv/model_provider.py
|
HyperGAN/imgclsmob
|
88b9776a5a927dc9a54e85e31978c4a9ec5ecbf3
|
[
"MIT"
] | null | null | null |
from .models.alexnet import *
from .models.zfnet import *
from .models.vgg import *
from .models.bninception import *
from .models.resnet import *
from .models.preresnet import *
from .models.resnext import *
from .models.seresnet import *
from .models.sepreresnet import *
from .models.seresnext import *
from .models.senet import *
from .models.ibnresnet import *
from .models.ibnbresnet import *
from .models.ibnresnext import *
from .models.ibndensenet import *
from .models.airnet import *
from .models.airnext import *
from .models.bamresnet import *
from .models.cbamresnet import *
from .models.resattnet import *
from .models.sknet import *
from .models.diaresnet import *
from .models.diapreresnet import *
from .models.pyramidnet import *
from .models.diracnetv2 import *
from .models.sharesnet import *
from .models.densenet import *
from .models.condensenet import *
from .models.sparsenet import *
from .models.peleenet import *
from .models.wrn import *
from .models.drn import *
from .models.dpn import *
from .models.darknet import *
from .models.darknet53 import *
from .models.channelnet import *
from .models.isqrtcovresnet import *
from .models.revnet import *
from .models.irevnet import *
from .models.bagnet import *
from .models.dla import *
from .models.msdnet import *
from .models.fishnet import *
from .models.espnetv2 import *
from .models.xdensenet import *
from .models.squeezenet import *
from .models.squeezenext import *
from .models.shufflenet import *
from .models.shufflenetv2 import *
from .models.shufflenetv2b import *
from .models.menet import *
from .models.mobilenet import *
from .models.mobilenetv2 import *
from .models.mobilenetv3 import *
from .models.igcv3 import *
from .models.mnasnet import *
from .models.darts import *
from .models.proxylessnas import *
from .models.xception import *
from .models.inceptionv3 import *
from .models.inceptionv4 import *
from .models.inceptionresnetv2 import *
from .models.polynet import *
from .models.nasnet import *
from .models.pnasnet import *
from .models.efficientnet import *
from .models.nin_cifar import *
from .models.resnet_cifar import *
from .models.preresnet_cifar import *
from .models.resnext_cifar import *
from .models.seresnet_cifar import *
from .models.sepreresnet_cifar import *
from .models.pyramidnet_cifar import *
from .models.densenet_cifar import *
from .models.xdensenet_cifar import *
from .models.wrn_cifar import *
from .models.wrn1bit_cifar import *
from .models.ror_cifar import *
from .models.rir_cifar import *
from .models.msdnet_cifar10 import *
from .models.resdropresnet_cifar import *
from .models.shakeshakeresnet_cifar import *
from .models.shakedropresnet_cifar import *
from .models.fractalnet_cifar import *
from .models.diaresnet_cifar import *
from .models.diapreresnet_cifar import *
from .models.octresnet import *
from .models.resnetd import *
from .models.resnet_cub import *
from .models.seresnet_cub import *
from .models.mobilenet_cub import *
from .models.proxylessnas_cub import *
from .models.ntsnet_cub import *
from .models.fcn8sd import *
from .models.pspnet import *
from .models.deeplabv3 import *
from .models.superpointnet import *
# from .models.others.oth_superpointnet import *
__all__ = ['get_model']
_models = {
'alexnet': alexnet,
'alexnetb': alexnetb,
'zfnet': zfnet,
'zfnetb': zfnetb,
'vgg11': vgg11,
'vgg13': vgg13,
'vgg16': vgg16,
'vgg19': vgg19,
'bn_vgg11': bn_vgg11,
'bn_vgg13': bn_vgg13,
'bn_vgg16': bn_vgg16,
'bn_vgg19': bn_vgg19,
'bn_vgg11b': bn_vgg11b,
'bn_vgg13b': bn_vgg13b,
'bn_vgg16b': bn_vgg16b,
'bn_vgg19b': bn_vgg19b,
'bninception': bninception,
'resnet10': resnet10,
'resnet12': resnet12,
'resnet14': resnet14,
'resnetbc14b': resnetbc14b,
'resnet16': resnet16,
'resnet18_wd4': resnet18_wd4,
'resnet18_wd2': resnet18_wd2,
'resnet18_w3d4': resnet18_w3d4,
'resnet18': resnet18,
'resnet26': resnet26,
'resnetbc26b': resnetbc26b,
'resnet34': resnet34,
'resnetbc38b': resnetbc38b,
'resnet50': resnet50,
'resnet50b': resnet50b,
'resnet101': resnet101,
'resnet101b': resnet101b,
'resnet152': resnet152,
'resnet152b': resnet152b,
'resnet200': resnet200,
'resnet200b': resnet200b,
'preresnet10': preresnet10,
'preresnet12': preresnet12,
'preresnet14': preresnet14,
'preresnetbc14b': preresnetbc14b,
'preresnet16': preresnet16,
'preresnet18_wd4': preresnet18_wd4,
'preresnet18_wd2': preresnet18_wd2,
'preresnet18_w3d4': preresnet18_w3d4,
'preresnet18': preresnet18,
'preresnet26': preresnet26,
'preresnetbc26b': preresnetbc26b,
'preresnet34': preresnet34,
'preresnetbc38b': preresnetbc38b,
'preresnet50': preresnet50,
'preresnet50b': preresnet50b,
'preresnet101': preresnet101,
'preresnet101b': preresnet101b,
'preresnet152': preresnet152,
'preresnet152b': preresnet152b,
'preresnet200': preresnet200,
'preresnet200b': preresnet200b,
'preresnet269b': preresnet269b,
'resnext14_16x4d': resnext14_16x4d,
'resnext14_32x2d': resnext14_32x2d,
'resnext14_32x4d': resnext14_32x4d,
'resnext26_16x4d': resnext26_16x4d,
'resnext26_32x2d': resnext26_32x2d,
'resnext26_32x4d': resnext26_32x4d,
'resnext38_32x4d': resnext38_32x4d,
'resnext50_32x4d': resnext50_32x4d,
'resnext101_32x4d': resnext101_32x4d,
'resnext101_64x4d': resnext101_64x4d,
'seresnet10': seresnet10,
'seresnet12': seresnet12,
'seresnet14': seresnet14,
'seresnet16': seresnet16,
'seresnet18': seresnet18,
'seresnet26': seresnet26,
'seresnetbc26b': seresnetbc26b,
'seresnet34': seresnet34,
'seresnetbc38b': seresnetbc38b,
'seresnet50': seresnet50,
'seresnet50b': seresnet50b,
'seresnet101': seresnet101,
'seresnet101b': seresnet101b,
'seresnet152': seresnet152,
'seresnet152b': seresnet152b,
'seresnet200': seresnet200,
'seresnet200b': seresnet200b,
'sepreresnet10': sepreresnet10,
'sepreresnet12': sepreresnet12,
'sepreresnet14': sepreresnet14,
'sepreresnet16': sepreresnet16,
'sepreresnet18': sepreresnet18,
'sepreresnet26': sepreresnet26,
'sepreresnetbc26b': sepreresnetbc26b,
'sepreresnet34': sepreresnet34,
'sepreresnetbc38b': sepreresnetbc38b,
'sepreresnet50': sepreresnet50,
'sepreresnet50b': sepreresnet50b,
'sepreresnet101': sepreresnet101,
'sepreresnet101b': sepreresnet101b,
'sepreresnet152': sepreresnet152,
'sepreresnet152b': sepreresnet152b,
'sepreresnet200': sepreresnet200,
'sepreresnet200b': sepreresnet200b,
'seresnext50_32x4d': seresnext50_32x4d,
'seresnext101_32x4d': seresnext101_32x4d,
'seresnext101_64x4d': seresnext101_64x4d,
'senet16': senet16,
'senet28': senet28,
'senet40': senet40,
'senet52': senet52,
'senet103': senet103,
'senet154': senet154,
'ibn_resnet50': ibn_resnet50,
'ibn_resnet101': ibn_resnet101,
'ibn_resnet152': ibn_resnet152,
'ibnb_resnet50': ibnb_resnet50,
'ibnb_resnet101': ibnb_resnet101,
'ibnb_resnet152': ibnb_resnet152,
'ibn_resnext50_32x4d': ibn_resnext50_32x4d,
'ibn_resnext101_32x4d': ibn_resnext101_32x4d,
'ibn_resnext101_64x4d': ibn_resnext101_64x4d,
'ibn_densenet121': ibn_densenet121,
'ibn_densenet161': ibn_densenet161,
'ibn_densenet169': ibn_densenet169,
'ibn_densenet201': ibn_densenet201,
'airnet50_1x64d_r2': airnet50_1x64d_r2,
'airnet50_1x64d_r16': airnet50_1x64d_r16,
'airnet101_1x64d_r2': airnet101_1x64d_r2,
'airnext50_32x4d_r2': airnext50_32x4d_r2,
'airnext101_32x4d_r2': airnext101_32x4d_r2,
'airnext101_32x4d_r16': airnext101_32x4d_r16,
'bam_resnet18': bam_resnet18,
'bam_resnet34': bam_resnet34,
'bam_resnet50': bam_resnet50,
'bam_resnet101': bam_resnet101,
'bam_resnet152': bam_resnet152,
'cbam_resnet18': cbam_resnet18,
'cbam_resnet34': cbam_resnet34,
'cbam_resnet50': cbam_resnet50,
'cbam_resnet101': cbam_resnet101,
'cbam_resnet152': cbam_resnet152,
'resattnet56': resattnet56,
'resattnet92': resattnet92,
'resattnet128': resattnet128,
'resattnet164': resattnet164,
'resattnet200': resattnet200,
'resattnet236': resattnet236,
'resattnet452': resattnet452,
'sknet50': sknet50,
'sknet101': sknet101,
'sknet152': sknet152,
'diaresnet10': diaresnet10,
'diaresnet12': diaresnet12,
'diaresnet14': diaresnet14,
'diaresnetbc14b': diaresnetbc14b,
'diaresnet16': diaresnet16,
'diaresnet18': diaresnet18,
'diaresnet26': diaresnet26,
'diaresnetbc26b': diaresnetbc26b,
'diaresnet34': diaresnet34,
'diaresnetbc38b': diaresnetbc38b,
'diaresnet50': diaresnet50,
'diaresnet50b': diaresnet50b,
'diaresnet101': diaresnet101,
'diaresnet101b': diaresnet101b,
'diaresnet152': diaresnet152,
'diaresnet152b': diaresnet152b,
'diaresnet200': diaresnet200,
'diaresnet200b': diaresnet200b,
'diapreresnet10': diapreresnet10,
'diapreresnet12': diapreresnet12,
'diapreresnet14': diapreresnet14,
'diapreresnetbc14b': diapreresnetbc14b,
'diapreresnet16': diapreresnet16,
'diapreresnet18': diapreresnet18,
'diapreresnet26': diapreresnet26,
'diapreresnetbc26b': diapreresnetbc26b,
'diapreresnet34': diapreresnet34,
'diapreresnetbc38b': diapreresnetbc38b,
'diapreresnet50': diapreresnet50,
'diapreresnet50b': diapreresnet50b,
'diapreresnet101': diapreresnet101,
'diapreresnet101b': diapreresnet101b,
'diapreresnet152': diapreresnet152,
'diapreresnet152b': diapreresnet152b,
'diapreresnet200': diapreresnet200,
'diapreresnet200b': diapreresnet200b,
'diapreresnet269b': diapreresnet269b,
'pyramidnet101_a360': pyramidnet101_a360,
'diracnet18v2': diracnet18v2,
'diracnet34v2': diracnet34v2,
'sharesnet18': sharesnet18,
'sharesnet34': sharesnet34,
'sharesnet50': sharesnet50,
'sharesnet50b': sharesnet50b,
'sharesnet101': sharesnet101,
'sharesnet101b': sharesnet101b,
'sharesnet152': sharesnet152,
'sharesnet152b': sharesnet152b,
'densenet121': densenet121,
'densenet161': densenet161,
'densenet169': densenet169,
'densenet201': densenet201,
'condensenet74_c4_g4': condensenet74_c4_g4,
'condensenet74_c8_g8': condensenet74_c8_g8,
'sparsenet121': sparsenet121,
'sparsenet161': sparsenet161,
'sparsenet169': sparsenet169,
'sparsenet201': sparsenet201,
'sparsenet264': sparsenet264,
'peleenet': peleenet,
'wrn50_2': wrn50_2,
'drnc26': drnc26,
'drnc42': drnc42,
'drnc58': drnc58,
'drnd22': drnd22,
'drnd38': drnd38,
'drnd54': drnd54,
'drnd105': drnd105,
'dpn68': dpn68,
'dpn68b': dpn68b,
'dpn98': dpn98,
'dpn107': dpn107,
'dpn131': dpn131,
'darknet_ref': darknet_ref,
'darknet_tiny': darknet_tiny,
'darknet19': darknet19,
'darknet53': darknet53,
'channelnet': channelnet,
'revnet38': revnet38,
'revnet110': revnet110,
'revnet164': revnet164,
'irevnet301': irevnet301,
'bagnet9': bagnet9,
'bagnet17': bagnet17,
'bagnet33': bagnet33,
'dla34': dla34,
'dla46c': dla46c,
'dla46xc': dla46xc,
'dla60': dla60,
'dla60x': dla60x,
'dla60xc': dla60xc,
'dla102': dla102,
'dla102x': dla102x,
'dla102x2': dla102x2,
'dla169': dla169,
'msdnet22': msdnet22,
'fishnet99': fishnet99,
'fishnet150': fishnet150,
'espnetv2_wd2': espnetv2_wd2,
'espnetv2_w1': espnetv2_w1,
'espnetv2_w5d4': espnetv2_w5d4,
'espnetv2_w3d2': espnetv2_w3d2,
'espnetv2_w2': espnetv2_w2,
'xdensenet121_2': xdensenet121_2,
'xdensenet161_2': xdensenet161_2,
'xdensenet169_2': xdensenet169_2,
'xdensenet201_2': xdensenet201_2,
'squeezenet_v1_0': squeezenet_v1_0,
'squeezenet_v1_1': squeezenet_v1_1,
'squeezeresnet_v1_0': squeezeresnet_v1_0,
'squeezeresnet_v1_1': squeezeresnet_v1_1,
'sqnxt23_w1': sqnxt23_w1,
'sqnxt23_w3d2': sqnxt23_w3d2,
'sqnxt23_w2': sqnxt23_w2,
'sqnxt23v5_w1': sqnxt23v5_w1,
'sqnxt23v5_w3d2': sqnxt23v5_w3d2,
'sqnxt23v5_w2': sqnxt23v5_w2,
'shufflenet_g1_w1': shufflenet_g1_w1,
'shufflenet_g2_w1': shufflenet_g2_w1,
'shufflenet_g3_w1': shufflenet_g3_w1,
'shufflenet_g4_w1': shufflenet_g4_w1,
'shufflenet_g8_w1': shufflenet_g8_w1,
'shufflenet_g1_w3d4': shufflenet_g1_w3d4,
'shufflenet_g3_w3d4': shufflenet_g3_w3d4,
'shufflenet_g1_wd2': shufflenet_g1_wd2,
'shufflenet_g3_wd2': shufflenet_g3_wd2,
'shufflenet_g1_wd4': shufflenet_g1_wd4,
'shufflenet_g3_wd4': shufflenet_g3_wd4,
'shufflenetv2_wd2': shufflenetv2_wd2,
'shufflenetv2_w1': shufflenetv2_w1,
'shufflenetv2_w3d2': shufflenetv2_w3d2,
'shufflenetv2_w2': shufflenetv2_w2,
'shufflenetv2b_wd2': shufflenetv2b_wd2,
'shufflenetv2b_w1': shufflenetv2b_w1,
'shufflenetv2b_w3d2': shufflenetv2b_w3d2,
'shufflenetv2b_w2': shufflenetv2b_w2,
'menet108_8x1_g3': menet108_8x1_g3,
'menet128_8x1_g4': menet128_8x1_g4,
'menet160_8x1_g8': menet160_8x1_g8,
'menet228_12x1_g3': menet228_12x1_g3,
'menet256_12x1_g4': menet256_12x1_g4,
'menet348_12x1_g3': menet348_12x1_g3,
'menet352_12x1_g8': menet352_12x1_g8,
'menet456_24x1_g3': menet456_24x1_g3,
'mobilenet_w1': mobilenet_w1,
'mobilenet_w3d4': mobilenet_w3d4,
'mobilenet_wd2': mobilenet_wd2,
'mobilenet_wd4': mobilenet_wd4,
'fdmobilenet_w1': fdmobilenet_w1,
'fdmobilenet_w3d4': fdmobilenet_w3d4,
'fdmobilenet_wd2': fdmobilenet_wd2,
'fdmobilenet_wd4': fdmobilenet_wd4,
'mobilenetv2_w1': mobilenetv2_w1,
'mobilenetv2_w3d4': mobilenetv2_w3d4,
'mobilenetv2_wd2': mobilenetv2_wd2,
'mobilenetv2_wd4': mobilenetv2_wd4,
'mobilenetv3_small_w7d20': mobilenetv3_small_w7d20,
'mobilenetv3_small_wd2': mobilenetv3_small_wd2,
'mobilenetv3_small_w3d4': mobilenetv3_small_w3d4,
'mobilenetv3_small_w1': mobilenetv3_small_w1,
'mobilenetv3_small_w5d4': mobilenetv3_small_w5d4,
'mobilenetv3_large_w7d20': mobilenetv3_large_w7d20,
'mobilenetv3_large_wd2': mobilenetv3_large_wd2,
'mobilenetv3_large_w3d4': mobilenetv3_large_w3d4,
'mobilenetv3_large_w1': mobilenetv3_large_w1,
'mobilenetv3_large_w5d4': mobilenetv3_large_w5d4,
'igcv3_w1': igcv3_w1,
'igcv3_w3d4': igcv3_w3d4,
'igcv3_wd2': igcv3_wd2,
'igcv3_wd4': igcv3_wd4,
'mnasnet': mnasnet,
'darts': darts,
'proxylessnas_cpu': proxylessnas_cpu,
'proxylessnas_gpu': proxylessnas_gpu,
'proxylessnas_mobile': proxylessnas_mobile,
'proxylessnas_mobile14': proxylessnas_mobile14,
'xception': xception,
'inceptionv3': inceptionv3,
'inceptionv4': inceptionv4,
'inceptionresnetv2': inceptionresnetv2,
'polynet': polynet,
'nasnet_4a1056': nasnet_4a1056,
'nasnet_6a4032': nasnet_6a4032,
'pnasnet5large': pnasnet5large,
'efficientnet_b0': efficientnet_b0,
'efficientnet_b1': efficientnet_b1,
'efficientnet_b2': efficientnet_b2,
'efficientnet_b3': efficientnet_b3,
'efficientnet_b4': efficientnet_b4,
'efficientnet_b5': efficientnet_b5,
'efficientnet_b6': efficientnet_b6,
'efficientnet_b7': efficientnet_b7,
'efficientnet_b0b': efficientnet_b0b,
'efficientnet_b1b': efficientnet_b1b,
'efficientnet_b2b': efficientnet_b2b,
'efficientnet_b3b': efficientnet_b3b,
'efficientnet_b4b': efficientnet_b4b,
'efficientnet_b5b': efficientnet_b5b,
'efficientnet_b6b': efficientnet_b6b,
'efficientnet_b7b': efficientnet_b7b,
'nin_cifar10': nin_cifar10,
'nin_cifar100': nin_cifar100,
'nin_svhn': nin_svhn,
'resnet20_cifar10': resnet20_cifar10,
'resnet20_cifar100': resnet20_cifar100,
'resnet20_svhn': resnet20_svhn,
'resnet56_cifar10': resnet56_cifar10,
'resnet56_cifar100': resnet56_cifar100,
'resnet56_svhn': resnet56_svhn,
'resnet110_cifar10': resnet110_cifar10,
'resnet110_cifar100': resnet110_cifar100,
'resnet110_svhn': resnet110_svhn,
'resnet164bn_cifar10': resnet164bn_cifar10,
'resnet164bn_cifar100': resnet164bn_cifar100,
'resnet164bn_svhn': resnet164bn_svhn,
'resnet272bn_cifar10': resnet272bn_cifar10,
'resnet272bn_cifar100': resnet272bn_cifar100,
'resnet272bn_svhn': resnet272bn_svhn,
'resnet542bn_cifar10': resnet542bn_cifar10,
'resnet542bn_cifar100': resnet542bn_cifar100,
'resnet542bn_svhn': resnet542bn_svhn,
'resnet1001_cifar10': resnet1001_cifar10,
'resnet1001_cifar100': resnet1001_cifar100,
'resnet1001_svhn': resnet1001_svhn,
'resnet1202_cifar10': resnet1202_cifar10,
'resnet1202_cifar100': resnet1202_cifar100,
'resnet1202_svhn': resnet1202_svhn,
'preresnet20_cifar10': preresnet20_cifar10,
'preresnet20_cifar100': preresnet20_cifar100,
'preresnet20_svhn': preresnet20_svhn,
'preresnet56_cifar10': preresnet56_cifar10,
'preresnet56_cifar100': preresnet56_cifar100,
'preresnet56_svhn': preresnet56_svhn,
'preresnet110_cifar10': preresnet110_cifar10,
'preresnet110_cifar100': preresnet110_cifar100,
'preresnet110_svhn': preresnet110_svhn,
'preresnet164bn_cifar10': preresnet164bn_cifar10,
'preresnet164bn_cifar100': preresnet164bn_cifar100,
'preresnet164bn_svhn': preresnet164bn_svhn,
'preresnet272bn_cifar10': preresnet272bn_cifar10,
'preresnet272bn_cifar100': preresnet272bn_cifar100,
'preresnet272bn_svhn': preresnet272bn_svhn,
'preresnet542bn_cifar10': preresnet542bn_cifar10,
'preresnet542bn_cifar100': preresnet542bn_cifar100,
'preresnet542bn_svhn': preresnet542bn_svhn,
'preresnet1001_cifar10': preresnet1001_cifar10,
'preresnet1001_cifar100': preresnet1001_cifar100,
'preresnet1001_svhn': preresnet1001_svhn,
'preresnet1202_cifar10': preresnet1202_cifar10,
'preresnet1202_cifar100': preresnet1202_cifar100,
'preresnet1202_svhn': preresnet1202_svhn,
'resnext20_16x4d_cifar10': resnext20_16x4d_cifar10,
'resnext20_16x4d_cifar100': resnext20_16x4d_cifar100,
'resnext20_16x4d_svhn': resnext20_16x4d_svhn,
'resnext20_32x2d_cifar10': resnext20_32x2d_cifar10,
'resnext20_32x2d_cifar100': resnext20_32x2d_cifar100,
'resnext20_32x2d_svhn': resnext20_32x2d_svhn,
'resnext20_32x4d_cifar10': resnext20_32x4d_cifar10,
'resnext20_32x4d_cifar100': resnext20_32x4d_cifar100,
'resnext20_32x4d_svhn': resnext20_32x4d_svhn,
'resnext29_32x4d_cifar10': resnext29_32x4d_cifar10,
'resnext29_32x4d_cifar100': resnext29_32x4d_cifar100,
'resnext29_32x4d_svhn': resnext29_32x4d_svhn,
'resnext29_16x64d_cifar10': resnext29_16x64d_cifar10,
'resnext29_16x64d_cifar100': resnext29_16x64d_cifar100,
'resnext29_16x64d_svhn': resnext29_16x64d_svhn,
'resnext272_1x64d_cifar10': resnext272_1x64d_cifar10,
'resnext272_1x64d_cifar100': resnext272_1x64d_cifar100,
'resnext272_1x64d_svhn': resnext272_1x64d_svhn,
'resnext272_2x32d_cifar10': resnext272_2x32d_cifar10,
'resnext272_2x32d_cifar100': resnext272_2x32d_cifar100,
'resnext272_2x32d_svhn': resnext272_2x32d_svhn,
'seresnet20_cifar10': seresnet20_cifar10,
'seresnet20_cifar100': seresnet20_cifar100,
'seresnet20_svhn': seresnet20_svhn,
'seresnet56_cifar10': seresnet56_cifar10,
'seresnet56_cifar100': seresnet56_cifar100,
'seresnet56_svhn': seresnet56_svhn,
'seresnet110_cifar10': seresnet110_cifar10,
'seresnet110_cifar100': seresnet110_cifar100,
'seresnet110_svhn': seresnet110_svhn,
'seresnet164bn_cifar10': seresnet164bn_cifar10,
'seresnet164bn_cifar100': seresnet164bn_cifar100,
'seresnet164bn_svhn': seresnet164bn_svhn,
'seresnet272bn_cifar10': seresnet272bn_cifar10,
'seresnet272bn_cifar100': seresnet272bn_cifar100,
'seresnet272bn_svhn': seresnet272bn_svhn,
'seresnet542bn_cifar10': seresnet542bn_cifar10,
'seresnet542bn_cifar100': seresnet542bn_cifar100,
'seresnet542bn_svhn': seresnet542bn_svhn,
'seresnet1001_cifar10': seresnet1001_cifar10,
'seresnet1001_cifar100': seresnet1001_cifar100,
'seresnet1001_svhn': seresnet1001_svhn,
'seresnet1202_cifar10': seresnet1202_cifar10,
'seresnet1202_cifar100': seresnet1202_cifar100,
'seresnet1202_svhn': seresnet1202_svhn,
'sepreresnet20_cifar10': sepreresnet20_cifar10,
'sepreresnet20_cifar100': sepreresnet20_cifar100,
'sepreresnet20_svhn': sepreresnet20_svhn,
'sepreresnet56_cifar10': sepreresnet56_cifar10,
'sepreresnet56_cifar100': sepreresnet56_cifar100,
'sepreresnet56_svhn': sepreresnet56_svhn,
'sepreresnet110_cifar10': sepreresnet110_cifar10,
'sepreresnet110_cifar100': sepreresnet110_cifar100,
'sepreresnet110_svhn': sepreresnet110_svhn,
'sepreresnet164bn_cifar10': sepreresnet164bn_cifar10,
'sepreresnet164bn_cifar100': sepreresnet164bn_cifar100,
'sepreresnet164bn_svhn': sepreresnet164bn_svhn,
'sepreresnet272bn_cifar10': sepreresnet272bn_cifar10,
'sepreresnet272bn_cifar100': sepreresnet272bn_cifar100,
'sepreresnet272bn_svhn': sepreresnet272bn_svhn,
'sepreresnet542bn_cifar10': sepreresnet542bn_cifar10,
'sepreresnet542bn_cifar100': sepreresnet542bn_cifar100,
'sepreresnet542bn_svhn': sepreresnet542bn_svhn,
'sepreresnet1001_cifar10': sepreresnet1001_cifar10,
'sepreresnet1001_cifar100': sepreresnet1001_cifar100,
'sepreresnet1001_svhn': sepreresnet1001_svhn,
'sepreresnet1202_cifar10': sepreresnet1202_cifar10,
'sepreresnet1202_cifar100': sepreresnet1202_cifar100,
'sepreresnet1202_svhn': sepreresnet1202_svhn,
'pyramidnet110_a48_cifar10': pyramidnet110_a48_cifar10,
'pyramidnet110_a48_cifar100': pyramidnet110_a48_cifar100,
'pyramidnet110_a48_svhn': pyramidnet110_a48_svhn,
'pyramidnet110_a84_cifar10': pyramidnet110_a84_cifar10,
'pyramidnet110_a84_cifar100': pyramidnet110_a84_cifar100,
'pyramidnet110_a84_svhn': pyramidnet110_a84_svhn,
'pyramidnet110_a270_cifar10': pyramidnet110_a270_cifar10,
'pyramidnet110_a270_cifar100': pyramidnet110_a270_cifar100,
'pyramidnet110_a270_svhn': pyramidnet110_a270_svhn,
'pyramidnet164_a270_bn_cifar10': pyramidnet164_a270_bn_cifar10,
'pyramidnet164_a270_bn_cifar100': pyramidnet164_a270_bn_cifar100,
'pyramidnet164_a270_bn_svhn': pyramidnet164_a270_bn_svhn,
'pyramidnet200_a240_bn_cifar10': pyramidnet200_a240_bn_cifar10,
'pyramidnet200_a240_bn_cifar100': pyramidnet200_a240_bn_cifar100,
'pyramidnet200_a240_bn_svhn': pyramidnet200_a240_bn_svhn,
'pyramidnet236_a220_bn_cifar10': pyramidnet236_a220_bn_cifar10,
'pyramidnet236_a220_bn_cifar100': pyramidnet236_a220_bn_cifar100,
'pyramidnet236_a220_bn_svhn': pyramidnet236_a220_bn_svhn,
'pyramidnet272_a200_bn_cifar10': pyramidnet272_a200_bn_cifar10,
'pyramidnet272_a200_bn_cifar100': pyramidnet272_a200_bn_cifar100,
'pyramidnet272_a200_bn_svhn': pyramidnet272_a200_bn_svhn,
'densenet40_k12_cifar10': densenet40_k12_cifar10,
'densenet40_k12_cifar100': densenet40_k12_cifar100,
'densenet40_k12_svhn': densenet40_k12_svhn,
'densenet40_k12_bc_cifar10': densenet40_k12_bc_cifar10,
'densenet40_k12_bc_cifar100': densenet40_k12_bc_cifar100,
'densenet40_k12_bc_svhn': densenet40_k12_bc_svhn,
'densenet40_k24_bc_cifar10': densenet40_k24_bc_cifar10,
'densenet40_k24_bc_cifar100': densenet40_k24_bc_cifar100,
'densenet40_k24_bc_svhn': densenet40_k24_bc_svhn,
'densenet40_k36_bc_cifar10': densenet40_k36_bc_cifar10,
'densenet40_k36_bc_cifar100': densenet40_k36_bc_cifar100,
'densenet40_k36_bc_svhn': densenet40_k36_bc_svhn,
'densenet100_k12_cifar10': densenet100_k12_cifar10,
'densenet100_k12_cifar100': densenet100_k12_cifar100,
'densenet100_k12_svhn': densenet100_k12_svhn,
'densenet100_k24_cifar10': densenet100_k24_cifar10,
'densenet100_k24_cifar100': densenet100_k24_cifar100,
'densenet100_k24_svhn': densenet100_k24_svhn,
'densenet100_k12_bc_cifar10': densenet100_k12_bc_cifar10,
'densenet100_k12_bc_cifar100': densenet100_k12_bc_cifar100,
'densenet100_k12_bc_svhn': densenet100_k12_bc_svhn,
'densenet190_k40_bc_cifar10': densenet190_k40_bc_cifar10,
'densenet190_k40_bc_cifar100': densenet190_k40_bc_cifar100,
'densenet190_k40_bc_svhn': densenet190_k40_bc_svhn,
'densenet250_k24_bc_cifar10': densenet250_k24_bc_cifar10,
'densenet250_k24_bc_cifar100': densenet250_k24_bc_cifar100,
'densenet250_k24_bc_svhn': densenet250_k24_bc_svhn,
'xdensenet40_2_k24_bc_cifar10': xdensenet40_2_k24_bc_cifar10,
'xdensenet40_2_k24_bc_cifar100': xdensenet40_2_k24_bc_cifar100,
'xdensenet40_2_k24_bc_svhn': xdensenet40_2_k24_bc_svhn,
'xdensenet40_2_k36_bc_cifar10': xdensenet40_2_k36_bc_cifar10,
'xdensenet40_2_k36_bc_cifar100': xdensenet40_2_k36_bc_cifar100,
'xdensenet40_2_k36_bc_svhn': xdensenet40_2_k36_bc_svhn,
'wrn16_10_cifar10': wrn16_10_cifar10,
'wrn16_10_cifar100': wrn16_10_cifar100,
'wrn16_10_svhn': wrn16_10_svhn,
'wrn28_10_cifar10': wrn28_10_cifar10,
'wrn28_10_cifar100': wrn28_10_cifar100,
'wrn28_10_svhn': wrn28_10_svhn,
'wrn40_8_cifar10': wrn40_8_cifar10,
'wrn40_8_cifar100': wrn40_8_cifar100,
'wrn40_8_svhn': wrn40_8_svhn,
'wrn20_10_1bit_cifar10': wrn20_10_1bit_cifar10,
'wrn20_10_1bit_cifar100': wrn20_10_1bit_cifar100,
'wrn20_10_1bit_svhn': wrn20_10_1bit_svhn,
'wrn20_10_32bit_cifar10': wrn20_10_32bit_cifar10,
'wrn20_10_32bit_cifar100': wrn20_10_32bit_cifar100,
'wrn20_10_32bit_svhn': wrn20_10_32bit_svhn,
'ror3_56_cifar10': ror3_56_cifar10,
'ror3_56_cifar100': ror3_56_cifar100,
'ror3_56_svhn': ror3_56_svhn,
'ror3_110_cifar10': ror3_110_cifar10,
'ror3_110_cifar100': ror3_110_cifar100,
'ror3_110_svhn': ror3_110_svhn,
'ror3_164_cifar10': ror3_164_cifar10,
'ror3_164_cifar100': ror3_164_cifar100,
'ror3_164_svhn': ror3_164_svhn,
'rir_cifar10': rir_cifar10,
'rir_cifar100': rir_cifar100,
'rir_svhn': rir_svhn,
'msdnet22_cifar10': msdnet22_cifar10,
'resdropresnet20_cifar10': resdropresnet20_cifar10,
'resdropresnet20_cifar100': resdropresnet20_cifar100,
'resdropresnet20_svhn': resdropresnet20_svhn,
'shakeshakeresnet20_2x16d_cifar10': shakeshakeresnet20_2x16d_cifar10,
'shakeshakeresnet20_2x16d_cifar100': shakeshakeresnet20_2x16d_cifar100,
'shakeshakeresnet20_2x16d_svhn': shakeshakeresnet20_2x16d_svhn,
'shakeshakeresnet26_2x32d_cifar10': shakeshakeresnet26_2x32d_cifar10,
'shakeshakeresnet26_2x32d_cifar100': shakeshakeresnet26_2x32d_cifar100,
'shakeshakeresnet26_2x32d_svhn': shakeshakeresnet26_2x32d_svhn,
'shakedropresnet20_cifar10': shakedropresnet20_cifar10,
'shakedropresnet20_cifar100': shakedropresnet20_cifar100,
'shakedropresnet20_svhn': shakedropresnet20_svhn,
'fractalnet_cifar10': fractalnet_cifar10,
'fractalnet_cifar100': fractalnet_cifar100,
'diaresnet20_cifar10': diaresnet20_cifar10,
'diaresnet20_cifar100': diaresnet20_cifar100,
'diaresnet20_svhn': diaresnet20_svhn,
'diaresnet56_cifar10': diaresnet56_cifar10,
'diaresnet56_cifar100': diaresnet56_cifar100,
'diaresnet56_svhn': diaresnet56_svhn,
'diaresnet110_cifar10': diaresnet110_cifar10,
'diaresnet110_cifar100': diaresnet110_cifar100,
'diaresnet110_svhn': diaresnet110_svhn,
'diaresnet164bn_cifar10': diaresnet164bn_cifar10,
'diaresnet164bn_cifar100': diaresnet164bn_cifar100,
'diaresnet164bn_svhn': diaresnet164bn_svhn,
'diaresnet1001_cifar10': diaresnet1001_cifar10,
'diaresnet1001_cifar100': diaresnet1001_cifar100,
'diaresnet1001_svhn': diaresnet1001_svhn,
'diaresnet1202_cifar10': diaresnet1202_cifar10,
'diaresnet1202_cifar100': diaresnet1202_cifar100,
'diaresnet1202_svhn': diaresnet1202_svhn,
'diapreresnet20_cifar10': diapreresnet20_cifar10,
'diapreresnet20_cifar100': diapreresnet20_cifar100,
'diapreresnet20_svhn': diapreresnet20_svhn,
'diapreresnet56_cifar10': diapreresnet56_cifar10,
'diapreresnet56_cifar100': diapreresnet56_cifar100,
'diapreresnet56_svhn': diapreresnet56_svhn,
'diapreresnet110_cifar10': diapreresnet110_cifar10,
'diapreresnet110_cifar100': diapreresnet110_cifar100,
'diapreresnet110_svhn': diapreresnet110_svhn,
'diapreresnet164bn_cifar10': diapreresnet164bn_cifar10,
'diapreresnet164bn_cifar100': diapreresnet164bn_cifar100,
'diapreresnet164bn_svhn': diapreresnet164bn_svhn,
'diapreresnet1001_cifar10': diapreresnet1001_cifar10,
'diapreresnet1001_cifar100': diapreresnet1001_cifar100,
'diapreresnet1001_svhn': diapreresnet1001_svhn,
'diapreresnet1202_cifar10': diapreresnet1202_cifar10,
'diapreresnet1202_cifar100': diapreresnet1202_cifar100,
'diapreresnet1202_svhn': diapreresnet1202_svhn,
'isqrtcovresnet18': isqrtcovresnet18,
'isqrtcovresnet34': isqrtcovresnet34,
'isqrtcovresnet50': isqrtcovresnet50,
'isqrtcovresnet50b': isqrtcovresnet50b,
'isqrtcovresnet101': isqrtcovresnet101,
'isqrtcovresnet101b': isqrtcovresnet101b,
'resnetd50b': resnetd50b,
'resnetd101b': resnetd101b,
'resnetd152b': resnetd152b,
'octresnet10_ad2': octresnet10_ad2,
'octresnet50b_ad2': octresnet50b_ad2,
'resnet10_cub': resnet10_cub,
'resnet12_cub': resnet12_cub,
'resnet14_cub': resnet14_cub,
'resnetbc14b_cub': resnetbc14b_cub,
'resnet16_cub': resnet16_cub,
'resnet18_cub': resnet18_cub,
'resnet26_cub': resnet26_cub,
'resnetbc26b_cub': resnetbc26b_cub,
'resnet34_cub': resnet34_cub,
'resnetbc38b_cub': resnetbc38b_cub,
'resnet50_cub': resnet50_cub,
'resnet50b_cub': resnet50b_cub,
'resnet101_cub': resnet101_cub,
'resnet101b_cub': resnet101b_cub,
'resnet152_cub': resnet152_cub,
'resnet152b_cub': resnet152b_cub,
'resnet200_cub': resnet200_cub,
'resnet200b_cub': resnet200b_cub,
'seresnet10_cub': seresnet10_cub,
'seresnet12_cub': seresnet12_cub,
'seresnet14_cub': seresnet14_cub,
'seresnetbc14b_cub': seresnetbc14b_cub,
'seresnet16_cub': seresnet16_cub,
'seresnet18_cub': seresnet18_cub,
'seresnet26_cub': seresnet26_cub,
'seresnetbc26b_cub': seresnetbc26b_cub,
'seresnet34_cub': seresnet34_cub,
'seresnetbc38b_cub': seresnetbc38b_cub,
'seresnet50_cub': seresnet50_cub,
'seresnet50b_cub': seresnet50b_cub,
'seresnet101_cub': seresnet101_cub,
'seresnet101b_cub': seresnet101b_cub,
'seresnet152_cub': seresnet152_cub,
'seresnet152b_cub': seresnet152b_cub,
'seresnet200_cub': seresnet200_cub,
'seresnet200b_cub': seresnet200b_cub,
'mobilenet_w1_cub': mobilenet_w1_cub,
'mobilenet_w3d4_cub': mobilenet_w3d4_cub,
'mobilenet_wd2_cub': mobilenet_wd2_cub,
'mobilenet_wd4_cub': mobilenet_wd4_cub,
'fdmobilenet_w1_cub': fdmobilenet_w1_cub,
'fdmobilenet_w3d4_cub': fdmobilenet_w3d4_cub,
'fdmobilenet_wd2_cub': fdmobilenet_wd2_cub,
'fdmobilenet_wd4_cub': fdmobilenet_wd4_cub,
'proxylessnas_cpu_cub': proxylessnas_cpu_cub,
'proxylessnas_gpu_cub': proxylessnas_gpu_cub,
'proxylessnas_mobile_cub': proxylessnas_mobile_cub,
'proxylessnas_mobile14_cub': proxylessnas_mobile14_cub,
'ntsnet_cub': ntsnet_cub,
'fcn8sd_resnetd50b_voc': fcn8sd_resnetd50b_voc,
'fcn8sd_resnetd101b_voc': fcn8sd_resnetd101b_voc,
'fcn8sd_resnetd50b_coco': fcn8sd_resnetd50b_coco,
'fcn8sd_resnetd101b_coco': fcn8sd_resnetd101b_coco,
'fcn8sd_resnetd50b_ade20k': fcn8sd_resnetd50b_ade20k,
'fcn8sd_resnetd101b_ade20k': fcn8sd_resnetd101b_ade20k,
'fcn8sd_resnetd50b_cityscapes': fcn8sd_resnetd50b_cityscapes,
'fcn8sd_resnetd101b_cityscapes': fcn8sd_resnetd101b_cityscapes,
'pspnet_resnetd50b_voc': pspnet_resnetd50b_voc,
'pspnet_resnetd101b_voc': pspnet_resnetd101b_voc,
'pspnet_resnetd50b_coco': pspnet_resnetd50b_coco,
'pspnet_resnetd101b_coco': pspnet_resnetd101b_coco,
'pspnet_resnetd50b_ade20k': pspnet_resnetd50b_ade20k,
'pspnet_resnetd101b_ade20k': pspnet_resnetd101b_ade20k,
'pspnet_resnetd50b_cityscapes': pspnet_resnetd50b_cityscapes,
'pspnet_resnetd101b_cityscapes': pspnet_resnetd101b_cityscapes,
'deeplabv3_resnetd50b_voc': deeplabv3_resnetd50b_voc,
'deeplabv3_resnetd101b_voc': deeplabv3_resnetd101b_voc,
'deeplabv3_resnetd152b_voc': deeplabv3_resnetd152b_voc,
'deeplabv3_resnetd50b_coco': deeplabv3_resnetd50b_coco,
'deeplabv3_resnetd101b_coco': deeplabv3_resnetd101b_coco,
'deeplabv3_resnetd152b_coco': deeplabv3_resnetd152b_coco,
'deeplabv3_resnetd50b_ade20k': deeplabv3_resnetd50b_ade20k,
'deeplabv3_resnetd101b_ade20k': deeplabv3_resnetd101b_ade20k,
'deeplabv3_resnetd50b_cityscapes': deeplabv3_resnetd50b_cityscapes,
'deeplabv3_resnetd101b_cityscapes': deeplabv3_resnetd101b_cityscapes,
'superpointnet': superpointnet,
# 'oth_superpointnet': oth_superpointnet,
}
def get_model(name, **kwargs):
"""
    Get a supported model by name.

    Parameters:
    ----------
    name : str
        Name of the model.
    **kwargs : keyword arguments
        Extra arguments forwarded to the model constructor.

    Returns
    -------
    Module
        Resulting model.
"""
name = name.lower()
if name not in _models:
raise ValueError("Unsupported model: {}".format(name))
net = _models[name](**kwargs)
return net
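# A minimal usage sketch: `_models` is a plain name-to-constructor registry,
# so `get_model` lower-cases the name and forwards any keyword arguments to
# the matching constructor. The `pretrained` flag is an assumption based on
# the usual constructor signature in this library.
if __name__ == "__main__":
    net = get_model("resnet18", pretrained=False)  # build ResNet-18 from the registry
    print(type(net).__name__)                      # the concrete nn.Module subclass
    print(len(_models), "model names registered")  # size of the registry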
| 36.31012
| 75
| 0.763133
|
64d682529628358b7708fa54be592ddac85bbbd8
| 3,845
|
py
|
Python
|
configs/retinanet_r50_fpn_1x.py
|
ShegnkaiWu/IoU-aware-single-stage-object-detector-for-accurate-localization
|
67b8955eb59137590dbadc6aac45529ae9459e4a
|
[
"Apache-2.0"
] | 62
|
2020-04-15T09:01:23.000Z
|
2022-02-24T04:27:52.000Z
|
configs/retinanet_r50_fpn_1x.py
|
ShegnkaiWu/IoU-aware-single-stage-object-detector-for-accurate-localization
|
67b8955eb59137590dbadc6aac45529ae9459e4a
|
[
"Apache-2.0"
] | 10
|
2020-04-15T09:05:19.000Z
|
2022-01-04T08:05:59.000Z
|
configs/retinanet_r50_fpn_1x.py
|
ShegnkaiWu/IoU-aware-single-stage-object-detector-for-accurate-localization
|
67b8955eb59137590dbadc6aac45529ae9459e4a
|
[
"Apache-2.0"
] | 10
|
2020-04-28T06:58:09.000Z
|
2021-11-18T00:57:34.000Z
|
# model settings
model = dict(
type='RetinaNet',
pretrained='modelzoo://resnet50',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs=True,
num_outs=5),
bbox_head=dict(
type='RetinaHead',
num_classes=81,
in_channels=256,
stacked_convs=4,
feat_channels=256,
octave_base_scale=4,
scales_per_octave=3,
anchor_ratios=[0.5, 1.0, 2.0],
anchor_strides=[8, 16, 32, 64, 128],
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0)))
# training and testing settings
train_cfg = dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False,
# added by WSK
    # If use_diff_thr is set to True, the type of loss_cls must be
    # 'IOUbalancedSigmoidFocalLoss'.
    use_diff_thr=False,
    assigner_cls=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.4,
neg_iou_thr=0.4,
min_pos_iou=0,
ignore_iof_thr=-1)
)
test_cfg = dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_thr=0.5),
max_per_img=100)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
data = dict(
imgs_per_gpu=4,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
img_scale=(1000, 600),
# img_scale=(1333, 800),
img_norm_cfg=img_norm_cfg,
size_divisor=32,
flip_ratio=0.5,
with_mask=False,
with_crowd=False,
with_label=True),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
img_scale=(1000, 600),
# img_scale=(1333, 800),
img_norm_cfg=img_norm_cfg,
size_divisor=32,
flip_ratio=0,
with_mask=False,
with_crowd=False,
with_label=True),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
img_scale=(1000, 600),
# img_scale=(1333, 800),
img_norm_cfg=img_norm_cfg,
size_divisor=32,
flip_ratio=0,
with_mask=False,
with_crowd=False,
with_label=False,
test_mode=True))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[8, 11])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 12
device_ids = range(8)
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/retinanet_r50_fpn_1x'
load_from = None
resume_from = None
workflow = [('train', 1)]
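# A minimal loading sketch, assuming the mmcv/mmdetection-style tooling this
# config targets: the file is plain Python that gets read into a
# dot-accessible Config object.
from mmcv import Config

cfg = Config.fromfile('configs/retinanet_r50_fpn_1x.py')
print(cfg.model.bbox_head.num_classes)  # 81: 80 COCO classes plus background
print(cfg.total_epochs)                 # 12, i.e. the "1x" schedule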
| 27.661871
| 100
| 0.604421
|
64564361bb6c1bfba340953e9b3c903948023592
| 8,793
|
py
|
Python
|
python_modules/libraries/dagster-pandas/dagster_pandas_tests/test_structured_df_types.py
|
johannkm/dagster-okteto
|
7ad30528a4a92945967d68e59e27727a1e839c2b
|
[
"Apache-2.0"
] | 1
|
2020-08-10T23:03:37.000Z
|
2020-08-10T23:03:37.000Z
|
python_modules/libraries/dagster-pandas/dagster_pandas_tests/test_structured_df_types.py
|
johannkm/dagster-okteto
|
7ad30528a4a92945967d68e59e27727a1e839c2b
|
[
"Apache-2.0"
] | null | null | null |
python_modules/libraries/dagster-pandas/dagster_pandas_tests/test_structured_df_types.py
|
johannkm/dagster-okteto
|
7ad30528a4a92945967d68e59e27727a1e839c2b
|
[
"Apache-2.0"
] | 1
|
2020-08-20T14:20:31.000Z
|
2020-08-20T14:20:31.000Z
|
from dagster_pandas.constraints import (
ColumnWithMetadataException,
ConstraintWithMetadataException,
MultiAggregateConstraintWithMetadata,
MultiColumnConstraintWithMetadata,
StrictColumnsWithMetadata,
all_unique_validator,
column_range_validation_factory,
dtype_in_set_validation_factory,
nonnull,
)
from dagster_pandas.data_frame import create_structured_dataframe_type
from numpy import float64, int64
from pandas import DataFrame
from dagster import DagsterType, Output, OutputDefinition, execute_pipeline, pipeline, solid
dtype_is_num_validator = nonnull(dtype_in_set_validation_factory((int, float, int64, float64)))
in_range_validator = column_range_validation_factory(1, 3, ignore_missing_vals=True)
column_validator = MultiColumnConstraintWithMetadata(
"confirms values are numbers in a range",
{'foo': [dtype_is_num_validator, in_range_validator], 'bar': [dtype_is_num_validator]},
ColumnWithMetadataException,
raise_or_typecheck=False,
)
aggregate_validator = MultiAggregateConstraintWithMetadata(
"confirms all values are unique",
{'bar': [all_unique_validator]},
ConstraintWithMetadataException,
raise_or_typecheck=False,
)
dataframe_validator = StrictColumnsWithMetadata(["foo", "bar"], raise_or_typecheck=False)
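# A quick sanity sketch of the factory-produced validators above, assuming
# they follow the (passed, metadata) return convention that the
# dagster_pandas constraint classes rely on:
assert in_range_validator(2)[0]            # 2 lies inside [1, 3]
assert not in_range_validator(7)[0]        # 7 falls outside the range
assert dtype_is_num_validator(3.5)[0]      # floats are in the allowed dtype set
assert not dtype_is_num_validator('a')[0]  # strings are rejected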
def test_structured_type_creation():
ntype = create_structured_dataframe_type(
"NumericType",
columns_validator=column_validator,
columns_aggregate_validator=aggregate_validator,
dataframe_validator=dataframe_validator,
)
assert isinstance(ntype, DagsterType)
def test_successful_type_eval():
ntype = create_structured_dataframe_type(
"NumericType",
columns_validator=column_validator,
columns_aggregate_validator=aggregate_validator,
dataframe_validator=dataframe_validator,
)
@solid(output_defs=[OutputDefinition(name='basic_dataframe', dagster_type=ntype)])
def create_dataframe(_):
yield Output(
DataFrame({'foo': [1, 2, 3], 'bar': [9, 10, 11]}), output_name='basic_dataframe',
)
@pipeline
def basic_pipeline():
return create_dataframe()
result = execute_pipeline(basic_pipeline)
assert result.success
def test_failing_type_eval_column():
ntype = create_structured_dataframe_type(
"NumericType",
columns_validator=column_validator,
columns_aggregate_validator=aggregate_validator,
dataframe_validator=dataframe_validator,
)
@solid(output_defs=[OutputDefinition(name='basic_dataframe', dagster_type=ntype)])
def create_dataframe(_):
yield Output(
DataFrame({'foo': [1, 'a', 7], 'bar': [9, 10, 11]}), output_name='basic_dataframe',
)
@pipeline
def basic_pipeline():
return create_dataframe()
result = execute_pipeline(basic_pipeline, raise_on_error=False)
output = [item for item in result.step_event_list if item.is_successful_output][0]
output_data = output.event_specific_data.type_check_data
output_metadata = output_data.metadata_entries
assert len(output_metadata) == 1
column_const = output_metadata[0]
assert column_const.label == 'columns-constraint-metadata'
column_const_data = column_const.entry_data.data
assert column_const_data['expected'] == {
'foo': {
'in_range_validation_fn': in_range_validator.__doc__.strip(),
'dtype_in_set_validation_fn': dtype_is_num_validator.__doc__.strip(),
}
}
assert column_const_data['offending'] == {
'foo': {
'dtype_in_set_validation_fn': ['row 1'],
'in_range_validation_fn': ['row 1', 'row 2'],
}
}
assert column_const_data['actual'] == {
'foo': {'dtype_in_set_validation_fn': ['a'], 'in_range_validation_fn': ['a', 7]}
}
def test_failing_type_eval_aggregate():
ntype = create_structured_dataframe_type(
"NumericType",
columns_validator=column_validator,
columns_aggregate_validator=aggregate_validator,
dataframe_validator=dataframe_validator,
)
@solid(output_defs=[OutputDefinition(name='basic_dataframe', dagster_type=ntype)])
def create_dataframe(_):
yield Output(
DataFrame({'foo': [1, 2, 3], 'bar': [9, 10, 10]}), output_name='basic_dataframe',
)
@pipeline
def basic_pipeline():
return create_dataframe()
result = execute_pipeline(basic_pipeline, raise_on_error=False)
output = [item for item in result.step_event_list if item.is_successful_output][0]
output_data = output.event_specific_data.type_check_data
output_metadata = output_data.metadata_entries
assert len(output_metadata) == 1
column_const = output_metadata[0]
assert column_const.label == 'column-aggregates-constraint-metadata'
column_const_data = column_const.entry_data.data
assert column_const_data['expected'] == {
'bar': {'all_unique_validator': all_unique_validator.__doc__.strip()}
}
assert column_const_data['offending'] == {'bar': {'all_unique_validator': 'a violation'}}
assert column_const_data['actual'] == {'bar': {'all_unique_validator': [10.0]}}
def test_failing_type_eval_dataframe():
ntype = create_structured_dataframe_type(
"NumericType",
columns_validator=column_validator,
columns_aggregate_validator=aggregate_validator,
dataframe_validator=dataframe_validator,
)
@solid(output_defs=[OutputDefinition(name='basic_dataframe', dagster_type=ntype)])
def create_dataframe(_):
yield Output(
DataFrame({'foo': [1, 2, 3], 'baz': [9, 10, 10]}), output_name='basic_dataframe',
)
@pipeline
def basic_pipeline():
return create_dataframe()
result = execute_pipeline(basic_pipeline, raise_on_error=False)
output = [item for item in result.step_event_list if item.is_successful_output][0]
output_data = output.event_specific_data.type_check_data
output_metadata = output_data.metadata_entries
assert len(output_metadata) == 1
column_const = output_metadata[0]
assert column_const.label == 'dataframe-constraint-metadata'
column_const_data = column_const.entry_data.data
assert column_const_data['expected'] == ['foo', 'bar']
assert column_const_data['actual'] == {'extra_columns': ['baz'], 'missing_columns': ['bar']}
def test_failing_type_eval_multi_error():
ntype = create_structured_dataframe_type(
"NumericType",
columns_validator=column_validator,
columns_aggregate_validator=aggregate_validator,
dataframe_validator=dataframe_validator,
)
@solid(output_defs=[OutputDefinition(name='basic_dataframe', dagster_type=ntype)])
def create_dataframe(_):
yield Output(
DataFrame({'foo': [1, 'a', 7], 'baz': [9, 10, 10], 'bar': [9, 10, 10]}),
output_name='basic_dataframe',
)
@pipeline
def basic_pipeline():
return create_dataframe()
result = execute_pipeline(basic_pipeline, raise_on_error=False)
output = [item for item in result.step_event_list if item.is_successful_output][0]
output_data = output.event_specific_data.type_check_data
output_metadata = output_data.metadata_entries
assert len(output_metadata) == 3
agg_data = output_metadata[0]
assert agg_data.label == 'column-aggregates-constraint-metadata'
agg_metadata = agg_data.entry_data.data
assert agg_metadata['expected'] == {
'bar': {'all_unique_validator': all_unique_validator.__doc__.strip()}
}
assert agg_metadata['offending'] == {'bar': {'all_unique_validator': 'a violation'}}
assert agg_metadata['actual'] == {'bar': {'all_unique_validator': [10.0]}}
column_const = output_metadata[1]
assert column_const.label == 'columns-constraint-metadata'
column_const_data = column_const.entry_data.data
assert column_const_data['expected'] == {
'foo': {
'in_range_validation_fn': in_range_validator.__doc__.strip(),
'dtype_in_set_validation_fn': dtype_is_num_validator.__doc__.strip(),
}
}
assert column_const_data['offending'] == {
'foo': {
'dtype_in_set_validation_fn': ['row 1'],
'in_range_validation_fn': ['row 1', 'row 2'],
}
}
assert column_const_data['actual'] == {
'foo': {'dtype_in_set_validation_fn': ['a'], 'in_range_validation_fn': ['a', 7]}
}
df_data = output_metadata[2]
assert df_data.label == 'dataframe-constraint-metadata'
df_metadata = df_data.entry_data.data
assert df_metadata['expected'] == ['foo', 'bar']
assert df_metadata['actual'] == {'extra_columns': ['baz'], 'missing_columns': []}
| 37.417021
| 96
| 0.705561
|
8c2b0649a39b382b87bace37360871810f090884
| 838,155
|
py
|
Python
|
src/genie/libs/parser/iosxe/tests/test_show_interface.py
|
alsyz/genieparser
|
e80a219851aa074482f9cccee1cb9fb42216e225
|
[
"Apache-2.0"
] | null | null | null |
src/genie/libs/parser/iosxe/tests/test_show_interface.py
|
alsyz/genieparser
|
e80a219851aa074482f9cccee1cb9fb42216e225
|
[
"Apache-2.0"
] | null | null | null |
src/genie/libs/parser/iosxe/tests/test_show_interface.py
|
alsyz/genieparser
|
e80a219851aa074482f9cccee1cb9fb42216e225
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import sys
import unittest
from unittest.mock import Mock
from unittest.mock import patch
from textwrap import dedent
ats_mock = Mock()
# patch.dict treats extra keyword arguments as items to insert into the target
# dict, so only the mapping of modules to mock out is passed here.
with patch.dict('sys.modules', {'pyats': ats_mock}):
import genie.parsergen
from genie.parsergen import oper_fill
from genie.parsergen import oper_check
from genie.parsergen import oper_fill_tabular
from pyats.topology import Device
from genie.metaparser.util.exceptions import SchemaEmptyParserError
from genie.libs.parser.iosxe.show_interface import (
ShowInterfaces,
ShowIpInterface,
ShowIpv6Interface,
ShowInterfacesStats,
ShowInterfacesTrunk,
ShowInterfacesStatus,
ShowInterfacesCounters,
ShowInterfacesAccounting,
ShowInterfacesSwitchport,
ShowInterfacesDescription,
ShowIpInterfaceBriefPipeIp,
ShowIpInterfaceBriefPipeVlan,
ShowInterfaceTransceiverDetail)
class TestShowInterfaceParsergen(unittest.TestCase):
def test_tabular_parser(self):
self.showCommandOutput='''
R1#show ip interface brief
Interface IP-Address OK? Method Status Protocol
GigabitEthernet0/0 10.1.10.20 YES NVRAM up up
GigabitEthernet1/0/1 unassigned YES unset up up
GigabitEthernet1/0/10 unassigned YES unset down down
'''
self.outputDict = {'GigabitEthernet0/0': {'IP-Address': '10.1.10.20',
'Interface': 'GigabitEthernet0/0',
'Method': 'NVRAM',
'OK?': 'YES',
'Protocol': 'up',
'Status': 'up'},
'GigabitEthernet1/0/1': {'IP-Address': 'unassigned',
'Interface': 'GigabitEthernet1/0/1',
'Method': 'unset',
'OK?': 'YES',
'Protocol': 'up',
'Status': 'up'},
'GigabitEthernet1/0/10': {'IP-Address': 'unassigned',
'Interface': 'GigabitEthernet1/0/10',
'Method': 'unset',
'OK?': 'YES',
'Protocol': 'down',
'Status': 'down'}}
# Define how device stub will behave when accessed by production parser.
device_kwargs = {'is_connected.return_value':True,
'execute.return_value':dedent(self.showCommandOutput)}
device1 = Mock(**device_kwargs)
device1.name='router3'
result = genie.parsergen.oper_fill_tabular(device=device1,
show_command="show ip interface brief",
refresh_cache=True,
header_fields=
[ "Interface",
"IP-Address",
"OK\?",
"Method",
"Status",
"Protocol" ],
label_fields=
[ "Interface",
"IP-Address",
"OK?",
"Method",
"Status",
"Protocol" ],
index=[0])
self.assertEqual(result.entries, self.outputDict)
args, kwargs = device1.execute.call_args
self.assertTrue('show ip interface brief' in args,
msg='The expected command was not sent to the router')
#############################################################################
# unit test for show ip interface brief pipe ip
#############################################################################
class TestShowIpInterfacesBriefPipeIp(unittest.TestCase):
device = Device(name='aDevice')
empty_output = {'execute.return_value': ''}
golden_parsed_output = {'interface':
{'GigabitEthernet0/0': {'interface_ok': 'YES',
'interface_status': 'up',
'ip_address': '10.1.18.80',
'method': 'manual',
'protocol_status': 'up'}}}
golden_output = {'execute.return_value': '''
R1#sh ip int brief | i 10.1.18.80
GigabitEthernet0/0 10.1.18.80 YES manual up up
'''}
def test_golden(self):
self.device = Mock(**self.golden_output)
obj = ShowIpInterfaceBriefPipeIp(device=self.device)
parsed_output = obj.parse(ip='10.1.18.80')
self.assertEqual(parsed_output,self.golden_parsed_output)
def test_empty(self):
self.device1 = Mock(**self.empty_output)
obj = ShowIpInterfaceBriefPipeIp(device=self.device1)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse(ip='10.1.18.80')
# Commented out due to an old version of YANG; will enhance it later
# class test_show_interface_brief_pipe_vlan_yang(unittest.TestCase):
# device = Device(name='aDevice')
# device1 = Device(name='bDevice')
# golden_parsed_output = {'interface': {'Vlan1': {'vlan_id': {'1': {'ip_address': 'unassigned'}}},
# 'Vlan100': {'vlan_id': {'100': {'ip_address': '192.168.234.1'}}}}}
# class etree_holder():
# def __init__(self):
# self.data = ET.fromstring('''
# <data>
# <native xmlns="http://cisco.com/ns/yang/ned/ios">
# <interface>
# <Vlan>
# <name>1</name>
# <ip>
# <no-address>
# <address>False</address>
# </no-address>
# </ip>
# <shutdown/>
# </Vlan>
# <Vlan>
# <name>100</name>
# <ip>
# <address>
# <primary>
# <address>192.168.234.1</address>
# <mask>255.255.255.0</mask>
# </primary>
# </address>
# </ip>
# <ipv6>
# <address>
# <prefix-list>
# <prefix>2001::12:30/128</prefix>
# </prefix-list>
# </address>
# </ipv6>
# </Vlan>
# </interface>
# </native>
# </data>
# ''')
# golden_output = {'get.return_value': etree_holder()}
# def test_golden(self):
# self.device = Mock(**self.golden_output)
# intf_obj = ShowIpInterfaceBriefPipeVlan(device=self.device)
# intf_obj.context = Context.yang.value
# parsed_output = intf_obj.parse()
# self.assertEqual(parsed_output,self.golden_parsed_output)
# empty_parsed_output = {'interface': {}}
# class empty_etree_holder():
# def __init__(self):
# self.data = ET.fromstring('''
# <data>
# <native xmlns="http://cisco.com/ns/yang/ned/ios">
# <interface>
# <Vlan>
# </Vlan>
# </interface>
# </native>
# </data>
# ''')
# empty_output = {'get.return_value': empty_etree_holder()}
# def test_empty(self):
# self.device1 = Mock(**self.empty_output)
# intf_obj = ShowIpInterfaceBriefPipeVlan(device=self.device1)
# intf_obj.context = Context.yang.value
# parsed_output = intf_obj.parse()
# self.assertEqual(parsed_output,self.empty_parsed_output)
#############################################################################
# unit test for Show Interfaces switchport
#############################################################################
class TestShowInterfacesSwitchport(unittest.TestCase):
device = Device(name='aDevice')
empty_output = {'execute.return_value': ''}
golden_parsed_output = {
"GigabitEthernet1/0/4": {
"switchport_mode": "trunk",
"pruning_vlans": "2-1001",
'operational_mode': 'trunk',
"switchport_enable": True,
"trunk_vlans": "200-211",
"capture_mode": False,
"private_vlan": {
"native_vlan_tagging": True,
"encapsulation": "dot1q"
},
"access_vlan": "1",
"access_vlan_name": "default",
"unknown_unicast_blocked": False,
"native_vlan_tagging": True,
"unknown_multicast_blocked": False,
"protected": False,
"negotiation_of_trunk": True,
"capture_vlans": "all",
"encapsulation": {
"operational_encapsulation": "dot1q",
"native_vlan": "1",
"native_vlan_name": "default",
"administrative_encapsulation": "dot1q"
}
},
"GigabitEthernet1/0/2": {
"pruning_vlans": "2-1001",
"switchport_enable": True,
"unknown_multicast_blocked": False,
"trunk_vlans": "100-110",
"port_channel": {
"port_channel_int": "Port-channel12",
"port_channel_member": True
},
"access_vlan": "1",
"access_vlan_name": "default",
"operational_mode": "trunk",
"unknown_unicast_blocked": False,
"capture_mode": False,
"private_vlan": {
"native_vlan_tagging": True,
"encapsulation": "dot1q",
"operational": "10 (VLAN0010) 100 (VLAN0100)",
"trunk_mappings": "10 (VLAN0010) 100 (VLAN0100)"
},
"encapsulation": {
"operational_encapsulation": "dot1q",
"native_vlan": "1",
"native_vlan_name": "default",
"administrative_encapsulation": "dot1q"
},
"protected": False,
"native_vlan_tagging": True,
"negotiation_of_trunk": True,
"capture_vlans": "all",
"switchport_mode": "trunk"
},
"GigabitEthernet1/0/5": {
"switchport_mode": "static access",
"pruning_vlans": "2-1001",
"switchport_enable": True,
"trunk_vlans": "all",
'operational_mode': 'down',
"capture_mode": False,
"private_vlan": {
"native_vlan_tagging": True,
"encapsulation": "dot1q"
},
"access_vlan": "1",
"access_vlan_name": "default",
"unknown_unicast_blocked": False,
"native_vlan_tagging": True,
"unknown_multicast_blocked": False,
"protected": False,
"negotiation_of_trunk": False,
"capture_vlans": "all",
"encapsulation": {
"native_vlan": "1",
"native_vlan_name": "default",
"administrative_encapsulation": "dot1q"
}
},
"Port-channel12": {
"access_vlan": "100",
"access_vlan_name": "Falback-Data",
"switchport_enable": True,
"private_vlan": {
"encapsulation": "dot1q",
"native_vlan_tagging": True
},
"native_vlan_tagging": False,
"negotiation_of_trunk": True,
"unknown_unicast_blocked": False,
"protected": False,
"encapsulation": {
"administrative_encapsulation": "dot1q",
"native_vlan": "0",
"native_vlan_name": "Inactive"
},
"switchport_mode": "trunk",
"unknown_multicast_blocked": False,
"trunk_vlans": "100,101,110-120,121,130,170,180,333-355,366-400,540,601-605,606,607,1111,1113,1200-1234,1400-1456,1567",
"operational_mode": "down",
"pruning_vlans": "2-1001",
"port_channel": {
"port_channel_member": True,
"port_channel_member_intfs": [
"GigabitEthernet1/0/2"
]
},
"voice_vlan": "100",
"voice_vlan_name": "Fallback-Voice"
}
}
golden_output = {'execute.return_value': '''
Name: Gi1/0/2
Switchport: Enabled
Administrative Mode: trunk
Operational Mode: trunk (member of bundle Po12)
Administrative Trunking Encapsulation: dot1q
Operational Trunking Encapsulation: dot1q
Negotiation of Trunking: On
Access Mode VLAN: 1 (default)
Trunking Native Mode VLAN: 1 (default)
Administrative Native VLAN tagging: enabled
Voice VLAN: none
Administrative private-vlan host-association: none
Administrative private-vlan mapping: none
Administrative private-vlan trunk native VLAN: none
Administrative private-vlan trunk Native VLAN tagging: enabled
Administrative private-vlan trunk encapsulation: dot1q
Administrative private-vlan trunk normal VLANs: none
Administrative private-vlan trunk associations: none
Administrative private-vlan trunk mappings:
10 (VLAN0010) 100 (VLAN0100)
Operational private-vlan:
10 (VLAN0010) 100 (VLAN0100)
Trunking VLANs Enabled: 100-110
Pruning VLANs Enabled: 2-1001
Capture Mode Disabled
Capture VLANs Allowed: ALL
Protected: False
Unknown unicast blocked: disabled
Unknown multicast blocked: disabled
Appliance trust: none
Name: Gi1/0/4
Switchport: Enabled
Administrative Mode: trunk
Operational Mode: trunk
Administrative Trunking Encapsulation: dot1q
Operational Trunking Encapsulation: dot1q
Negotiation of Trunking: On
Access Mode VLAN: 1 (default)
Trunking Native Mode VLAN: 1 (default)
Administrative Native VLAN tagging: enabled
Voice VLAN: none
Administrative private-vlan host-association: none
Administrative private-vlan mapping: none
Administrative private-vlan trunk native VLAN: none
Administrative private-vlan trunk Native VLAN tagging: enabled
Administrative private-vlan trunk encapsulation: dot1q
Administrative private-vlan trunk normal VLANs: none
Administrative private-vlan trunk associations: none
Administrative private-vlan trunk mappings: none
Operational private-vlan: none
Trunking VLANs Enabled: 200-211
Pruning VLANs Enabled: 2-1001
Capture Mode Disabled
Capture VLANs Allowed: ALL
Protected: False
Unknown unicast blocked: disabled
Unknown multicast blocked: disabled
Appliance trust: none
Name: Gi1/0/5
Switchport: Enabled
Administrative Mode: static access
Operational Mode: down
Administrative Trunking Encapsulation: dot1q
Negotiation of Trunking: Off
Access Mode VLAN: 1 (default)
Trunking Native Mode VLAN: 1 (default)
Administrative Native VLAN tagging: enabled
Voice VLAN: none
Administrative private-vlan host-association: none
Administrative private-vlan mapping: none
Administrative private-vlan trunk native VLAN: none
Administrative private-vlan trunk Native VLAN tagging: enabled
Administrative private-vlan trunk encapsulation: dot1q
Administrative private-vlan trunk normal VLANs: none
Administrative private-vlan trunk associations: none
Administrative private-vlan trunk mappings: none
Operational private-vlan: none
Trunking VLANs Enabled: ALL
Pruning VLANs Enabled: 2-1001
Capture Mode Disabled
Capture VLANs Allowed: ALL
Protected: False
Unknown unicast blocked: disabled
Unknown multicast blocked: disabled
Appliance trust: none
Name: Po12
Switchport: Enabled
Administrative Mode: trunk
Operational Mode: down
Administrative Trunking Encapsulation: dot1q
Negotiation of Trunking: On
Access Mode VLAN: 100 (Falback-Data)
Trunking Native Mode VLAN: 0 (Inactive)
Administrative Native VLAN tagging: disabled
Voice VLAN: 100 (Fallback-Voice)
Administrative private-vlan host-association: none
Administrative private-vlan mapping: none
Administrative private-vlan trunk native VLAN: none
Administrative private-vlan trunk Native VLAN tagging: enabled
Administrative private-vlan trunk encapsulation: dot1q
Administrative private-vlan trunk normal VLANs: none
Administrative private-vlan trunk associations: none
Administrative private-vlan trunk mappings: none
Operational private-vlan: none
Trunking VLANs Enabled: 100,101,110-120,121,130,170,180,
333-355,366-400,540,601-605,606,607,
1111,1113,1200-1234,1400-1456,1567
Pruning VLANs Enabled: 2-1001
Protected: False
Unknown unicast blocked: disabled
Unknown multicast blocked: disabled
Appliance trust: none
'''}
golden_output_2 = {'execute.return_value': '''
Name: Te1/1/2
Switchport: Enabled
Administrative Mode: trunk
Operational Mode: trunk (member of bundle Po12)
Administrative Trunking Encapsulation: dot1q
Operational Trunking Encapsulation: dot1q
Operational Dot1q Ethertype: 0x8100
Negotiation of Trunking: Off
Access Mode VLAN: 1 (default)
Trunking Native Mode VLAN: 1 (default)
Administrative Native VLAN tagging: enabled
Operational Native VLAN tagging: disabled
Voice VLAN: none
Administrative private-vlan host-association: none
Administrative private-vlan mapping: none
Operational private-vlan: none
Trunking VLANs Enabled: 1,111,130,131,400,405,410,420,430,439-442,450,451,460,
470,480,490,500,616,619,700,709-712,720,723-725,760
Pruning VLANs Enabled: 2-1001
Capture Mode Disabled
Capture VLANs Allowed: ALL
Unknown unicast blocked: disabled
Unknown multicast blocked: disabled
Name: Po12
Switchport: Enabled
Administrative Mode: trunk
Operational Mode: trunk
Administrative Trunking Encapsulation: dot1q
Operational Trunking Encapsulation: dot1q
Operational Dot1q Ethertype: 0x8100
Negotiation of Trunking: Off
Access Mode VLAN: 1 (default)
Trunking Native Mode VLAN: 1 (default)
Administrative Native VLAN tagging: enabled
Operational Native VLAN tagging: disabled
Voice VLAN: none
Administrative private-vlan host-association: none
Administrative private-vlan mapping: none
Operational private-vlan: none
Trunking VLANs Enabled: 1,111,130,131,400,405,410,420,430,439-442,450,451,460,
470,480,490,500,616,619,700,709-712,720,723-725,760
Pruning VLANs Enabled: 2-1001
Unknown unicast blocked: disabled
Unknown multicast blocked: disabled
'''}
golden_parsed_output_2 = {
'Port-channel12': {
'operational_mode': 'trunk',
'switchport_mode': 'trunk',
'access_vlan_name': 'default',
'private_vlan': {
},
'switchport_enable': True,
'native_vlan_tagging': True,
'negotiation_of_trunk': False,
'encapsulation': {
'native_vlan': '1',
'native_vlan_name': 'default',
'operational_encapsulation': 'dot1q',
'administrative_encapsulation': 'dot1q',
},
'port_channel': {
'port_channel_member_intfs': ['TenGigabitEthernet1/1/2'],
'port_channel_member': True,
},
'pruning_vlans': '2-1001',
'access_vlan': '1',
'unknown_multicast_blocked': False,
'trunk_vlans': '1,111,130,131,400,405,410,420,430,439-442,450,451,460,470,480,490,500,616,619,700,709-712,720,723-725,760',
'unknown_unicast_blocked': False,
},
'TenGigabitEthernet1/1/2': {
'access_vlan': '1',
'operational_mode': 'trunk',
'switchport_mode': 'trunk',
'access_vlan_name': 'default',
'switchport_enable': True,
'private_vlan': {
},
'capture_mode': False,
'trunk_vlans': '1,111,130,131,400,405,410,420,430,439-442,450,451,460,470,480,490,500,616,619,700,709-712,720,723-725,760',
'capture_vlans': 'all',
'negotiation_of_trunk': False,
'unknown_multicast_blocked': False,
'port_channel': {
'port_channel_int': 'Port-channel12',
'port_channel_member': True,
},
'native_vlan_tagging': True,
'encapsulation': {
'native_vlan': '1',
'native_vlan_name': 'default',
'operational_encapsulation': 'dot1q',
'administrative_encapsulation': 'dot1q',
},
'unknown_unicast_blocked': False,
'pruning_vlans': '2-1001',
},
}
def test_golden(self):
self.device = Mock(**self.golden_output)
intf_obj = ShowInterfacesSwitchport(device=self.device)
parsed_output = intf_obj.parse()
self.maxDiff = None
self.assertEqual(parsed_output,self.golden_parsed_output)
def test_empty(self):
self.device1 = Mock(**self.empty_output)
intf_obj = ShowInterfacesSwitchport(device=self.device1)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = intf_obj.parse()
def test_golden_2(self):
self.device = Mock(**self.golden_output_2)
intf_obj = ShowInterfacesSwitchport(device=self.device)
parsed_output = intf_obj.parse()
self.maxDiff = None
self.assertEqual(parsed_output,self.golden_parsed_output_2)
#############################################################################
# unit test for Show Interfaces
#############################################################################
# class TestShowInterfaces(unittest.TestCase):
# device = Device(name='aDevice')
# empty_output = {'execute.return_value': ''}
# golden_parsed_output = {
# "Port-channel12": {
# "flow_control": {
# "send": False,
# "receive": False
# },
# "type": "EtherChannel",
# "counters": {
# "out_buffer_failure": 0,
# "out_underruns": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_frame": 0,
# "in_ignored": 0,
# "last_clear": "1d23h",
# "out_interface_resets": 2,
# "in_mac_pause_frames": 0,
# "out_collision": 0,
# "rate": {
# "out_rate_pkts": 0,
# "load_interval": 300,
# "out_rate": 0,
# "in_rate": 2000,
# "in_rate_pkts": 2
# },
# "in_watchdog": 0,
# "out_deferred": 0,
# "out_mac_pause_frames": 0,
# "in_pkts": 961622,
# "in_multicast_pkts": 4286699522,
# "in_runts": 0,
# "out_unknown_protocl_drops": 0,
# "in_no_buffer": 0,
# "out_buffers_swapped": 0,
# "out_lost_carrier": 0,
# "out_errors": 0,
# "in_errors": 0,
# "in_octets": 72614643,
# "in_crc_errors": 0,
# "out_no_carrier": 0,
# "in_with_dribble": 0,
# "in_broadcast_pkts": 944788,
# "out_pkts": 39281,
# "out_late_collision": 0,
# "out_octets": 6235318,
# "in_overrun": 0,
# "out_babble": 0
# },
# "auto_negotiate": True,
# "phys_address": "0057.d2ff.422a",
# "keepalive": 10,
# "output_hang": "never",
# "txload": "1/255",
# "oper_status": "up",
# "arp_type": "arpa",
# "rxload": "1/255",
# "duplex_mode": "full",
# "link_type": "auto",
# "queues": {
# "input_queue_size": 0,
# "total_output_drop": 0,
# "input_queue_drops": 0,
# "input_queue_max": 2000,
# "output_queue_size": 0,
# "input_queue_flushes": 0,
# "output_queue_max": 0,
# "queue_strategy": "fifo"
# },
# "encapsulations": {
# "encapsulation": "qinq virtual lan",
# "first_dot1q": "10",
# "second_dot1q": "20",
# },
# "last_input": "never",
# "last_output": "1d22h",
# "line_protocol": "up",
# "mac_address": "0057.d2ff.422a",
# "connected": True,
# "port_channel": {
# "port_channel_member": True,
# "port_channel_member_intfs": ['GigabitEthernet1/0/2'],
# },
# "arp_timeout": "04:00:00",
# "bandwidth": 1000000,
# 'port_speed': '1000mb/s',
# "enabled": True,
# "mtu": 1500,
# "delay": 10,
# "reliability": "255/255"
# },
# "GigabitEthernet1/0/1": {
# "flow_control": {
# "send": False,
# "receive": False
# },
# "type": "Gigabit Ethernet",
# "counters": {
# "out_buffer_failure": 0,
# "out_underruns": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_frame": 0,
# "in_ignored": 0,
# "last_clear": "1d02h",
# "out_interface_resets": 2,
# "in_mac_pause_frames": 0,
# "out_collision": 0,
# "rate": {
# "out_rate_pkts": 0,
# "load_interval": 30,
# "out_rate": 0,
# "in_rate": 0,
# "in_rate_pkts": 0
# },
# "in_watchdog": 0,
# "out_deferred": 0,
# "out_mac_pause_frames": 0,
# "in_pkts": 12127,
# "in_multicast_pkts": 4171,
# "in_runts": 0,
# "out_unknown_protocl_drops": 0,
# "in_no_buffer": 0,
# "out_buffers_swapped": 0,
# "out_lost_carrier": 0,
# "out_errors": 0,
# "in_errors": 0,
# "in_octets": 2297417,
# "in_crc_errors": 0,
# "out_no_carrier": 0,
# "in_with_dribble": 0,
# "in_broadcast_pkts": 0,
# "out_pkts": 12229,
# "out_late_collision": 0,
# "out_octets": 2321107,
# "in_overrun": 0,
# "out_babble": 0
# },
# "phys_address": "0057.d2ff.428c",
# "keepalive": 10,
# "output_hang": "never",
# "txload": "1/255",
# "description": "desc",
# "oper_status": "down",
# "arp_type": "arpa",
# "rxload": "1/255",
# "duplex_mode": "auto",
# "queues": {
# "input_queue_size": 0,
# "total_output_drop": 0,
# "input_queue_drops": 0,
# "input_queue_max": 375,
# "output_queue_size": 0,
# "input_queue_flushes": 0,
# "output_queue_max": 40,
# "queue_strategy": "fifo"
# },
# "ipv4": {
# "10.1.1.1/24": {
# "prefix_length": "24",
# "ip": "10.1.1.1"
# }
# },
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "last_input": "never",
# "last_output": "04:39:18",
# "line_protocol": "down",
# "mac_address": "0057.d2ff.428c",
# "connected": False,
# "port_channel": {
# "port_channel_member": False
# },
# "media_type": "10/100/1000BaseTX",
# "bandwidth": 768,
# 'port_speed': '1000mb/s',
# "enabled": False,
# "arp_timeout": "04:00:00",
# "mtu": 1500,
# "delay": 3330,
# "reliability": "255/255"
# },
# "GigabitEthernet3": {
# "flow_control": {
# "send": False,
# "receive": False
# },
# "type": "CSR vNIC",
# 'auto_negotiate': True,
# 'duplex_mode': 'full',
# 'link_type': 'auto',
# 'media_type': 'RJ45',
# 'port_speed': '1000mbps',
# "counters": {
# "out_buffer_failure": 0,
# "out_underruns": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_frame": 0,
# "in_ignored": 0,
# "last_clear": "never",
# "out_interface_resets": 1,
# "in_mac_pause_frames": 0,
# "out_collision": 0,
# "in_crc_errors": 0,
# "rate": {
# "out_rate_pkts": 0,
# "load_interval": 300,
# "out_rate": 0,
# "in_rate": 0,
# "in_rate_pkts": 0
# },
# "in_watchdog": 0,
# "out_deferred": 0,
# "out_mac_pause_frames": 0,
# "in_pkts": 6,
# "in_multicast_pkts": 0,
# "in_runts": 0,
# "in_no_buffer": 0,
# "out_buffers_swapped": 0,
# "out_errors": 0,
# "in_errors": 0,
# "in_octets": 480,
# "out_unknown_protocl_drops": 0,
# "out_no_carrier": 0,
# "out_lost_carrier": 0,
# "in_broadcast_pkts": 0,
# "out_pkts": 28,
# "out_late_collision": 0,
# "out_octets": 7820,
# "in_overrun": 0,
# "out_babble": 0
# },
# "phys_address": "5254.00ff.0e7e",
# "keepalive": 10,
# "output_hang": "never",
# "txload": "1/255",
# "reliability": "255/255",
# "arp_type": "arpa",
# "rxload": "1/255",
# "queues": {
# "input_queue_size": 0,
# "total_output_drop": 0,
# "input_queue_drops": 0,
# "input_queue_max": 375,
# "output_queue_size": 0,
# "input_queue_flushes": 0,
# "output_queue_max": 40,
# "queue_strategy": "fifo"
# },
# "ipv4": {
# "192.168.154.1/24": {
# "prefix_length": "24",
# "ip": "192.168.154.1"
# },
# "unnumbered": {
# "interface_ref": "Loopback0"
# }
# },
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "last_output": "00:00:27",
# "line_protocol": "up",
# "mac_address": "5254.00ff.0e7e",
# "oper_status": "up",
# "port_channel": {
# "port_channel_member": False
# },
# "arp_timeout": "04:00:00",
# "bandwidth": 1000000,
# "enabled": True,
# "mtu": 1500,
# "delay": 10,
# "last_input": "never"
# },
# "Loopback0": {
# "queues": {
# "input_queue_size": 0,
# "total_output_drop": 0,
# "input_queue_drops": 0,
# "input_queue_max": 75,
# "output_queue_size": 0,
# "input_queue_flushes": 0,
# "output_queue_max": 0,
# "queue_strategy": "fifo"
# },
# "mtu": 1514,
# "encapsulations": {
# "encapsulation": "loopback"
# },
# "last_output": "never",
# "type": "Loopback",
# "line_protocol": "up",
# "oper_status": "up",
# "keepalive": 10,
# "output_hang": "never",
# "txload": "1/255",
# "counters": {
# "out_buffer_failure": 0,
# "out_underruns": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_frame": 0,
# "in_ignored": 0,
# "last_clear": "1d04h",
# "out_interface_resets": 0,
# "out_collision": 0,
# "rate": {
# "out_rate_pkts": 0,
# "load_interval": 300,
# "out_rate": 0,
# "in_rate": 0,
# "in_rate_pkts": 0
# },
# "in_pkts": 0,
# "in_multicast_pkts": 0,
# "in_runts": 0,
# "in_no_buffer": 0,
# "out_buffers_swapped": 0,
# "out_errors": 0,
# "in_errors": 0,
# "in_octets": 0,
# "in_crc_errors": 0,
# "out_unknown_protocl_drops": 0,
# "in_broadcast_pkts": 0,
# "out_pkts": 72,
# "out_octets": 5760,
# "in_overrun": 0,
# "in_abort": 0
# },
# "reliability": "255/255",
# "bandwidth": 8000000,
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "ipv4": {
# "192.168.154.1/24": {
# "prefix_length": "24",
# "ip": "192.168.154.1"
# }
# },
# "rxload": "1/255",
# "delay": 5000,
# "last_input": "1d02h"
# },
# "Vlan100": {
# "type": "Ethernet SVI",
# "counters": {
# "out_buffer_failure": 0,
# "out_underruns": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_frame": 0,
# "in_ignored": 0,
# "last_clear": "1d04h",
# "out_interface_resets": 0,
# "rate": {
# "out_rate_pkts": 0,
# "load_interval": 300,
# "out_rate": 0,
# "in_rate": 0,
# "in_rate_pkts": 0
# },
# "in_pkts": 50790,
# "in_multicast_pkts": 0,
# "in_runts": 0,
# "in_no_buffer": 0,
# "out_buffers_swapped": 0,
# "out_errors": 0,
# "in_errors": 0,
# "in_octets": 3657594,
# "in_crc_errors": 0,
# "out_unknown_protocl_drops": 0,
# "in_broadcast_pkts": 0,
# "out_pkts": 72,
# "out_octets": 5526,
# "in_overrun": 0
# },
# "phys_address": "0057.d2ff.4279",
# "queues": {
# "input_queue_size": 0,
# "total_output_drop": 0,
# "input_queue_drops": 0,
# "input_queue_max": 375,
# "output_queue_size": 0,
# "input_queue_flushes": 0,
# "output_queue_max": 40,
# "queue_strategy": "fifo"
# },
# "txload": "1/255",
# "reliability": "255/255",
# "arp_type": "arpa",
# "rxload": "1/255",
# "output_hang": "never",
# "ipv4": {
# "192.168.234.1/24": {
# "prefix_length": "24",
# "ip": "192.168.234.1"
# }
# },
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "last_output": "1d03h",
# "line_protocol": "up",
# "mac_address": "0057.d2ff.4279",
# "oper_status": "up",
# "port_channel": {
# "port_channel_member": False
# },
# "arp_timeout": "04:00:00",
# "bandwidth": 1000000,
# "enabled": True,
# "mtu": 1500,
# "delay": 10,
# "last_input": "never"
# },
# "GigabitEthernet1/0/2": {
# "flow_control": {
# "send": False,
# "receive": False
# },
# "type": "Gigabit Ethernet",
# "counters": {
# "out_buffer_failure": 0,
# "out_underruns": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_frame": 0,
# "in_ignored": 0,
# "last_clear": "1d02h",
# "out_interface_resets": 5,
# "in_mac_pause_frames": 0,
# "out_collision": 0,
# "rate": {
# "out_rate_pkts": 0,
# "load_interval": 300,
# "out_rate": 0,
# "in_rate": 3000,
# "in_rate_pkts": 5
# },
# "in_watchdog": 0,
# "out_deferred": 0,
# "out_mac_pause_frames": 0,
# "in_pkts": 545526,
# "in_multicast_pkts": 535961,
# "in_runts": 0,
# "out_unknown_protocl_drops": 0,
# "in_no_buffer": 0,
# "out_buffers_swapped": 0,
# "out_lost_carrier": 0,
# "out_errors": 0,
# "in_errors": 0,
# "in_octets": 41210298,
# "in_crc_errors": 0,
# "out_no_carrier": 0,
# "in_with_dribble": 0,
# "in_broadcast_pkts": 535961,
# "out_pkts": 23376,
# "out_late_collision": 0,
# "out_octets": 3642296,
# "in_overrun": 0,
# "out_babble": 0
# },
# "phys_address": "0057.d2ff.422a",
# "keepalive": 10,
# "output_hang": "never",
# "txload": "1/255",
# "oper_status": "up",
# "arp_type": "arpa",
# "media_type": "10/100/1000BaseTX",
# "rxload": "1/255",
# "duplex_mode": "full",
# "queues": {
# "input_queue_size": 0,
# "total_output_drop": 0,
# "input_queue_drops": 0,
# "input_queue_max": 2000,
# "output_queue_size": 0,
# "input_queue_flushes": 0,
# "output_queue_max": 40,
# "queue_strategy": "fifo"
# },
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "last_input": "never",
# "last_output": "00:00:02",
# "line_protocol": "up",
# "mac_address": "0057.d2ff.422a",
# "connected": True,
# "port_channel": {
# "port_channel_member": True,
# 'port_channel_int': 'Port-channel12',
# },
# "arp_timeout": "04:00:00",
# "bandwidth": 1000000,
# 'port_speed': '1000mb/s',
# "enabled": True,
# "mtu": 1500,
# "delay": 10,
# "reliability": "255/255"
# },
# "GigabitEthernet0/0/4": {
# "arp_timeout": "04:00:00",
# "arp_type": "arpa",
# "bandwidth": 1000000,
# 'auto_negotiate': True,
# "counters": {
# "in_broadcast_pkts": 0,
# "in_crc_errors": 0,
# "in_errors": 0,
# "in_frame": 0,
# "in_giants": 0,
# "in_ignored": 0,
# "in_mac_pause_frames": 0,
# "in_multicast_pkts": 0,
# "in_no_buffer": 0,
# "in_octets": 0,
# "in_overrun": 0,
# "in_pkts": 0,
# "in_runts": 0,
# "in_throttles": 0,
# "in_watchdog": 0,
# "last_clear": "never",
# "out_babble": 0,
# "out_collision": 0,
# "out_deferred": 0,
# "out_errors": 0,
# "out_interface_resets": 1,
# "out_late_collision": 0,
# "out_lost_carrier": 0,
# "out_mac_pause_frames": 0,
# "out_no_carrier": 0,
# "out_octets": 0,
# "out_pkts": 0,
# "out_underruns": 0,
# "out_unknown_protocl_drops": 0,
# "rate": {
# "in_rate": 0,
# "in_rate_pkts": 0,
# "load_interval": 300,
# "out_rate": 0,
# "out_rate_pkts": 0
# }
# },
# "delay": 10,
# 'duplex_mode': 'full',
# 'link_type': 'auto',
# 'port_speed': '1000mbps',
# 'media_type': 'unknown',
# "enabled": False,
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "flow_control": {
# "receive": False, "send": False
# },
# "last_input": "never",
# "last_output": "never",
# "line_protocol": "down",
# "mac_address": "380e.4dff.dc72",
# "phys_address": "380e.4dff.dc72",
# "mtu": 1500,
# "oper_status": "down",
# "output_hang": "never",
# "port_channel": {
# "port_channel_member": False
# },
# "queues": {
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "input_queue_max": 375,
# "input_queue_size": 0,
# "output_queue_max": 40,
# "output_queue_size": 0,
# "queue_strategy": "fifo",
# "total_output_drop": 0
# },
# "reliability": "255/255",
# "rxload": "1/255",
# "txload": "1/255",
# "type": "BUILT-IN-2T+6X1GE"
# }
# }
# golden_output = {'execute.return_value': '''
# GigabitEthernet1/0/1 is administratively down, line protocol is down (disabled)
# Hardware is Gigabit Ethernet, address is 0057.d2ff.428c (bia 0057.d2ff.428c)
# Description: desc
# Internet address is 10.1.1.1/24
# MTU 1500 bytes, BW 768 Kbit/sec, DLY 3330 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive set (10 sec)
# Auto-duplex, 1000Mb/s, media type is 10/100/1000BaseTX
# input flow-control is off, output flow-control is unsupported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output 04:39:18, output hang never
# Last clearing of "show interface" counters 1d02h
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 30 second input rate 0 bits/sec, 0 packets/sec
# 30 second output rate 0 bits/sec, 0 packets/sec
# 12127 packets input, 2297417 bytes, 0 no buffer
# Received 4173 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 4171 multicast, 0 pause input
# 0 input packets with dribble condition detected
# 12229 packets output, 2321107 bytes, 0 underruns
# 0 output errors, 0 collisions, 2 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# GigabitEthernet1/0/2 is up, line protocol is up (connected)
# Hardware is Gigabit Ethernet, address is 0057.d2ff.422a (bia 0057.d2ff.422a)
# MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive set (10 sec)
# Full-duplex, 1000Mb/s, media type is 10/100/1000BaseTX
# input flow-control is off, output flow-control is unsupported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output 00:00:02, output hang never
# Last clearing of "show interface" counters 1d02h
# Input queue: 0/2000/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 3000 bits/sec, 5 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 545526 packets input, 41210298 bytes, 0 no buffer
# Received 535996 broadcasts (535961 multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 535961 multicast, 0 pause input
# 0 input packets with dribble condition detected
# 23376 packets output, 3642296 bytes, 0 underruns
# 0 output errors, 0 collisions, 5 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# GigabitEthernet3 is up, line protocol is up
# Hardware is CSR vNIC, address is 5254.00ff.0e7e (bia 5254.00ff.0e7e)
# Interface is unnumbered. Using address of Loopback0 (192.168.154.1)
# MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive set (10 sec)
# Full Duplex, 1000Mbps, link type is auto, media type is RJ45
# output flow-control is unsupported, input flow-control is unsupported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output 00:00:27, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 6 packets input, 480 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 0 multicast, 0 pause input
# 28 packets output, 7820 bytes, 0 underruns
# 0 output errors, 0 collisions, 1 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# Loopback0 is up, line protocol is up
# Hardware is Loopback
# Internet address is 192.168.154.1/24
# MTU 1514 bytes, BW 8000000 Kbit/sec, DLY 5000 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation LOOPBACK, loopback not set
# Keepalive set (10 sec)
# Last input 1d02h, output never, output hang never
# Last clearing of "show interface" counters 1d04h
# Input queue: 0/75/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/0 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored, 0 abort
# 72 packets output, 5760 bytes, 0 underruns
# 0 output errors, 0 collisions, 0 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Vlan100 is up, line protocol is up
# Hardware is Ethernet SVI, address is 0057.d2ff.4279 (bia 0057.d2ff.4279)
# Internet address is 192.168.234.1/24
# MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive not supported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output 1d03h, output hang never
# Last clearing of "show interface" counters 1d04h
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 50790 packets input, 3657594 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 72 packets output, 5526 bytes, 0 underruns
# 0 output errors, 0 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Port-channel12 is up, line protocol is up (connected)
# Hardware is EtherChannel, address is 0057.d2ff.422a (bia 0057.d2ff.422a)
# MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation QinQ Virtual LAN, outer ID 10, inner ID 20
# Keepalive set (10 sec)
# Full-duplex, 1000Mb/s, link type is auto, media type is
# input flow-control is off, output flow-control is unsupported
# Members in this channel: Gi1/0/2
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output 1d22h, output hang never
# Last clearing of "show interface" counters 1d23h
# Input queue: 0/2000/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/0 (size/max)
# 5 minute input rate 2000 bits/sec, 2 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 961622 packets input, 72614643 bytes, 0 no buffer
# Received 944818 broadcasts (944788 multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 4286699522 multicast, 0 pause input
# 0 input packets with dribble condition detected
# 39281 packets output, 6235318 bytes, 0 underruns
# 0 output errors, 0 collisions, 2 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# GigabitEthernet0/0/4 is administratively down, line protocol is down
# Hardware is BUILT-IN-2T+6X1GE, address is 380e.4dff.dc72 (bia 380e.4dff.dc72)
# MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive not supported
# Full Duplex, 1000Mbps, link type is auto, media type is unknown media type
# output flow-control is unsupported, input flow-control is unsupported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output never, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 0 multicast, 0 pause input
# 0 packets output, 0 bytes, 0 underruns
# 0 output errors, 0 collisions, 1 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# '''}
# golden_interface_output = {'execute.return_value': '''
# CE1#show interfaces GigabitEthernet1
# GigabitEthernet1 is up, line protocol is up
# Hardware is CSR vNIC, address is 5e00.00ff.0101 (bia 5e00.00ff.0101)
# Internet address is 172.16.1.243/24
# MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive set (10 sec)
# Full Duplex, 1000Mbps, link type is auto, media type is Virtual
# output flow-control is unsupported, input flow-control is unsupported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input 00:00:02, output 00:00:25, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 32000 bits/sec, 28 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 7658 packets input, 1125842 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 0 multicast, 0 pause input
# 44 packets output, 4324 bytes, 0 underruns
# 0 output errors, 0 collisions, 1 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# '''}
# golden_parsed_interface_output={
# "GigabitEthernet1": {
# "rxload": "1/255",
# "phys_address": "5e00.00ff.0101",
# "flow_control": {
# "send": False,
# "receive": False
# },
# "arp_type": "arpa",
# "type": "CSR vNIC",
# "enabled": True,
# "media_type": "Virtual",
# "last_input": "00:00:02",
# "link_type": "auto",
# "last_output": "00:00:25",
# "counters": {
# "in_errors": 0,
# "in_frame": 0,
# "in_watchdog": 0,
# "out_babble": 0,
# "in_overrun": 0,
# "out_collision": 0,
# "out_buffer_failure": 0,
# "out_no_carrier": 0,
# "in_runts": 0,
# "out_late_collision": 0,
# "in_mac_pause_frames": 0,
# "out_underruns": 0,
# "out_pkts": 44,
# "in_ignored": 0,
# "in_pkts": 7658,
# "out_buffers_swapped": 0,
# "out_interface_resets": 1,
# "rate": {
# "out_rate": 0,
# "load_interval": 300,
# "in_rate_pkts": 28,
# "out_rate_pkts": 0,
# "in_rate": 32000
# },
# "out_mac_pause_frames": 0,
# "in_broadcast_pkts": 0,
# "in_no_buffer": 0,
# "out_deferred": 0,
# "in_crc_errors": 0,
# "out_octets": 4324,
# "out_lost_carrier": 0,
# "in_octets": 1125842,
# "out_unknown_protocl_drops": 0,
# "last_clear": "never",
# "in_throttles": 0,
# "in_multicast_pkts": 0,
# "out_errors": 0,
# "in_giants": 0
# },
# "keepalive": 10,
# "mtu": 1500,
# "delay": 10,
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "ipv4": {
# "172.16.1.243/24": {
# "ip": "172.16.1.243",
# "prefix_length": "24"
# }
# },
# "queues": {
# "output_queue_size": 0,
# "input_queue_size": 0,
# "input_queue_flushes": 0,
# "queue_strategy": "fifo",
# "total_output_drop": 0,
# "output_queue_max": 40,
# "input_queue_drops": 0,
# "input_queue_max": 375
# },
# "auto_negotiate": True,
# "line_protocol": "up",
# "oper_status": "up",
# "duplex_mode": "full",
# "bandwidth": 1000000,
# "arp_timeout": "04:00:00",
# 'port_speed': '1000mbps',
# "port_channel": {
# "port_channel_member": False
# },
# "output_hang": "never",
# "txload": "1/255",
# "mac_address": "5e00.00ff.0101",
# "reliability": "255/255"
# }
# }
# golden_output2 = {'execute.return_value': '''
# show interfaces
# Vlan1 is administratively down, line protocol is down , Autostate Enabled
# Hardware is Ethernet SVI, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive not supported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output never, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 packets output, 0 bytes, 0 underruns
# 0 output errors, 1 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Vlan15 is up, line protocol is down , Autostate Enabled
# Hardware is Ethernet SVI, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive not supported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output never, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 packets output, 0 bytes, 0 underruns
# 0 output errors, 2 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Vlan101 is up, line protocol is up , Autostate Enabled
# Hardware is Ethernet SVI, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.205.1/24
# MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive not supported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input 00:00:00, output 00:00:00, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 29000 bits/sec, 50 packets/sec
# 5 minute output rate 5000 bits/sec, 10 packets/sec
# 3673498 packets input, 279750798 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 813812 packets output, 60257018 bytes, 0 underruns
# 0 output errors, 2 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Vlan102 is up, line protocol is up , Autostate Enabled
# Hardware is Ethernet SVI, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.106.1/24
# MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive not supported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input 00:00:00, output 00:00:00, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 29000 bits/sec, 40 packets/sec
# 5 minute output rate 5000 bits/sec, 10 packets/sec
# 3632279 packets input, 276659268 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 804940 packets output, 59536912 bytes, 0 underruns
# 0 output errors, 7 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Vlan103 is up, line protocol is up , Autostate Enabled
# Hardware is Ethernet SVI, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.9.1/24
# MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive not supported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input 00:00:00, output 00:00:00, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 29000 bits/sec, 50 packets/sec
# 5 minute output rate 5000 bits/sec, 10 packets/sec
# 3673834 packets input, 279772748 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 813848 packets output, 60159890 bytes, 0 underruns
# 0 output errors, 2 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Vlan104 is up, line protocol is up , Autostate Enabled
# Hardware is Ethernet SVI, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.169.1/24
# MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive not supported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input 00:00:00, output 00:00:00, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 30000 bits/sec, 50 packets/sec
# 5 minute output rate 5000 bits/sec, 10 packets/sec
# 3673653 packets input, 279762130 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 813767 packets output, 60155916 bytes, 0 underruns
# 0 output errors, 2 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Vlan105 is up, line protocol is up , Autostate Enabled
# Hardware is Ethernet SVI, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.76.1/24
# MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive not supported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input 00:00:00, output 00:00:00, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 29000 bits/sec, 50 packets/sec
# 5 minute output rate 5000 bits/sec, 10 packets/sec
# 3673610 packets input, 279756472 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 813890 packets output, 60162584 bytes, 0 underruns
# 0 output errors, 2 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Vlan106 is up, line protocol is up , Autostate Enabled
# Hardware is Ethernet SVI, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.240.1/24
# MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive not supported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input 00:00:00, output 00:00:00, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 30000 bits/sec, 51 packets/sec
# 5 minute output rate 6000 bits/sec, 11 packets/sec
# 3673779 packets input, 279773894 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 813865 packets output, 60163538 bytes, 0 underruns
# 0 output errors, 2 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Vlan107 is up, line protocol is up , Autostate Enabled
# Hardware is Ethernet SVI, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.151.1/24
# MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive not supported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input 00:00:00, output 00:00:00, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 30000 bits/sec, 51 packets/sec
# 5 minute output rate 6000 bits/sec, 11 packets/sec
# 3673882 packets input, 279781700 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 813903 packets output, 60165230 bytes, 0 underruns
# 0 output errors, 2 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Vlan108 is up, line protocol is up , Autostate Enabled
# Hardware is Ethernet SVI, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.64.1/24
# MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive not supported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input 00:00:00, output 00:00:00, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 31000 bits/sec, 51 packets/sec
# 5 minute output rate 6000 bits/sec, 11 packets/sec
# 3673638 packets input, 279766630 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 813842 packets output, 60162384 bytes, 0 underruns
# 0 output errors, 2 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Vlan109 is up, line protocol is up , Autostate Enabled
# Hardware is Ethernet SVI, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.234.1/24
# MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive not supported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input 00:00:00, output 00:00:00, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 30000 bits/sec, 51 packets/sec
# 5 minute output rate 6000 bits/sec, 11 packets/sec
# 3673894 packets input, 279781274 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 817800 packets output, 62192557 bytes, 0 underruns
# 0 output errors, 2 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Vlan110 is up, line protocol is up , Autostate Enabled
# Hardware is Ethernet SVI, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.151.1/24
# MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive not supported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input 00:00:00, output 00:00:00, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 31000 bits/sec, 51 packets/sec
# 5 minute output rate 6000 bits/sec, 11 packets/sec
# 3674136 packets input, 279796126 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 813960 packets output, 60168004 bytes, 0 underruns
# 0 output errors, 2 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Vlan111 is up, line protocol is up , Autostate Enabled
# Hardware is Ethernet SVI, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.70.1/24
# MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive not supported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input 00:00:00, output 00:00:00, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 31000 bits/sec, 51 packets/sec
# 5 minute output rate 6000 bits/sec, 11 packets/sec
# 3673792 packets input, 279763870 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 822081 packets output, 60848654 bytes, 0 underruns
# 0 output errors, 2 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Vlan112 is up, line protocol is up , Autostate Enabled
# Hardware is Ethernet SVI, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.246.1/24
# MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive not supported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input 00:00:00, output 00:00:00, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 30000 bits/sec, 51 packets/sec
# 5 minute output rate 6000 bits/sec, 11 packets/sec
# 3673848 packets input, 279779396 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 813978 packets output, 60170234 bytes, 0 underruns
# 0 output errors, 2 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Vlan113 is up, line protocol is up , Autostate Enabled
# Hardware is Ethernet SVI, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.169.1/24
# MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive not supported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input 00:00:00, output 00:00:00, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 31000 bits/sec, 51 packets/sec
# 5 minute output rate 6000 bits/sec, 11 packets/sec
# 3674092 packets input, 279792690 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 813962 packets output, 60168782 bytes, 0 underruns
# 0 output errors, 2 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Vlan114 is up, line protocol is up , Autostate Enabled
# Hardware is Ethernet SVI, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.94.1/24
# MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive not supported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input 00:00:00, output 00:00:00, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 30000 bits/sec, 51 packets/sec
# 5 minute output rate 6000 bits/sec, 11 packets/sec
# 3674118 packets input, 279801252 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 813964 packets output, 60167610 bytes, 0 underruns
# 0 output errors, 2 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Vlan115 is up, line protocol is up , Autostate Enabled
# Hardware is Ethernet SVI, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.21.1/24
# MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive not supported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input 00:00:00, output 00:00:00, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 31000 bits/sec, 52 packets/sec
# 5 minute output rate 6000 bits/sec, 11 packets/sec
# 3688257 packets input, 280917432 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 813950 packets output, 60167218 bytes, 0 underruns
# 0 output errors, 2 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Vlan116 is up, line protocol is up , Autostate Enabled
# Hardware is Ethernet SVI, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.205.1/24
# MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive not supported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input 00:00:00, output 00:00:00, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 30000 bits/sec, 50 packets/sec
# 5 minute output rate 6000 bits/sec, 11 packets/sec
# 3674429 packets input, 279815742 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 816877 packets output, 60383316 bytes, 0 underruns
# 0 output errors, 2 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Vlan117 is up, line protocol is up , Autostate Enabled
# Hardware is Ethernet SVI, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.136.1/24
# MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive not supported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input 00:00:00, output 00:00:00, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 30000 bits/sec, 50 packets/sec
# 5 minute output rate 6000 bits/sec, 11 packets/sec
# 3674114 packets input, 279794536 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 814083 packets output, 60178182 bytes, 0 underruns
# 0 output errors, 2 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Vlan118 is up, line protocol is up , Autostate Enabled
# Hardware is Ethernet SVI, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.69.1/24
# MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive not supported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input 00:00:00, output 00:00:00, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 31000 bits/sec, 51 packets/sec
# 5 minute output rate 6000 bits/sec, 11 packets/sec
# 3674811 packets input, 279845876 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 813994 packets output, 60171406 bytes, 0 underruns
# 0 output errors, 2 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Vlan119 is up, line protocol is up , Autostate Enabled
# Hardware is Ethernet SVI, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.4.1/24
# MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive not supported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input 00:00:00, output 00:00:00, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 30000 bits/sec, 51 packets/sec
# 5 minute output rate 6000 bits/sec, 11 packets/sec
# 3691322 packets input, 281116276 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 814073 packets output, 60175212 bytes, 0 underruns
# 0 output errors, 2 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Vlan120 is up, line protocol is up , Autostate Enabled
# Hardware is Ethernet SVI, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.196.1/24
# MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive not supported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input 00:00:00, output 00:00:00, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 30000 bits/sec, 50 packets/sec
# 5 minute output rate 6000 bits/sec, 11 packets/sec
# 3673948 packets input, 279785038 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 813996 packets output, 60171120 bytes, 0 underruns
# 0 output errors, 2 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Vlan121 is up, line protocol is down , Autostate Enabled
# Hardware is Ethernet SVI, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.135.1/24
# MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive not supported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output 06:39:20, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 163 packets output, 14018 bytes, 0 underruns
# 0 output errors, 2 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Vlan122 is up, line protocol is down , Autostate Enabled
# Hardware is Ethernet SVI, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.76.1/24
# MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive not supported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output 06:39:20, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 163 packets output, 14018 bytes, 0 underruns
# 0 output errors, 2 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Vlan123 is up, line protocol is down , Autostate Enabled
# Hardware is Ethernet SVI, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.19.1/24
# MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive not supported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output 06:39:20, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 163 packets output, 14018 bytes, 0 underruns
# 0 output errors, 2 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Vlan124 is up, line protocol is down , Autostate Enabled
# Hardware is Ethernet SVI, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.219.1/24
# MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive not supported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output 06:39:20, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 163 packets output, 14018 bytes, 0 underruns
# 0 output errors, 2 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Vlan125 is up, line protocol is down , Autostate Enabled
# Hardware is Ethernet SVI, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.166.1/24
# MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive not supported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output 06:39:24, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 163 packets output, 14018 bytes, 0 underruns
# 0 output errors, 2 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Vlan126 is up, line protocol is down , Autostate Enabled
# Hardware is Ethernet SVI, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.115.1/24
# MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive not supported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output 06:39:24, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 163 packets output, 14018 bytes, 0 underruns
# 0 output errors, 2 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Vlan127 is up, line protocol is down , Autostate Enabled
# Hardware is Ethernet SVI, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.66.1/24
# MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive not supported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output 06:39:24, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 163 packets output, 14018 bytes, 0 underruns
# 0 output errors, 2 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Vlan128 is up, line protocol is down , Autostate Enabled
# Hardware is Ethernet SVI, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.19.1/24
# MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive not supported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output 06:39:28, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 163 packets output, 14018 bytes, 0 underruns
# 0 output errors, 2 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Vlan129 is up, line protocol is down , Autostate Enabled
# Hardware is Ethernet SVI, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.229.1/24
# MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive not supported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output 06:39:28, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 163 packets output, 14018 bytes, 0 underruns
# 0 output errors, 2 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Vlan130 is up, line protocol is down , Autostate Enabled
# Hardware is Ethernet SVI, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.186.1/24
# MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive not supported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output 06:39:28, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 163 packets output, 14018 bytes, 0 underruns
# 0 output errors, 2 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Vlan131 is up, line protocol is down , Autostate Enabled
# Hardware is Ethernet SVI, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.145.1/24
# MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive not supported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output 06:39:28, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 163 packets output, 14018 bytes, 0 underruns
# 0 output errors, 2 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Vlan132 is up, line protocol is down , Autostate Enabled
# Hardware is Ethernet SVI, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.106.1/24
# MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive not supported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output 06:39:32, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 163 packets output, 14018 bytes, 0 underruns
# 0 output errors, 2 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Vlan133 is up, line protocol is down , Autostate Enabled
# Hardware is Ethernet SVI, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.69.1/24
# MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive not supported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output 06:39:32, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 163 packets output, 14018 bytes, 0 underruns
# 0 output errors, 2 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Vlan134 is up, line protocol is down , Autostate Enabled
# Hardware is Ethernet SVI, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.34.1/24
# MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive not supported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output 06:39:32, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 163 packets output, 14018 bytes, 0 underruns
# 0 output errors, 2 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Vlan135 is up, line protocol is down , Autostate Enabled
# Hardware is Ethernet SVI, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.1.1/24
# MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive not supported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output 06:39:32, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 163 packets output, 14018 bytes, 0 underruns
# 0 output errors, 2 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Vlan136 is up, line protocol is down , Autostate Enabled
# Hardware is Ethernet SVI, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.225.1/24
# MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive not supported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output 06:39:37, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 163 packets output, 14018 bytes, 0 underruns
# 0 output errors, 2 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Vlan137 is up, line protocol is down , Autostate Enabled
# Hardware is Ethernet SVI, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.196.1/24
# MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive not supported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output 06:39:37, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 163 packets output, 14018 bytes, 0 underruns
# 0 output errors, 2 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Vlan138 is up, line protocol is down , Autostate Enabled
# Hardware is Ethernet SVI, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.169.1/24
# MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive not supported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output 06:39:37, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 163 packets output, 14018 bytes, 0 underruns
# 0 output errors, 2 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Vlan139 is up, line protocol is down , Autostate Enabled
# Hardware is Ethernet SVI, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.144.1/24
# MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive not supported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output 06:39:41, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 163 packets output, 14018 bytes, 0 underruns
# 0 output errors, 2 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Vlan140 is up, line protocol is down , Autostate Enabled
# Hardware is Ethernet SVI, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.121.1/24
# MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive not supported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output 06:39:41, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 163 packets output, 14018 bytes, 0 underruns
# 0 output errors, 2 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
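# ---------------------------------------------------------------------------
# The SVI blocks above all open with the same two lines: a status header and
# an "Internet address" line. The sketch below is a minimal, hypothetical
# illustration of how those lines could be matched with the standard `re`
# module; it is not the parser this captured output is meant to exercise,
# and the helper name `parse_status_header` is ours, not the device's.
import re

_STATUS_HEADER = re.compile(
    r'^(?P<intf>\S+) is (?P<status>[\w\s]+?),\s*'
    r'line protocol is (?P<proto>\w+)'
)
_IP_LINE = re.compile(r'^Internet address is (?P<ip>[\d.]+)/(?P<prefix>\d+)$')

def parse_status_header(line):
    """Return (interface, status, line_protocol) from a status header line."""
    m = _STATUS_HEADER.match(line.strip())
    if not m:
        return None
    return m.group('intf'), m.group('status').strip(), m.group('proto')

# Example: parse_status_header(
#     "Vlan140 is up, line protocol is down , Autostate Enabled")
# returns ("Vlan140", "up", "down"); the odd " ," spacing is genuine IOS-XE
# output, which is why the regex allows flexible whitespace after the comma.
# ---------------------------------------------------------------------------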
# GigabitEthernet0/0 is up, line protocol is up
# Hardware is RP management port, address is 70b3.17ff.6560 (bia 70b3.17ff.6560)
# Internet address is 10.9.1.20/16
# MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive set (10 sec)
# Full Duplex, 1000Mbps, link type is auto, media type is RJ45
# output flow-control is unsupported, input flow-control is unsupported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input 00:00:00, output 00:00:15, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/75/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 818000 bits/sec, 675 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 10341900 packets input, 2319228471 bytes, 0 no buffer
# Received 420554 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 0 multicast, 0 pause input
# 8840 packets output, 993196 bytes, 0 underruns
# 0 output errors, 0 collisions, 0 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
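# ---------------------------------------------------------------------------
# The management-port block above is the first one with non-zero traffic
# counters. As a hedged sketch (assumed helper names, not part of this
# capture), the 5-minute rate lines and the packet/byte totals could be
# extracted like so:
import re

_RATE = re.compile(
    r'^5 minute (?P<dir>input|output) rate '
    r'(?P<bits>\d+) bits/sec, (?P<pkts>\d+) packets/sec$'
)
_TOTALS = re.compile(
    r'^(?P<pkts>\d+) packets (?P<dir>input|output), (?P<bytes>\d+) bytes'
)

def parse_rate(line):
    """Return (direction, bits_per_sec, packets_per_sec) or None."""
    m = _RATE.match(line.strip())
    if not m:
        return None
    return m.group('dir'), int(m.group('bits')), int(m.group('pkts'))

# Example: parse_rate("5 minute input rate 818000 bits/sec, 675 packets/sec")
# returns ("input", 818000, 675).
# ---------------------------------------------------------------------------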
# FortyGigabitEthernet1/0/1 is down, line protocol is down (notconnect)
# Hardware is Forty Gigabit Ethernet, address is 70b3.17ff.6560 (bia 70b3.17ff.6560)
# MTU 1500 bytes, BW 40000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive set (10 sec)
# Auto-duplex, Auto-speed, link type is auto, media type is unknown
# input flow-control is on, output flow-control is off
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output never, output hang never
# Last clearing of "show interface" counters 20:01:20
# Input queue: 0/2000/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 0 multicast, 0 pause input
# 0 input packets with dribble condition detected
# 0 packets output, 0 bytes, 0 underruns
# 0 output errors, 0 collisions, 2 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# FortyGigabitEthernet1/0/2 is up, line protocol is up (connected)
# Hardware is Forty Gigabit Ethernet, address is 70b3.17ff.6561 (bia 70b3.17ff.6561)
# MTU 1500 bytes, BW 40000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive set (10 sec)
# Full-duplex, 40Gb/s, link type is force-up, media type is QSFP 40G SR4 SFP
# input flow-control is on, output flow-control is off
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input 00:00:00, output 00:00:03, output hang never
# Last clearing of "show interface" counters 20:01:24
# Input queue: 0/2000/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 293000 bits/sec, 454 packets/sec
# 5 minute output rate 58000 bits/sec, 104 packets/sec
# 32521304 packets input, 2684387777 bytes, 0 no buffer
# Received 1481610 broadcasts (1476582 multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 1476582 multicast, 0 pause input
# 0 input packets with dribble condition detected
# 7498024 packets output, 525513005 bytes, 0 underruns
# 0 output errors, 0 collisions, 2 interface resets
# 2 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# FortyGigabitEthernet1/0/3 is down, line protocol is down (notconnect)
# Hardware is Forty Gigabit Ethernet, address is 70b3.17ff.6562 (bia 70b3.17ff.6562)
# MTU 1500 bytes, BW 40000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive set (10 sec)
# Auto-duplex, Auto-speed, link type is auto, media type is unknown
# input flow-control is on, output flow-control is off
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output never, output hang never
# Last clearing of "show interface" counters 20:01:24
# Input queue: 0/2000/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 0 multicast, 0 pause input
# 0 input packets with dribble condition detected
# 0 packets output, 0 bytes, 0 underruns
# 0 output errors, 0 collisions, 2 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# FortyGigabitEthernet1/0/4 is up, line protocol is up (connected)
# Hardware is Forty Gigabit Ethernet, address is 70b3.17ff.6563 (bia 70b3.17ff.6563)
# MTU 1500 bytes, BW 40000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive set (10 sec)
# Full-duplex, 40Gb/s, link type is force-up, media type is QSFP 40G SR BD SFP
# input flow-control is on, output flow-control is off
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input 00:00:15, output 00:00:03, output hang never
# Last clearing of "show interface" counters 20:01:24
# Input queue: 0/2000/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 102000 bits/sec, 186 packets/sec
# 5 minute output rate 329000 bits/sec, 524 packets/sec
# 13376239 packets input, 910225278 bytes, 0 no buffer
# Received 6304 broadcasts (6304 multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 6304 multicast, 0 pause input
# 0 input packets with dribble condition detected
# 37674953 packets output, 3020267756 bytes, 0 underruns
# 0 output errors, 0 collisions, 2 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
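# ---------------------------------------------------------------------------
# Connected ports report a concrete duplex/speed/media line (e.g. the
# "Full-duplex, 40Gb/s, ... QSFP 40G SR BD SFP" line above), while
# not-connected or inactive ports report "Auto-duplex, Auto-speed, ...
# unknown". A minimal sketch, with an illustrative helper name, that splits
# that line into its fields; note the management port spells "Full Duplex"
# with a space, which the character class below tolerates:
import re

_DUPLEX = re.compile(
    r'^(?P<duplex>(?:Full|Half|Auto)[ -][Dd]uplex), (?P<speed>[^,]+), '
    r'link type is (?P<link>[^,]+), media type is (?P<media>.+)$'
)

def parse_duplex(line):
    """Return a dict of duplex, speed, link type and media type, or None."""
    m = _DUPLEX.match(line.strip())
    return m.groupdict() if m else None

# Example: parse_duplex(
#     "Auto-duplex, Auto-speed, link type is auto, media type is unknown")
# returns {"duplex": "Auto-duplex", "speed": "Auto-speed",
#          "link": "auto", "media": "unknown"}.
# ---------------------------------------------------------------------------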
# FortyGigabitEthernet1/0/5 is down, line protocol is down (inactive)
# Hardware is Forty Gigabit Ethernet, address is 70b3.17ff.6564 (bia 70b3.17ff.6564)
# MTU 1500 bytes, BW 40000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive set (10 sec)
# Auto-duplex, Auto-speed, link type is auto, media type is unknown
# input flow-control is on, output flow-control is off
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output never, output hang never
# Last clearing of "show interface" counters 20:01:28
# Input queue: 0/2000/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 0 multicast, 0 pause input
# 0 input packets with dribble condition detected
# 0 packets output, 0 bytes, 0 underruns
# 0 output errors, 0 collisions, 2 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# FortyGigabitEthernet1/0/6 is down, line protocol is down (inactive)
# Hardware is Forty Gigabit Ethernet, address is 70b3.17ff.6565 (bia 70b3.17ff.6565)
# MTU 1500 bytes, BW 40000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive set (10 sec)
# Auto-duplex, Auto-speed, link type is auto, media type is unknown
# input flow-control is on, output flow-control is off
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output never, output hang never
# Last clearing of "show interface" counters 20:01:28
# Input queue: 0/2000/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 0 multicast, 0 pause input
# 0 input packets with dribble condition detected
# 0 packets output, 0 bytes, 0 underruns
# 0 output errors, 0 collisions, 2 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# FortyGigabitEthernet1/0/7 is down, line protocol is down (inactive)
# Hardware is Forty Gigabit Ethernet, address is 70b3.17ff.6566 (bia 70b3.17ff.6566)
# MTU 1500 bytes, BW 40000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive set (10 sec)
# Auto-duplex, Auto-speed, link type is auto, media type is unknown
# input flow-control is on, output flow-control is off
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output never, output hang never
# Last clearing of "show interface" counters 20:01:33
# Input queue: 0/2000/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 0 multicast, 0 pause input
# 0 input packets with dribble condition detected
# 0 packets output, 0 bytes, 0 underruns
# 0 output errors, 0 collisions, 2 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# FortyGigabitEthernet1/0/8 is down, line protocol is down (inactive)
# Hardware is Forty Gigabit Ethernet, address is 70b3.17ff.6567 (bia 70b3.17ff.6567)
# MTU 1500 bytes, BW 40000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive set (10 sec)
# Auto-duplex, Auto-speed, link type is auto, media type is unknown
# input flow-control is on, output flow-control is off
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output never, output hang never
# Last clearing of "show interface" counters 20:01:33
# Input queue: 0/2000/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 0 multicast, 0 pause input
# 0 input packets with dribble condition detected
# 0 packets output, 0 bytes, 0 underruns
# 0 output errors, 0 collisions, 2 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# FortyGigabitEthernet1/0/9 is down, line protocol is down (inactive)
# Hardware is Forty Gigabit Ethernet, address is 70b3.17ff.6568 (bia 70b3.17ff.6568)
# MTU 1500 bytes, BW 40000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive set (10 sec)
# Auto-duplex, Auto-speed, link type is auto, media type is unknown
# input flow-control is on, output flow-control is off
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output never, output hang never
# Last clearing of "show interface" counters 20:01:33
# Input queue: 0/2000/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 0 multicast, 0 pause input
# 0 input packets with dribble condition detected
# 0 packets output, 0 bytes, 0 underruns
# 0 output errors, 0 collisions, 2 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# FortyGigabitEthernet1/0/10 is down, line protocol is down (inactive)
# Hardware is Forty Gigabit Ethernet, address is 70b3.17ff.6569 (bia 70b3.17ff.6569)
# MTU 1500 bytes, BW 40000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive set (10 sec)
# Auto-duplex, Auto-speed, link type is auto, media type is unknown
# input flow-control is on, output flow-control is off
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output never, output hang never
# Last clearing of "show interface" counters 20:01:37
# Input queue: 0/2000/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 0 multicast, 0 pause input
# 0 input packets with dribble condition detected
# 0 packets output, 0 bytes, 0 underruns
# 0 output errors, 0 collisions, 2 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# FortyGigabitEthernet1/0/11 is down, line protocol is down (inactive)
# Hardware is Forty Gigabit Ethernet, address is 70b3.17ff.656a (bia 70b3.17ff.656a)
# MTU 1500 bytes, BW 40000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive set (10 sec)
# Auto-duplex, Auto-speed, link type is auto, media type is unknown
# input flow-control is on, output flow-control is off
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output never, output hang never
# Last clearing of "show interface" counters 20:01:37
# Input queue: 0/2000/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 0 multicast, 0 pause input
# 0 input packets with dribble condition detected
# 0 packets output, 0 bytes, 0 underruns
# 0 output errors, 0 collisions, 2 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# FortyGigabitEthernet1/0/12 is down, line protocol is down (inactive)
# Hardware is Forty Gigabit Ethernet, address is 70b3.17ff.656b (bia 70b3.17ff.656b)
# MTU 1500 bytes, BW 40000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive set (10 sec)
# Auto-duplex, Auto-speed, link type is auto, media type is unknown
# input flow-control is on, output flow-control is off
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output never, output hang never
# Last clearing of "show interface" counters 20:01:37
# Input queue: 0/2000/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 0 multicast, 0 pause input
# 0 input packets with dribble condition detected
# 0 packets output, 0 bytes, 0 underruns
# 0 output errors, 0 collisions, 2 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# FortyGigabitEthernet1/0/13 is down, line protocol is down (inactive)
# Hardware is Forty Gigabit Ethernet, address is 70b3.17ff.656c (bia 70b3.17ff.656c)
# MTU 1500 bytes, BW 40000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive set (10 sec)
# Auto-duplex, Auto-speed, link type is auto, media type is unknown
# input flow-control is on, output flow-control is off
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output never, output hang never
# Last clearing of "show interface" counters 20:01:41
# Input queue: 0/2000/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 0 multicast, 0 pause input
# 0 input packets with dribble condition detected
# 0 packets output, 0 bytes, 0 underruns
# 0 output errors, 0 collisions, 2 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# FortyGigabitEthernet1/0/14 is down, line protocol is down (inactive)
# Hardware is Forty Gigabit Ethernet, address is 70b3.17ff.656d (bia 70b3.17ff.656d)
# MTU 1500 bytes, BW 40000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive set (10 sec)
# Auto-duplex, Auto-speed, link type is auto, media type is unknown
# input flow-control is on, output flow-control is off
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output never, output hang never
# Last clearing of "show interface" counters 20:01:41
# Input queue: 0/2000/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 0 multicast, 0 pause input
# 0 input packets with dribble condition detected
# 0 packets output, 0 bytes, 0 underruns
# 0 output errors, 0 collisions, 2 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# FortyGigabitEthernet1/0/15 is down, line protocol is down (inactive)
# Hardware is Forty Gigabit Ethernet, address is 70b3.17ff.656e (bia 70b3.17ff.656e)
# MTU 1500 bytes, BW 40000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive set (10 sec)
# Auto-duplex, Auto-speed, link type is auto, media type is unknown
# input flow-control is on, output flow-control is off
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output never, output hang never
# Last clearing of "show interface" counters 20:01:41
# Input queue: 0/2000/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 0 multicast, 0 pause input
# 0 input packets with dribble condition detected
# 0 packets output, 0 bytes, 0 underruns
# 0 output errors, 0 collisions, 2 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# FortyGigabitEthernet1/0/16 is down, line protocol is down (inactive)
# Hardware is Forty Gigabit Ethernet, address is 70b3.17ff.656f (bia 70b3.17ff.656f)
# MTU 1500 bytes, BW 40000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive set (10 sec)
# Auto-duplex, Auto-speed, link type is auto, media type is unknown
# input flow-control is on, output flow-control is off
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output never, output hang never
# Last clearing of "show interface" counters 20:01:45
# Input queue: 0/2000/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 0 multicast, 0 pause input
# 0 input packets with dribble condition detected
# 0 packets output, 0 bytes, 0 underruns
# 0 output errors, 0 collisions, 2 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# FortyGigabitEthernet1/0/17 is down, line protocol is down (inactive)
# Hardware is Forty Gigabit Ethernet, address is 70b3.17ff.6570 (bia 70b3.17ff.6570)
# MTU 1500 bytes, BW 40000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive set (10 sec)
# Auto-duplex, Auto-speed, link type is auto, media type is unknown
# input flow-control is on, output flow-control is off
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output never, output hang never
# Last clearing of "show interface" counters 20:01:45
# Input queue: 0/2000/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 0 multicast, 0 pause input
# 0 input packets with dribble condition detected
# 0 packets output, 0 bytes, 0 underruns
# 0 output errors, 0 collisions, 2 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# FortyGigabitEthernet1/0/18 is down, line protocol is down (inactive)
# Hardware is Forty Gigabit Ethernet, address is 70b3.17ff.6571 (bia 70b3.17ff.6571)
# MTU 1500 bytes, BW 40000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive set (10 sec)
# Auto-duplex, Auto-speed, link type is auto, media type is unknown
# input flow-control is on, output flow-control is off
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output never, output hang never
# Last clearing of "show interface" counters 20:01:45
# Input queue: 0/2000/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 0 multicast, 0 pause input
# 0 input packets with dribble condition detected
# 0 packets output, 0 bytes, 0 underruns
# 0 output errors, 0 collisions, 2 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# FortyGigabitEthernet1/0/19 is down, line protocol is down (inactive)
# Hardware is Forty Gigabit Ethernet, address is 70b3.17ff.6572 (bia 70b3.17ff.6572)
# MTU 1500 bytes, BW 40000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive set (10 sec)
# Auto-duplex, Auto-speed, link type is auto, media type is unknown
# input flow-control is on, output flow-control is off
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output never, output hang never
# Last clearing of "show interface" counters 20:01:50
# Input queue: 0/2000/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 0 multicast, 0 pause input
# 0 input packets with dribble condition detected
# 0 packets output, 0 bytes, 0 underruns
# 0 output errors, 0 collisions, 2 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# FortyGigabitEthernet1/0/20 is down, line protocol is down (inactive)
# Hardware is Forty Gigabit Ethernet, address is 70b3.17ff.6573 (bia 70b3.17ff.6573)
# MTU 1500 bytes, BW 40000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive set (10 sec)
# Auto-duplex, Auto-speed, link type is auto, media type is unknown
# input flow-control is on, output flow-control is off
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output never, output hang never
# Last clearing of "show interface" counters 20:01:50
# Input queue: 0/2000/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 0 multicast, 0 pause input
# 0 input packets with dribble condition detected
# 0 packets output, 0 bytes, 0 underruns
# 0 output errors, 0 collisions, 2 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# FortyGigabitEthernet1/0/21 is down, line protocol is down (inactive)
# Hardware is Forty Gigabit Ethernet, address is 70b3.17ff.6574 (bia 70b3.17ff.6574)
# MTU 1500 bytes, BW 40000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive set (10 sec)
# Auto-duplex, Auto-speed, link type is auto, media type is unknown
# input flow-control is on, output flow-control is off
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output never, output hang never
# Last clearing of "show interface" counters 20:01:50
# Input queue: 0/2000/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 0 multicast, 0 pause input
# 0 input packets with dribble condition detected
# 0 packets output, 0 bytes, 0 underruns
# 0 output errors, 0 collisions, 2 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# FortyGigabitEthernet1/0/22 is down, line protocol is down (inactive)
# Hardware is Forty Gigabit Ethernet, address is 70b3.17ff.6575 (bia 70b3.17ff.6575)
# MTU 1500 bytes, BW 40000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive set (10 sec)
# Auto-duplex, Auto-speed, link type is auto, media type is unknown
# input flow-control is on, output flow-control is off
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output never, output hang never
# Last clearing of "show interface" counters 20:01:54
# Input queue: 0/2000/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 0 multicast, 0 pause input
# 0 input packets with dribble condition detected
# 0 packets output, 0 bytes, 0 underruns
# 0 output errors, 0 collisions, 2 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# FortyGigabitEthernet1/0/23 is down, line protocol is down (inactive)
# Hardware is Forty Gigabit Ethernet, address is 70b3.17ff.6576 (bia 70b3.17ff.6576)
# MTU 1500 bytes, BW 40000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive set (10 sec)
# Auto-duplex, Auto-speed, link type is auto, media type is unknown
# input flow-control is on, output flow-control is off
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output never, output hang never
# Last clearing of "show interface" counters 20:01:54
# Input queue: 0/2000/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 0 multicast, 0 pause input
# 0 input packets with dribble condition detected
# 0 packets output, 0 bytes, 0 underruns
# 0 output errors, 0 collisions, 2 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# FortyGigabitEthernet1/0/24 is down, line protocol is down (inactive)
# Hardware is Forty Gigabit Ethernet, address is 70b3.17ff.6577 (bia 70b3.17ff.6577)
# MTU 1500 bytes, BW 40000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive set (10 sec)
# Auto-duplex, Auto-speed, link type is auto, media type is unknown
# input flow-control is on, output flow-control is off
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output never, output hang never
# Last clearing of "show interface" counters 20:01:54
# Input queue: 0/2000/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 0 multicast, 0 pause input
# 0 input packets with dribble condition detected
# 0 packets output, 0 bytes, 0 underruns
# 0 output errors, 0 collisions, 2 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# FortyGigabitEthernet1/0/25 is down, line protocol is down (inactive)
# Hardware is Forty Gigabit Ethernet, address is 70b3.17ff.6578 (bia 70b3.17ff.6578)
# MTU 1500 bytes, BW 40000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive set (10 sec)
# Auto-duplex, Auto-speed, link type is auto, media type is unknown
# input flow-control is on, output flow-control is off
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output never, output hang never
# Last clearing of "show interface" counters 20:01:58
# Input queue: 0/2000/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 0 multicast, 0 pause input
# 0 input packets with dribble condition detected
# 0 packets output, 0 bytes, 0 underruns
# 0 output errors, 0 collisions, 2 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# FortyGigabitEthernet1/0/26 is down, line protocol is down (inactive)
# Hardware is Forty Gigabit Ethernet, address is 70b3.17ff.6579 (bia 70b3.17ff.6579)
# MTU 1500 bytes, BW 40000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive set (10 sec)
# Auto-duplex, Auto-speed, link type is auto, media type is unknown
# input flow-control is on, output flow-control is off
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output never, output hang never
# Last clearing of "show interface" counters 20:01:58
# Input queue: 0/2000/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 0 multicast, 0 pause input
# 0 input packets with dribble condition detected
# 0 packets output, 0 bytes, 0 underruns
# 0 output errors, 0 collisions, 2 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# FortyGigabitEthernet1/0/27 is down, line protocol is down (inactive)
# Hardware is Forty Gigabit Ethernet, address is 70b3.17ff.657a (bia 70b3.17ff.657a)
# MTU 1500 bytes, BW 40000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive set (10 sec)
# Auto-duplex, Auto-speed, link type is auto, media type is unknown
# input flow-control is on, output flow-control is off
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output never, output hang never
# Last clearing of "show interface" counters 20:01:58
# Input queue: 0/2000/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 0 multicast, 0 pause input
# 0 input packets with dribble condition detected
# 0 packets output, 0 bytes, 0 underruns
# 0 output errors, 0 collisions, 2 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# FortyGigabitEthernet1/0/28 is down, line protocol is down (inactive)
# Hardware is Forty Gigabit Ethernet, address is 70b3.17ff.657b (bia 70b3.17ff.657b)
# MTU 1500 bytes, BW 40000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive set (10 sec)
# Auto-duplex, Auto-speed, link type is auto, media type is unknown
# input flow-control is on, output flow-control is off
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output never, output hang never
# Last clearing of "show interface" counters 20:02:02
# Input queue: 0/2000/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 0 multicast, 0 pause input
# 0 input packets with dribble condition detected
# 0 packets output, 0 bytes, 0 underruns
# 0 output errors, 0 collisions, 2 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# FortyGigabitEthernet1/0/29 is down, line protocol is down (inactive)
# Hardware is Forty Gigabit Ethernet, address is 70b3.17ff.657c (bia 70b3.17ff.657c)
# MTU 1500 bytes, BW 40000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive set (10 sec)
# Auto-duplex, Auto-speed, link type is auto, media type is unknown
# input flow-control is on, output flow-control is off
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output never, output hang never
# Last clearing of "show interface" counters 20:02:02
# Input queue: 0/2000/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 0 multicast, 0 pause input
# 0 input packets with dribble condition detected
# 0 packets output, 0 bytes, 0 underruns
# 0 output errors, 0 collisions, 2 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# FortyGigabitEthernet1/0/30 is down, line protocol is down (inactive)
# Hardware is Forty Gigabit Ethernet, address is 70b3.17ff.657d (bia 70b3.17ff.657d)
# MTU 1500 bytes, BW 40000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive set (10 sec)
# Auto-duplex, Auto-speed, link type is auto, media type is unknown
# input flow-control is on, output flow-control is off
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output never, output hang never
# Last clearing of "show interface" counters 20:02:02
# Input queue: 0/2000/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 0 multicast, 0 pause input
# 0 input packets with dribble condition detected
# 0 packets output, 0 bytes, 0 underruns
# 0 output errors, 0 collisions, 2 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# FortyGigabitEthernet1/0/31 is down, line protocol is down (inactive)
# Hardware is Forty Gigabit Ethernet, address is 70b3.17ff.657e (bia 70b3.17ff.657e)
# MTU 1500 bytes, BW 40000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive set (10 sec)
# Auto-duplex, Auto-speed, link type is auto, media type is unknown
# input flow-control is on, output flow-control is off
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output never, output hang never
# Last clearing of "show interface" counters 20:02:07
# Input queue: 0/2000/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 0 multicast, 0 pause input
# 0 input packets with dribble condition detected
# 0 packets output, 0 bytes, 0 underruns
# 0 output errors, 0 collisions, 2 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# FortyGigabitEthernet1/0/32 is down, line protocol is down (inactive)
# Hardware is Forty Gigabit Ethernet, address is 70b3.17ff.657f (bia 70b3.17ff.657f)
# MTU 1500 bytes, BW 40000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive set (10 sec)
# Auto-duplex, Auto-speed, link type is auto, media type is unknown
# input flow-control is on, output flow-control is off
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output never, output hang never
# Last clearing of "show interface" counters 20:02:07
# Input queue: 0/2000/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 0 multicast, 0 pause input
# 0 input packets with dribble condition detected
# 0 packets output, 0 bytes, 0 underruns
# 0 output errors, 0 collisions, 2 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# HundredGigE1/0/33 is down, line protocol is down (inactive)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.6580 (bia 70b3.17ff.6580)
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive set (10 sec)
# Auto-duplex, Auto-speed, link type is auto, media type is unknown
# Fec is auto
# input flow-control is on, output flow-control is off
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output never, output hang never
# Last clearing of "show interface" counters 20:02:07
# Input queue: 0/2000/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 0 multicast, 0 pause input
# 0 input packets with dribble condition detected
# 0 packets output, 0 bytes, 0 underruns
# 0 output errors, 0 collisions, 2 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# HundredGigE1/0/34 is down, line protocol is down (inactive)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.6581 (bia 70b3.17ff.6581)
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive set (10 sec)
# Auto-duplex, Auto-speed, link type is auto, media type is unknown
# Fec is auto
# input flow-control is on, output flow-control is off
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output never, output hang never
# Last clearing of "show interface" counters 20:02:11
# Input queue: 0/2000/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 0 multicast, 0 pause input
# 0 input packets with dribble condition detected
# 0 packets output, 0 bytes, 0 underruns
# 0 output errors, 0 collisions, 2 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# HundredGigE1/0/35 is up, line protocol is up (connected)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Description: connected to Ixia 1/6
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation 802.1Q Virtual LAN, Vlan ID 1., loopback not set
# Keepalive set (10 sec)
# Full-duplex, 100Gb/s, link type is force-up, media type is QSFP 100G SR4
# Fec is auto
# input flow-control is on, output flow-control is off
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input 00:00:18, output 00:00:00, output hang never
# Last clearing of "show interface" counters 20:02:11
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 25000 bits/sec, 15 packets/sec
# 550971 packets input, 121771829 bytes, 0 no buffer
# Received 172754 broadcasts (0 IP multicasts)
# 0 runts, 206 giants, 0 throttles
# 206 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 172604 multicast, 0 pause input
# 0 input packets with dribble condition detected
# 1536769 packets output, 437624881 bytes, 0 underruns
# 0 output errors, 0 collisions, 33 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# HundredGigE1/0/35.1 is up, line protocol is up (connected)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.19.1/24
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation 802.1Q Virtual LAN, Vlan ID 501.
# ARP type: ARPA, ARP Timeout 04:00:00
# Keepalive set (10 sec)
# 13266 packets input, 2503842 bytes
# 13769 packets output, 2168924 bytes
# Last clearing of "show interface" counters never
# HundredGigE1/0/35.2 is up, line protocol is up (connected)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.76.1/24
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation 802.1Q Virtual LAN, Vlan ID 502.
# ARP type: ARPA, ARP Timeout 04:00:00
# Keepalive set (10 sec)
# 13254 packets input, 2501935 bytes
# 13784 packets output, 2170079 bytes
# Last clearing of "show interface" counters never
# HundredGigE1/0/35.3 is up, line protocol is up (connected)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.135.1/24
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation 802.1Q Virtual LAN, Vlan ID 503.
# ARP type: ARPA, ARP Timeout 04:00:00
# Keepalive set (10 sec)
# 13281 packets input, 2505791 bytes
# 13764 packets output, 2169079 bytes
# Last clearing of "show interface" counters never
# HundredGigE1/0/35.4 is up, line protocol is up (connected)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.196.1/24
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation 802.1Q Virtual LAN, Vlan ID 504.
# ARP type: ARPA, ARP Timeout 04:00:00
# Keepalive set (10 sec)
# 13273 packets input, 2500301 bytes
# 13766 packets output, 2168845 bytes
# Last clearing of "show interface" counters never
# HundredGigE1/0/35.5 is up, line protocol is up (connected)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.4.1/24
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation 802.1Q Virtual LAN, Vlan ID 505.
# ARP type: ARPA, ARP Timeout 04:00:00
# Keepalive set (10 sec)
# 13253 packets input, 2497502 bytes
# 13750 packets output, 2167640 bytes
# Last clearing of "show interface" counters never
# HundredGigE1/0/35.6 is up, line protocol is up (connected)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.69.1/24
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation 802.1Q Virtual LAN, Vlan ID 506.
# ARP type: ARPA, ARP Timeout 04:00:00
# Keepalive set (10 sec)
# 13261 packets input, 2502193 bytes
# 13744 packets output, 2167636 bytes
# Last clearing of "show interface" counters never
# HundredGigE1/0/35.7 is up, line protocol is up (connected)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.136.1/24
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation 802.1Q Virtual LAN, Vlan ID 507.
# ARP type: ARPA, ARP Timeout 04:00:00
# Keepalive set (10 sec)
# 13350 packets input, 2513375 bytes
# 13781 packets output, 2169851 bytes
# Last clearing of "show interface" counters never
# HundredGigE1/0/35.8 is up, line protocol is up (connected)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.205.1/24
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation 802.1Q Virtual LAN, Vlan ID 508.
# ARP type: ARPA, ARP Timeout 04:00:00
# Keepalive set (10 sec)
# 13292 packets input, 2510082 bytes
# 13777 packets output, 2169702 bytes
# Last clearing of "show interface" counters never
# HundredGigE1/0/35.9 is up, line protocol is up (connected)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.21.1/24
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation 802.1Q Virtual LAN, Vlan ID 509.
# ARP type: ARPA, ARP Timeout 04:00:00
# Keepalive set (10 sec)
# 13332 packets input, 2511802 bytes
# 13770 packets output, 2169056 bytes
# Last clearing of "show interface" counters never
# HundredGigE1/0/35.10 is up, line protocol is up (connected)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.94.1/24
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation 802.1Q Virtual LAN, Vlan ID 510.
# ARP type: ARPA, ARP Timeout 04:00:00
# Keepalive set (10 sec)
# 13282 packets input, 2502910 bytes
# 13777 packets output, 2168425 bytes
# Last clearing of "show interface" counters never
# HundredGigE1/0/35.11 is up, line protocol is up (connected)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.169.1/24
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation 802.1Q Virtual LAN, Vlan ID 511.
# ARP type: ARPA, ARP Timeout 04:00:00
# Keepalive set (10 sec)
# 13281 packets input, 2501618 bytes
# 13756 packets output, 2168163 bytes
# Last clearing of "show interface" counters never
# HundredGigE1/0/35.12 is up, line protocol is up (connected)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.246.1/24
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation 802.1Q Virtual LAN, Vlan ID 512.
# ARP type: ARPA, ARP Timeout 04:00:00
# Keepalive set (10 sec)
# 13255 packets input, 2502717 bytes
# 13765 packets output, 2168956 bytes
# Last clearing of "show interface" counters never
# HundredGigE1/0/35.13 is up, line protocol is up (connected)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.70.1/24
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation 802.1Q Virtual LAN, Vlan ID 513.
# ARP type: ARPA, ARP Timeout 04:00:00
# Keepalive set (10 sec)
# 13266 packets input, 2502358 bytes
# 13773 packets output, 2169451 bytes
# Last clearing of "show interface" counters never
# HundredGigE1/0/35.14 is up, line protocol is up (connected)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.151.1/24
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation 802.1Q Virtual LAN, Vlan ID 514.
# ARP type: ARPA, ARP Timeout 04:00:00
# Keepalive set (10 sec)
# 13347 packets input, 2513180 bytes
# 13794 packets output, 2171050 bytes
# Last clearing of "show interface" counters never
# HundredGigE1/0/35.15 is up, line protocol is up (connected)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.234.1/24
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation 802.1Q Virtual LAN, Vlan ID 515.
# ARP type: ARPA, ARP Timeout 04:00:00
# Keepalive set (10 sec)
# 13260 packets input, 2497442 bytes
# 13787 packets output, 2169487 bytes
# Last clearing of "show interface" counters never
# HundredGigE1/0/35.16 is up, line protocol is up (connected)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.64.1/24
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation 802.1Q Virtual LAN, Vlan ID 516.
# ARP type: ARPA, ARP Timeout 04:00:00
# Keepalive set (10 sec)
# 13336 packets input, 2512146 bytes
# 13773 packets output, 2169512 bytes
# Last clearing of "show interface" counters never
# HundredGigE1/0/35.17 is up, line protocol is up (connected)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.151.1/24
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation 802.1Q Virtual LAN, Vlan ID 517.
# ARP type: ARPA, ARP Timeout 04:00:00
# Keepalive set (10 sec)
# 13287 packets input, 2505612 bytes
# 13796 packets output, 2170930 bytes
# Last clearing of "show interface" counters never
# HundredGigE1/0/35.18 is up, line protocol is up (connected)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.240.1/24
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation 802.1Q Virtual LAN, Vlan ID 518.
# ARP type: ARPA, ARP Timeout 04:00:00
# Keepalive set (10 sec)
# 13263 packets input, 2502019 bytes
# 13780 packets output, 2169941 bytes
# Last clearing of "show interface" counters never
# HundredGigE1/0/35.19 is up, line protocol is up (connected)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.76.1/24
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation 802.1Q Virtual LAN, Vlan ID 519.
# ARP type: ARPA, ARP Timeout 04:00:00
# Keepalive set (10 sec)
# 13353 packets input, 2509614 bytes
# 13787 packets output, 2170375 bytes
# Last clearing of "show interface" counters never
# HundredGigE1/0/35.20 is up, line protocol is up (connected)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.169.1/24
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation 802.1Q Virtual LAN, Vlan ID 520.
# ARP type: ARPA, ARP Timeout 04:00:00
# Keepalive set (10 sec)
# 13280 packets input, 2504934 bytes
# 13772 packets output, 2169331 bytes
# Last clearing of "show interface" counters never
# HundredGigE1/0/35.101 is up, line protocol is up (connected)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 192.168.9.1/24
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation 802.1Q Virtual LAN, Vlan ID 101.
# ARP type: ARPA, ARP Timeout 04:00:00
# Keepalive set (10 sec)
# 25478 packets input, 2598532 bytes
# 0 packets output, 0 bytes
# Last clearing of "show interface" counters never
# HundredGigE1/0/35.102 is up, line protocol is up (connected)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 192.168.106.1/24
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation 802.1Q Virtual LAN, Vlan ID 102.
# ARP type: ARPA, ARP Timeout 04:00:00
# Keepalive set (10 sec)
# 25477 packets input, 2598430 bytes
# 0 packets output, 0 bytes
# Last clearing of "show interface" counters never
# HundredGigE1/0/35.103 is up, line protocol is up (connected)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 192.168.205.1/24
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation 802.1Q Virtual LAN, Vlan ID 103.
# ARP type: ARPA, ARP Timeout 04:00:00
# Keepalive set (10 sec)
# 25479 packets input, 2598634 bytes
# 0 packets output, 0 bytes
# Last clearing of "show interface" counters never
# HundredGigE1/0/35.104 is up, line protocol is up (connected)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 192.168.51.1/24
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation 802.1Q Virtual LAN, Vlan ID 104.
# ARP type: ARPA, ARP Timeout 04:00:00
# Keepalive set (10 sec)
# 25479 packets input, 2598634 bytes
# 0 packets output, 0 bytes
# Last clearing of "show interface" counters never
# HundredGigE1/0/35.105 is up, line protocol is up (connected)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 192.168.154.1/24
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation 802.1Q Virtual LAN, Vlan ID 105.
# ARP type: ARPA, ARP Timeout 04:00:00
# Keepalive set (10 sec)
# 25477 packets input, 2598430 bytes
# 0 packets output, 0 bytes
# Last clearing of "show interface" counters never
# HundredGigE1/0/35.106 is up, line protocol is up (connected)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 192.168.4.1/24
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation 802.1Q Virtual LAN, Vlan ID 106.
# ARP type: ARPA, ARP Timeout 04:00:00
# Keepalive set (10 sec)
# 25479 packets input, 2598634 bytes
# 0 packets output, 0 bytes
# Last clearing of "show interface" counters never
# HundredGigE1/0/35.107 is up, line protocol is up (connected)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 192.168.111.1/24
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation 802.1Q Virtual LAN, Vlan ID 107.
# ARP type: ARPA, ARP Timeout 04:00:00
# Keepalive set (10 sec)
# 25476 packets input, 2598344 bytes
# 0 packets output, 0 bytes
# Last clearing of "show interface" counters never
# HundredGigE1/0/35.108 is up, line protocol is up (connected)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 192.168.220.1/24
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation 802.1Q Virtual LAN, Vlan ID 108.
# ARP type: ARPA, ARP Timeout 04:00:00
# Keepalive set (10 sec)
# 25478 packets input, 2598532 bytes
# 0 packets output, 0 bytes
# Last clearing of "show interface" counters never
# HundredGigE1/0/35.109 is up, line protocol is up (connected)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 192.168.76.1/24
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation 802.1Q Virtual LAN, Vlan ID 109.
# ARP type: ARPA, ARP Timeout 04:00:00
# Keepalive set (10 sec)
# 25478 packets input, 2598532 bytes
# 0 packets output, 0 bytes
# Last clearing of "show interface" counters never
# HundredGigE1/0/35.110 is up, line protocol is up (connected)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 192.168.189.1/24
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation 802.1Q Virtual LAN, Vlan ID 110.
# ARP type: ARPA, ARP Timeout 04:00:00
# Keepalive set (10 sec)
# 25478 packets input, 2598532 bytes
# 0 packets output, 0 bytes
# Last clearing of "show interface" counters never
# HundredGigE1/0/36 is down, line protocol is down (notconnect)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.6583 (bia 70b3.17ff.6583)
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive set (10 sec)
# Auto-duplex, Auto-speed, link type is auto, media type is unknown
# Fec is auto
# input flow-control is on, output flow-control is off
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output never, output hang never
# Last clearing of "show interface" counters 20:02:32
# Input queue: 0/2000/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 0 multicast, 0 pause input
# 0 input packets with dribble condition detected
# 0 packets output, 0 bytes, 0 underruns
# 0 output errors, 0 collisions, 2 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# HundredGigE1/0/37 is down, line protocol is down (notconnect)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.6584 (bia 70b3.17ff.6584)
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive set (10 sec)
# Auto-duplex, Auto-speed, link type is auto, media type is unknown
# Fec is auto
# input flow-control is on, output flow-control is off
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output never, output hang never
# Last clearing of "show interface" counters 20:02:32
# Input queue: 0/2000/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 0 multicast, 0 pause input
# 0 input packets with dribble condition detected
# 0 packets output, 0 bytes, 0 underruns
# 0 output errors, 0 collisions, 2 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# HundredGigE1/0/38 is down, line protocol is down (notconnect)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.6585 (bia 70b3.17ff.6585)
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive set (10 sec)
# Auto-duplex, Auto-speed, link type is auto, media type is unknown
# Fec is auto
# input flow-control is on, output flow-control is off
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output never, output hang never
# Last clearing of "show interface" counters 20:02:32
# Input queue: 0/2000/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 0 multicast, 0 pause input
# 0 input packets with dribble condition detected
# 0 packets output, 0 bytes, 0 underruns
# 0 output errors, 0 collisions, 2 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# HundredGigE1/0/39 is down, line protocol is down (notconnect)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.6586 (bia 70b3.17ff.6586)
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive set (10 sec)
# Auto-duplex, Auto-speed, link type is auto, media type is unknown
# Fec is auto
# input flow-control is on, output flow-control is off
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output never, output hang never
# Last clearing of "show interface" counters 20:02:37
# Input queue: 0/2000/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 0 multicast, 0 pause input
# 0 input packets with dribble condition detected
# 0 packets output, 0 bytes, 0 underruns
# 0 output errors, 0 collisions, 2 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# HundredGigE1/0/40 is down, line protocol is down (notconnect)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.6587 (bia 70b3.17ff.6587)
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive set (10 sec)
# Auto-duplex, Auto-speed, link type is auto, media type is unknown
# Fec is auto
# input flow-control is on, output flow-control is off
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output never, output hang never
# Last clearing of "show interface" counters 20:02:37
# Input queue: 0/2000/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 0 multicast, 0 pause input
# 0 input packets with dribble condition detected
# 0 packets output, 0 bytes, 0 underruns
# 0 output errors, 0 collisions, 2 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# HundredGigE1/0/41 is up, line protocol is up (connected)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.4.2/24
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive set (10 sec)
# Full-duplex, 100Gb/s, link type is force-up, media type is QSFP 100G SR4
# Fec is auto
# input flow-control is on, output flow-control is off
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input 00:00:01, output 00:00:00, output hang never
# Last clearing of "show interface" counters 20:02:37
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 30 second input rate 39000 bits/sec, 50 packets/sec
# 30 second output rate 35000 bits/sec, 48 packets/sec
# 3581103 packets input, 340490834 bytes, 0 no buffer
# Received 20250 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 20089 multicast, 0 pause input
# 0 input packets with dribble condition detected
# 3494815 packets output, 323841840 bytes, 0 underruns
# 0 output errors, 0 collisions, 5 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# HundredGigE1/0/42 is down, line protocol is down (notconnect)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.6589 (bia 70b3.17ff.6589)
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive set (10 sec)
# Auto-duplex, Auto-speed, link type is auto, media type is unknown
# Fec is auto
# input flow-control is on, output flow-control is off
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output never, output hang never
# Last clearing of "show interface" counters 20:02:41
# Input queue: 0/2000/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 0 multicast, 0 pause input
# 0 input packets with dribble condition detected
# 0 packets output, 0 bytes, 0 underruns
# 0 output errors, 0 collisions, 2 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# HundredGigE1/0/43 is down, line protocol is down (notconnect)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.658a (bia 70b3.17ff.658a)
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive set (10 sec)
# Auto-duplex, Auto-speed, link type is auto, media type is unknown
# Fec is auto
# input flow-control is on, output flow-control is off
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output never, output hang never
# Last clearing of "show interface" counters 20:02:41
# Input queue: 0/2000/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 0 multicast, 0 pause input
# 0 input packets with dribble condition detected
# 0 packets output, 0 bytes, 0 underruns
# 0 output errors, 0 collisions, 2 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# HundredGigE1/0/44 is down, line protocol is down (notconnect)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.658b (bia 70b3.17ff.658b)
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive set (10 sec)
# Auto-duplex, Auto-speed, link type is auto, media type is unknown
# Fec is auto
# input flow-control is on, output flow-control is off
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output never, output hang never
# Last clearing of "show interface" counters 20:02:41
# Input queue: 0/2000/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 0 multicast, 0 pause input
# 0 input packets with dribble condition detected
# 0 packets output, 0 bytes, 0 underruns
# 0 output errors, 0 collisions, 2 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# HundredGigE1/0/45 is down, line protocol is down (notconnect)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.658c (bia 70b3.17ff.658c)
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive set (10 sec)
# Auto-duplex, Auto-speed, link type is auto, media type is unknown
# Fec is auto
# input flow-control is on, output flow-control is off
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output never, output hang never
# Last clearing of "show interface" counters 20:02:45
# Input queue: 0/2000/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 0 multicast, 0 pause input
# 0 input packets with dribble condition detected
# 0 packets output, 0 bytes, 0 underruns
# 0 output errors, 0 collisions, 2 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# HundredGigE1/0/46 is down, line protocol is down (notconnect)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.658d (bia 70b3.17ff.658d)
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive set (10 sec)
# Auto-duplex, Auto-speed, link type is auto, media type is unknown
# Fec is auto
# input flow-control is on, output flow-control is off
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output never, output hang never
# Last clearing of "show interface" counters 20:02:45
# Input queue: 0/2000/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 0 multicast, 0 pause input
# 0 input packets with dribble condition detected
# 0 packets output, 0 bytes, 0 underruns
# 0 output errors, 0 collisions, 2 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# HundredGigE1/0/47 is down, line protocol is down (notconnect)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.658e (bia 70b3.17ff.658e)
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive set (10 sec)
# Auto-duplex, Auto-speed, link type is auto, media type is unknown
# Fec is auto
# input flow-control is on, output flow-control is off
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output never, output hang never
# Last clearing of "show interface" counters 20:02:45
# Input queue: 0/2000/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 0 multicast, 0 pause input
# 0 input packets with dribble condition detected
# 0 packets output, 0 bytes, 0 underruns
# 0 output errors, 0 collisions, 2 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# HundredGigE1/0/48 is up, line protocol is up (connected)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 172.16.94.2/24
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation 802.1Q Virtual LAN, Vlan ID 1., loopback not set
# Keepalive set (10 sec)
# Full-duplex, 100Gb/s, link type is force-up, media type is QSFP 100G SR4
# Fec is auto
# input flow-control is on, output flow-control is off
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input 00:00:00, output 00:00:00, output hang never
# Last clearing of "show interface" counters 20:02:49
# Input queue: 3/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 30 second input rate 330000 bits/sec, 550 packets/sec
# 30 second output rate 301000 bits/sec, 547 packets/sec
# 39665255 packets input, 3012714995 bytes, 0 no buffer
# Received 548242 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 548066 multicast, 0 pause input
# 0 input packets with dribble condition detected
# 39424533 packets output, 2729787452 bytes, 0 underruns
# 0 output errors, 0 collisions, 16 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# HundredGigE1/0/48.1 is up, line protocol is up (connected)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 192.168.51.1/24
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation 802.1Q Virtual LAN, Vlan ID 201.
# ARP type: ARPA, ARP Timeout 04:00:00
# Keepalive set (10 sec)
# 3426695 packets input, 222402380 bytes
# 1729535 packets output, 112615606 bytes
# Last clearing of "show interface" counters never
# HundredGigE1/0/48.2 is up, line protocol is up (connected)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 192.168.205.1/24
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation 802.1Q Virtual LAN, Vlan ID 202.
# ARP type: ARPA, ARP Timeout 04:00:00
# Keepalive set (10 sec)
# 3430077 packets input, 222808882 bytes
# 1733061 packets output, 113033370 bytes
# Last clearing of "show interface" counters never
# HundredGigE1/0/48.3 is up, line protocol is up (connected)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 192.168.106.1/24
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation 802.1Q Virtual LAN, Vlan ID 203.
# ARP type: ARPA, ARP Timeout 04:00:00
# Keepalive set (10 sec)
# 3426685 packets input, 222402736 bytes
# 1729514 packets output, 112614680 bytes
# Last clearing of "show interface" counters never
# HundredGigE1/0/48.4 is up, line protocol is up (connected)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 192.168.9.1/24
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation 802.1Q Virtual LAN, Vlan ID 204.
# ARP type: ARPA, ARP Timeout 04:00:00
# Keepalive set (10 sec)
# 3426926 packets input, 222417026 bytes
# 1729722 packets output, 112627684 bytes
# Last clearing of "show interface" counters never
# HundredGigE1/0/48.5 is up, line protocol is up (connected)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 192.168.169.1/24
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation 802.1Q Virtual LAN, Vlan ID 205.
# ARP type: ARPA, ARP Timeout 04:00:00
# Keepalive set (10 sec)
# 3426916 packets input, 222416748 bytes
# 1729694 packets output, 112626186 bytes
# Last clearing of "show interface" counters never
# HundredGigE1/0/48.6 is up, line protocol is up (connected)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 192.168.76.1/24
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation 802.1Q Virtual LAN, Vlan ID 206.
# ARP type: ARPA, ARP Timeout 04:00:00
# Keepalive set (10 sec)
# 3427206 packets input, 222434908 bytes
# 1729813 packets output, 112633620 bytes
# Last clearing of "show interface" counters never
# HundredGigE1/0/48.7 is up, line protocol is up (connected)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 192.168.240.1/24
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation 802.1Q Virtual LAN, Vlan ID 207.
# ARP type: ARPA, ARP Timeout 04:00:00
# Keepalive set (10 sec)
# 3426971 packets input, 222419906 bytes
# 1729823 packets output, 112634178 bytes
# Last clearing of "show interface" counters never
# HundredGigE1/0/48.8 is up, line protocol is up (connected)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 192.168.151.1/24
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation 802.1Q Virtual LAN, Vlan ID 208.
# ARP type: ARPA, ARP Timeout 04:00:00
# Keepalive set (10 sec)
# 3426971 packets input, 222419256 bytes
# 1729821 packets output, 112634398 bytes
# Last clearing of "show interface" counters never
# HundredGigE1/0/48.9 is up, line protocol is up (connected)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 192.168.64.1/24
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation 802.1Q Virtual LAN, Vlan ID 209.
# ARP type: ARPA, ARP Timeout 04:00:00
# Keepalive set (10 sec)
# 3426848 packets input, 222412094 bytes
# 1729707 packets output, 112626654 bytes
# Last clearing of "show interface" counters never
# HundredGigE1/0/48.10 is up, line protocol is up (connected)
# Hardware is Hundred Gigabit Ethernet, address is 70b3.17ff.6500 (bia 70b3.17ff.6500)
# Internet address is 192.168.234.1/24
# MTU 1500 bytes, BW 100000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation 802.1Q Virtual LAN, Vlan ID 210.
# ARP type: ARPA, ARP Timeout 04:00:00
# Keepalive set (10 sec)
# 3427137 packets input, 222430124 bytes
# 1729798 packets output, 112632450 bytes
# Last clearing of "show interface" counters never
# Bluetooth0/4 is administratively down, line protocol is down
# Hardware is BT management port, address is 70b3.17ff.6560 (bia 70b3.17ff.6560)
# MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive set (10 sec)
# Unknown, Unknown, link type is auto, media type is RJ45
# output flow-control is unsupported, input flow-control is unsupported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output never, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/75/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 0 multicast, 0 pause input
# 0 packets output, 0 bytes, 0 underruns
# 0 output errors, 0 collisions, 0 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 1 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# Port-channel2 is up, line protocol is up (connected)
# Hardware is EtherChannel, address is 70b3.17ff.6561 (bia 70b3.17ff.6561)
# MTU 1500 bytes, BW 80000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive set (10 sec)
# Full-duplex, 40Gb/s, link type is force-up, media type is N/A
# input flow-control is on, output flow-control is unsupported
# Members in this channel: Fo1/0/2 Fo1/0/4
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input 00:00:00, output 00:04:37, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/2000/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 389000 bits/sec, 630 packets/sec
# 5 minute output rate 385000 bits/sec, 622 packets/sec
# 45955737 packets input, 3599101746 bytes, 0 no buffer
# Received 1489774 broadcasts (1484746 multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 1484746 multicast, 0 pause input
# 0 input packets with dribble condition detected
# 45228880 packets output, 3550088514 bytes, 0 underruns
# 0 output errors, 0 collisions, 1 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# Loopback1 is up, line protocol is up
# Hardware is Loopback
# Internet address is 192.168.154.1/32
# MTU 1514 bytes, BW 8000000 Kbit/sec, DLY 5000 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation LOOPBACK, loopback not set
# Keepalive set (10 sec)
# Last input 00:00:43, output never, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/75/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/0 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored, 0 abort
# 1383 packets output, 33608 bytes, 0 underruns
# 0 output errors, 0 collisions, 0 interface resets
# 1375 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Loopback10 is up, line protocol is up
# Hardware is Loopback
# MTU 1514 bytes, BW 8000000 Kbit/sec, DLY 5000 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation LOOPBACK, loopback not set
# Keepalive set (10 sec)
# Last input never, output never, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/75/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/0 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored, 0 abort
# 0 packets output, 0 bytes, 0 underruns
# 0 output errors, 0 collisions, 0 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Loopback101 is up, line protocol is up
# Hardware is Loopback
# Internet address is 10.204.1.2/32
# MTU 1514 bytes, BW 8000000 Kbit/sec, DLY 5000 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation LOOPBACK, loopback not set
# Keepalive set (10 sec)
# Last input 00:00:22, output never, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/75/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/0 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored, 0 abort
# 1338 packets output, 159232 bytes, 0 underruns
# 0 output errors, 0 collisions, 0 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Loopback102 is up, line protocol is up
# Hardware is Loopback
# Internet address is 10.154.1.2/32
# MTU 1514 bytes, BW 8000000 Kbit/sec, DLY 5000 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation LOOPBACK, loopback not set
# Keepalive set (10 sec)
# Last input 00:00:16, output never, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/75/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/0 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored, 0 abort
# 1343 packets output, 160112 bytes, 0 underruns
# 0 output errors, 0 collisions, 0 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Tunnel0 is up, line protocol is up
# Hardware is Tunnel
# Description: Pim Register Tunnel (Encap) for Embedded RP
# MTU 1452 bytes, BW 100 Kbit/sec, DLY 50000 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation TUNNEL, loopback not set
# Keepalive not set
# Tunnel linestate evaluation up
# Tunnel source 2001:db8:9b4a:ffe5::1 (Vlan102), destination ::
# Tunnel Subblocks:
# src-track:
# Tunnel0 source tracking subblock associated with Vlan102
# Set of tunnels with source Vlan102, 1 member (includes iterators), on interface <OK>
# Tunnel protocol/transport PIM/IPv6
# Tunnel TTL 65
# Tunnel transport MTU 1452 bytes
# Tunnel is transmit only
# Tunnel transmit bandwidth 8000 (kbps)
# Tunnel receive bandwidth 8000 (kbps)
# Last input never, output never, output hang never
# Last clearing of "show interface" counters 20:03:05
# Input queue: 0/75/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/0 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored, 0 abort
# 0 packets output, 0 bytes, 0 underruns
# 0 output errors, 0 collisions, 0 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Tunnel1 is up, line protocol is up
# Hardware is Tunnel
# Internet address is 172.16.25.1/24
# MTU 17868 bytes, BW 100 Kbit/sec, DLY 50000 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation TUNNEL, loopback not set
# Keepalive not set
# Tunnel linestate evaluation up
# Tunnel source 10.154.1.2 (Loopback102), destination 10.154.1.1
# Tunnel Subblocks:
# src-track:
# Tunnel1 source tracking subblock associated with Loopback102
# Set of tunnels with source Loopback102, 10 members (includes iterators), on interface <OK>
# Tunnel protocol/transport GRE/IP
# Key disabled, sequencing disabled
# Checksumming of packets disabled
# Tunnel TTL 255, Fast tunneling enabled
# Tunnel transport MTU 1476 bytes
# Tunnel transmit bandwidth 8000 (kbps)
# Tunnel receive bandwidth 8000 (kbps)
# Last input never, output 00:00:00, output hang never
# Last clearing of "show interface" counters 20:03:11
# Input queue: 0/75/0/0 (size/max/drops/flushes); Total output drops: 11176
# Queueing strategy: fifo
# Output queue: 0/0 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored, 0 abort
# 27765 packets output, 2695512 bytes, 0 underruns
# 0 output errors, 0 collisions, 0 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Tunnel2 is up, line protocol is up
# Hardware is Tunnel
# Internet address is 172.16.121.1/24
# MTU 17868 bytes, BW 100 Kbit/sec, DLY 50000 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation TUNNEL, loopback not set
# Keepalive not set
# Tunnel linestate evaluation up
# Tunnel source 10.154.1.2 (Loopback102), destination 10.154.1.1
# Tunnel Subblocks:
# src-track:
# Tunnel2 source tracking subblock associated with Loopback102
# Set of tunnels with source Loopback102, 10 members (includes iterators), on interface <OK>
# Tunnel protocol/transport GRE/IP
# Key disabled, sequencing disabled
# Checksumming of packets disabled
# Tunnel TTL 255, Fast tunneling enabled
# Tunnel transport MTU 1476 bytes
# Tunnel transmit bandwidth 8000 (kbps)
# Tunnel receive bandwidth 8000 (kbps)
# Last input never, output 00:00:04, output hang never
# Last clearing of "show interface" counters 20:03:11
# Input queue: 0/75/0/0 (size/max/drops/flushes); Total output drops: 11178
# Queueing strategy: fifo
# Output queue: 0/0 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored, 0 abort
# 27789 packets output, 2697642 bytes, 0 underruns
# 0 output errors, 0 collisions, 0 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Tunnel3 is up, line protocol is up
# Hardware is Tunnel
# Internet address is 172.16.219.1/24
# MTU 17868 bytes, BW 100 Kbit/sec, DLY 50000 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation TUNNEL, loopback not set
# Keepalive not set
# Tunnel linestate evaluation up
# Tunnel source 10.154.1.2 (Loopback102), destination 10.154.1.1
# Tunnel Subblocks:
# src-track:
# Tunnel3 source tracking subblock associated with Loopback102
# Set of tunnels with source Loopback102, 10 members (includes iterators), on interface <OK>
# Tunnel protocol/transport GRE/IP
# Key disabled, sequencing disabled
# Checksumming of packets disabled
# Tunnel TTL 255, Fast tunneling enabled
# Tunnel transport MTU 1476 bytes
# Tunnel transmit bandwidth 8000 (kbps)
# Tunnel receive bandwidth 8000 (kbps)
# Last input never, output 00:00:01, output hang never
# Last clearing of "show interface" counters 20:03:11
# Input queue: 0/75/0/0 (size/max/drops/flushes); Total output drops: 11179
# Queueing strategy: fifo
# Output queue: 0/0 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored, 0 abort
# 27780 packets output, 2696882 bytes, 0 underruns
# 0 output errors, 0 collisions, 0 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Tunnel4 is up, line protocol is up
# Hardware is Tunnel
# Internet address is 172.16.64.1/24
# MTU 17868 bytes, BW 100 Kbit/sec, DLY 50000 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation TUNNEL, loopback not set
# Keepalive not set
# Tunnel linestate evaluation up
# Tunnel source 10.154.1.2 (Loopback102), destination 10.154.1.1
# Tunnel Subblocks:
# src-track:
# Tunnel4 source tracking subblock associated with Loopback102
# Set of tunnels with source Loopback102, 10 members (includes iterators), on interface <OK>
# Tunnel protocol/transport GRE/IP
# Key disabled, sequencing disabled
# Checksumming of packets disabled
# Tunnel TTL 255, Fast tunneling enabled
# Tunnel transport MTU 1476 bytes
# Tunnel transmit bandwidth 8000 (kbps)
# Tunnel receive bandwidth 8000 (kbps)
# Last input never, output 00:00:01, output hang never
# Last clearing of "show interface" counters 20:03:15
# Input queue: 0/75/0/0 (size/max/drops/flushes); Total output drops: 11180
# Queueing strategy: fifo
# Output queue: 0/0 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored, 0 abort
# 27765 packets output, 2695606 bytes, 0 underruns
# 0 output errors, 0 collisions, 0 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Tunnel5 is up, line protocol is up
# Hardware is Tunnel
# Internet address is 172.16.166.1/24
# MTU 17868 bytes, BW 100 Kbit/sec, DLY 50000 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation TUNNEL, loopback not set
# Keepalive not set
# Tunnel linestate evaluation up
# Tunnel source 10.154.1.2 (Loopback102), destination 10.154.1.1
# Tunnel Subblocks:
# src-track:
# Tunnel5 source tracking subblock associated with Loopback102
# Set of tunnels with source Loopback102, 10 members (includes iterators), on interface <OK>
# Tunnel protocol/transport GRE/IP
# Key disabled, sequencing disabled
# Checksumming of packets disabled
# Tunnel TTL 255, Fast tunneling enabled
# Tunnel transport MTU 1476 bytes
# Tunnel transmit bandwidth 8000 (kbps)
# Tunnel receive bandwidth 8000 (kbps)
# Last input never, output 00:00:01, output hang never
# Last clearing of "show interface" counters 20:03:15
# Input queue: 0/75/0/0 (size/max/drops/flushes); Total output drops: 11176
# Queueing strategy: fifo
# Output queue: 0/0 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored, 0 abort
# 27769 packets output, 2695894 bytes, 0 underruns
# 0 output errors, 0 collisions, 0 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Tunnel6 is up, line protocol is up
# Hardware is Tunnel
# Internet address is 172.16.15.1/24
# MTU 17868 bytes, BW 100 Kbit/sec, DLY 50000 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation TUNNEL, loopback not set
# Keepalive not set
# Tunnel linestate evaluation up
# Tunnel source 10.154.1.2 (Loopback102), destination 10.154.1.1
# Tunnel Subblocks:
# src-track:
# Tunnel6 source tracking subblock associated with Loopback102
# Set of tunnels with source Loopback102, 10 members (includes iterators), on interface <OK>
# Tunnel protocol/transport GRE/IP
# Key disabled, sequencing disabled
# Checksumming of packets disabled
# Tunnel TTL 255, Fast tunneling enabled
# Tunnel transport MTU 1476 bytes
# Tunnel transmit bandwidth 8000 (kbps)
# Tunnel receive bandwidth 8000 (kbps)
# Last input never, output 00:00:02, output hang never
# Last clearing of "show interface" counters 20:03:19
# Input queue: 0/75/0/0 (size/max/drops/flushes); Total output drops: 11172
# Queueing strategy: fifo
# Output queue: 0/0 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored, 0 abort
# 27752 packets output, 2694338 bytes, 0 underruns
# 0 output errors, 0 collisions, 0 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Tunnel7 is up, line protocol is up
# Hardware is Tunnel
# Internet address is 172.16.121.1/24
# MTU 17868 bytes, BW 100 Kbit/sec, DLY 50000 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation TUNNEL, loopback not set
# Keepalive not set
# Tunnel linestate evaluation up
# Tunnel source 10.154.1.2 (Loopback102), destination 10.154.1.1
# Tunnel Subblocks:
# src-track:
# Tunnel7 source tracking subblock associated with Loopback102
# Set of tunnels with source Loopback102, 10 members (includes iterators), on interface <OK>
# Tunnel protocol/transport GRE/IP
# Key disabled, sequencing disabled
# Checksumming of packets disabled
# Tunnel TTL 255, Fast tunneling enabled
# Tunnel transport MTU 1476 bytes
# Tunnel transmit bandwidth 8000 (kbps)
# Tunnel receive bandwidth 8000 (kbps)
# Last input never, output 00:00:02, output hang never
# Last clearing of "show interface" counters 20:03:19
# Input queue: 0/75/0/0 (size/max/drops/flushes); Total output drops: 11176
# Queueing strategy: fifo
# Output queue: 0/0 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored, 0 abort
# 27778 packets output, 2696668 bytes, 0 underruns
# 0 output errors, 0 collisions, 0 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Tunnel8 is up, line protocol is up
# Hardware is Tunnel
# Internet address is 172.16.229.1/24
# MTU 17868 bytes, BW 100 Kbit/sec, DLY 50000 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation TUNNEL, loopback not set
# Keepalive not set
# Tunnel linestate evaluation up
# Tunnel source 10.154.1.2 (Loopback102), destination 10.154.1.1
# Tunnel Subblocks:
# src-track:
# Tunnel8 source tracking subblock associated with Loopback102
# Set of tunnels with source Loopback102, 10 members (includes iterators), on interface <OK>
# Tunnel protocol/transport GRE/IP
# Key disabled, sequencing disabled
# Checksumming of packets disabled
# Tunnel TTL 255, Fast tunneling enabled
# Tunnel transport MTU 1476 bytes
# Tunnel transmit bandwidth 8000 (kbps)
# Tunnel receive bandwidth 8000 (kbps)
# Last input never, output 00:00:02, output hang never
# Last clearing of "show interface" counters 20:03:19
# Input queue: 0/75/0/0 (size/max/drops/flushes); Total output drops: 11176
# Queueing strategy: fifo
# Output queue: 0/0 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored, 0 abort
# 27756 packets output, 2694776 bytes, 0 underruns
# 0 output errors, 0 collisions, 0 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Tunnel9 is up, line protocol is up
# Hardware is Tunnel
# Internet address is 172.16.84.1/24
# MTU 17868 bytes, BW 100 Kbit/sec, DLY 50000 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation TUNNEL, loopback not set
# Keepalive not set
# Tunnel linestate evaluation up
# Tunnel source 10.154.1.2 (Loopback102), destination 10.154.1.1
# Tunnel Subblocks:
# src-track:
# Tunnel9 source tracking subblock associated with Loopback102
# Set of tunnels with source Loopback102, 10 members (includes iterators), on interface <OK>
# Tunnel protocol/transport GRE/IP
# Key disabled, sequencing disabled
# Checksumming of packets disabled
# Tunnel TTL 255, Fast tunneling enabled
# Tunnel transport MTU 1476 bytes
# Tunnel transmit bandwidth 8000 (kbps)
# Tunnel receive bandwidth 8000 (kbps)
# Last input never, output 00:00:00, output hang never
# Last clearing of "show interface" counters 20:03:23
# Input queue: 0/75/0/0 (size/max/drops/flushes); Total output drops: 11176
# Queueing strategy: fifo
# Output queue: 0/0 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored, 0 abort
# 27775 packets output, 2696372 bytes, 0 underruns
# 0 output errors, 0 collisions, 0 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Tunnel10 is up, line protocol is up
# Hardware is Tunnel
# Internet address is 172.16.186.1/24
# MTU 17868 bytes, BW 100 Kbit/sec, DLY 50000 usec,
# reliability 255/255, txload 40/255, rxload 135/255
# Encapsulation TUNNEL, loopback not set
# Keepalive not set
# Tunnel linestate evaluation up
# Tunnel source 10.154.1.2 (Loopback102), destination 10.154.1.1
# Tunnel Subblocks:
# src-track:
# Tunnel10 source tracking subblock associated with Loopback102
# Set of tunnels with source Loopback102, 10 members (includes iterators), on interface <OK>
# Tunnel protocol/transport GRE/IP
# Key disabled, sequencing disabled
# Checksumming of packets disabled
# Tunnel TTL 255, Fast tunneling enabled
# Tunnel transport MTU 1476 bytes
# Tunnel transmit bandwidth 8000 (kbps)
# Tunnel receive bandwidth 8000 (kbps)
# Last input 00:00:00, output 00:00:00, output hang never
# Last clearing of "show interface" counters 20:03:23
# Input queue: 0/75/0/0 (size/max/drops/flushes); Total output drops: 34678
# Queueing strategy: fifo
# Output queue: 0/0 (size/max)
# 5 minute input rate 53000 bits/sec, 94 packets/sec
# 5 minute output rate 16000 bits/sec, 23 packets/sec
# 6832599 packets input, 479845002 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored, 0 abort
# 1674895 packets output, 151072685 bytes, 0 underruns
# 0 output errors, 0 collisions, 0 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# '''
# }
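# ------------------------------------------------------------------
# NOTE: the raw CLI capture above and the parsed dict below form a
# golden-fixture pair. A minimal sketch of how such a pair is usually
# exercised follows; it assumes the standard genieparser/pyATS test
# pattern and that the dict closed just above is the matching
# golden_output2 (its name is not visible in this block). The import
# paths and class names in the sketch are assumptions, not part of
# this file. Also note: the key spelling 'out_unknown_protocl_drops'
# in the expected output below is intentional, since genieparser's
# ShowInterfaces schema historically uses that spelling.
#
#   import unittest
#   from unittest.mock import Mock
#   from pyats.topology import Device
#   from genie.libs.parser.iosxe.show_interface import ShowInterfaces
#
#   class TestShowInterfacesGolden2(unittest.TestCase):
#       # A bare Device object; the CLI output is supplied via Mock,
#       # so no real connection is made.
#       device = Device(name='aDevice')
#
#       def test_golden_output2(self):
#           # Mock the device so that device.execute(...) returns the
#           # captured 'show interfaces' text from golden_output2.
#           self.device = Mock(**golden_output2)
#           interface_obj = ShowInterfaces(device=self.device)
#           parsed_output = interface_obj.parse()
#           # The parser result must match the hand-built dict below.
#           self.assertEqual(parsed_output, golden_parsed_output2)
# ------------------------------------------------------------------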
# golden_parsed_output2 = {
# "Vlan1": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": False,
# "line_protocol": "down",
# "oper_status": "down",
# "autostate": True,
# "type": "Ethernet SVI",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 1000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "never",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 375,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "never",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "out_pkts": 0,
# "out_octets": 0,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 1,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Vlan15": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "autostate": True,
# "type": "Ethernet SVI",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 1000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "never",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 375,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "never",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "out_pkts": 0,
# "out_octets": 0,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Vlan101": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "autostate": True,
# "type": "Ethernet SVI",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.205.1/24": {
# "ip": "172.16.205.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 1000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "00:00:00",
# "last_output": "00:00:00",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 375,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 29000,
# "in_rate_pkts": 50,
# "out_rate": 5000,
# "out_rate_pkts": 10
# },
# "last_clear": "never",
# "in_pkts": 3673498,
# "in_octets": 279750798,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "out_pkts": 813812,
# "out_octets": 60257018,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Vlan102": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "autostate": True,
# "type": "Ethernet SVI",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.106.1/24": {
# "ip": "172.16.106.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 1000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "00:00:00",
# "last_output": "00:00:00",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 375,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 29000,
# "in_rate_pkts": 40,
# "out_rate": 5000,
# "out_rate_pkts": 10
# },
# "last_clear": "never",
# "in_pkts": 3632279,
# "in_octets": 276659268,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "out_pkts": 804940,
# "out_octets": 59536912,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 7,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Vlan103": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "autostate": True,
# "type": "Ethernet SVI",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.9.1/24": {
# "ip": "172.16.9.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 1000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "00:00:00",
# "last_output": "00:00:00",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 375,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 29000,
# "in_rate_pkts": 50,
# "out_rate": 5000,
# "out_rate_pkts": 10
# },
# "last_clear": "never",
# "in_pkts": 3673834,
# "in_octets": 279772748,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "out_pkts": 813848,
# "out_octets": 60159890,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Vlan104": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "autostate": True,
# "type": "Ethernet SVI",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.169.1/24": {
# "ip": "172.16.169.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 1000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "00:00:00",
# "last_output": "00:00:00",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 375,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 30000,
# "in_rate_pkts": 50,
# "out_rate": 5000,
# "out_rate_pkts": 10
# },
# "last_clear": "never",
# "in_pkts": 3673653,
# "in_octets": 279762130,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "out_pkts": 813767,
# "out_octets": 60155916,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Vlan105": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "autostate": True,
# "type": "Ethernet SVI",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.76.1/24": {
# "ip": "172.16.76.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 1000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "00:00:00",
# "last_output": "00:00:00",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 375,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 29000,
# "in_rate_pkts": 50,
# "out_rate": 5000,
# "out_rate_pkts": 10
# },
# "last_clear": "never",
# "in_pkts": 3673610,
# "in_octets": 279756472,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "out_pkts": 813890,
# "out_octets": 60162584,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Vlan106": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "autostate": True,
# "type": "Ethernet SVI",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.240.1/24": {
# "ip": "172.16.240.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 1000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "00:00:00",
# "last_output": "00:00:00",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 375,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 30000,
# "in_rate_pkts": 51,
# "out_rate": 6000,
# "out_rate_pkts": 11
# },
# "last_clear": "never",
# "in_pkts": 3673779,
# "in_octets": 279773894,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "out_pkts": 813865,
# "out_octets": 60163538,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Vlan107": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "autostate": True,
# "type": "Ethernet SVI",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.151.1/24": {
# "ip": "172.16.151.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 1000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "00:00:00",
# "last_output": "00:00:00",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 375,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 30000,
# "in_rate_pkts": 51,
# "out_rate": 6000,
# "out_rate_pkts": 11
# },
# "last_clear": "never",
# "in_pkts": 3673882,
# "in_octets": 279781700,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "out_pkts": 813903,
# "out_octets": 60165230,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Vlan108": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "autostate": True,
# "type": "Ethernet SVI",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.64.1/24": {
# "ip": "172.16.64.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 1000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "00:00:00",
# "last_output": "00:00:00",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 375,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 31000,
# "in_rate_pkts": 51,
# "out_rate": 6000,
# "out_rate_pkts": 11
# },
# "last_clear": "never",
# "in_pkts": 3673638,
# "in_octets": 279766630,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "out_pkts": 813842,
# "out_octets": 60162384,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Vlan109": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "autostate": True,
# "type": "Ethernet SVI",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.234.1/24": {
# "ip": "172.16.234.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 1000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "00:00:00",
# "last_output": "00:00:00",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 375,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 30000,
# "in_rate_pkts": 51,
# "out_rate": 6000,
# "out_rate_pkts": 11
# },
# "last_clear": "never",
# "in_pkts": 3673894,
# "in_octets": 279781274,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "out_pkts": 817800,
# "out_octets": 62192557,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Vlan110": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "autostate": True,
# "type": "Ethernet SVI",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.151.1/24": {
# "ip": "172.16.151.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 1000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "00:00:00",
# "last_output": "00:00:00",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 375,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 31000,
# "in_rate_pkts": 51,
# "out_rate": 6000,
# "out_rate_pkts": 11
# },
# "last_clear": "never",
# "in_pkts": 3674136,
# "in_octets": 279796126,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "out_pkts": 813960,
# "out_octets": 60168004,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Vlan111": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "autostate": True,
# "type": "Ethernet SVI",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.70.1/24": {
# "ip": "172.16.70.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 1000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "00:00:00",
# "last_output": "00:00:00",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 375,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 31000,
# "in_rate_pkts": 51,
# "out_rate": 6000,
# "out_rate_pkts": 11
# },
# "last_clear": "never",
# "in_pkts": 3673792,
# "in_octets": 279763870,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "out_pkts": 822081,
# "out_octets": 60848654,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Vlan112": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "autostate": True,
# "type": "Ethernet SVI",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.246.1/24": {
# "ip": "172.16.246.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 1000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "00:00:00",
# "last_output": "00:00:00",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 375,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 30000,
# "in_rate_pkts": 51,
# "out_rate": 6000,
# "out_rate_pkts": 11
# },
# "last_clear": "never",
# "in_pkts": 3673848,
# "in_octets": 279779396,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "out_pkts": 813978,
# "out_octets": 60170234,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Vlan113": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "autostate": True,
# "type": "Ethernet SVI",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.169.1/24": {
# "ip": "172.16.169.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 1000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "00:00:00",
# "last_output": "00:00:00",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 375,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 31000,
# "in_rate_pkts": 51,
# "out_rate": 6000,
# "out_rate_pkts": 11
# },
# "last_clear": "never",
# "in_pkts": 3674092,
# "in_octets": 279792690,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "out_pkts": 813962,
# "out_octets": 60168782,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Vlan114": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "autostate": True,
# "type": "Ethernet SVI",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.94.1/24": {
# "ip": "172.16.94.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 1000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "00:00:00",
# "last_output": "00:00:00",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 375,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 30000,
# "in_rate_pkts": 51,
# "out_rate": 6000,
# "out_rate_pkts": 11
# },
# "last_clear": "never",
# "in_pkts": 3674118,
# "in_octets": 279801252,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "out_pkts": 813964,
# "out_octets": 60167610,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Vlan115": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "autostate": True,
# "type": "Ethernet SVI",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.21.1/24": {
# "ip": "172.16.21.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 1000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "00:00:00",
# "last_output": "00:00:00",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 375,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 31000,
# "in_rate_pkts": 52,
# "out_rate": 6000,
# "out_rate_pkts": 11
# },
# "last_clear": "never",
# "in_pkts": 3688257,
# "in_octets": 280917432,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "out_pkts": 813950,
# "out_octets": 60167218,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Vlan116": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "autostate": True,
# "type": "Ethernet SVI",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.205.1/24": {
# "ip": "172.16.205.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 1000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "00:00:00",
# "last_output": "00:00:00",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 375,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 30000,
# "in_rate_pkts": 50,
# "out_rate": 6000,
# "out_rate_pkts": 11
# },
# "last_clear": "never",
# "in_pkts": 3674429,
# "in_octets": 279815742,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "out_pkts": 816877,
# "out_octets": 60383316,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Vlan117": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "autostate": True,
# "type": "Ethernet SVI",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.136.1/24": {
# "ip": "172.16.136.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 1000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "00:00:00",
# "last_output": "00:00:00",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 375,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 30000,
# "in_rate_pkts": 50,
# "out_rate": 6000,
# "out_rate_pkts": 11
# },
# "last_clear": "never",
# "in_pkts": 3674114,
# "in_octets": 279794536,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "out_pkts": 814083,
# "out_octets": 60178182,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Vlan118": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "autostate": True,
# "type": "Ethernet SVI",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.69.1/24": {
# "ip": "172.16.69.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 1000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "00:00:00",
# "last_output": "00:00:00",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 375,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 31000,
# "in_rate_pkts": 51,
# "out_rate": 6000,
# "out_rate_pkts": 11
# },
# "last_clear": "never",
# "in_pkts": 3674811,
# "in_octets": 279845876,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "out_pkts": 813994,
# "out_octets": 60171406,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Vlan119": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "autostate": True,
# "type": "Ethernet SVI",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.4.1/24": {
# "ip": "172.16.4.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 1000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "00:00:00",
# "last_output": "00:00:00",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 375,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 30000,
# "in_rate_pkts": 51,
# "out_rate": 6000,
# "out_rate_pkts": 11
# },
# "last_clear": "never",
# "in_pkts": 3691322,
# "in_octets": 281116276,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "out_pkts": 814073,
# "out_octets": 60175212,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Vlan120": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "autostate": True,
# "type": "Ethernet SVI",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.196.1/24": {
# "ip": "172.16.196.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 1000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "00:00:00",
# "last_output": "00:00:00",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 375,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 30000,
# "in_rate_pkts": 50,
# "out_rate": 6000,
# "out_rate_pkts": 11
# },
# "last_clear": "never",
# "in_pkts": 3673948,
# "in_octets": 279785038,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "out_pkts": 813996,
# "out_octets": 60171120,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Vlan121": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "autostate": True,
# "type": "Ethernet SVI",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.135.1/24": {
# "ip": "172.16.135.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 1000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "06:39:20",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 375,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "never",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "out_pkts": 163,
# "out_octets": 14018,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Vlan122": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "autostate": True,
# "type": "Ethernet SVI",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.76.1/24": {
# "ip": "172.16.76.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 1000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "06:39:20",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 375,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "never",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "out_pkts": 163,
# "out_octets": 14018,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Vlan123": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "autostate": True,
# "type": "Ethernet SVI",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.19.1/24": {
# "ip": "172.16.19.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 1000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "06:39:20",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 375,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "never",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "out_pkts": 163,
# "out_octets": 14018,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Vlan124": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "autostate": True,
# "type": "Ethernet SVI",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.219.1/24": {
# "ip": "172.16.219.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 1000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "06:39:20",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 375,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "never",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "out_pkts": 163,
# "out_octets": 14018,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Vlan125": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "autostate": True,
# "type": "Ethernet SVI",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.166.1/24": {
# "ip": "172.16.166.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 1000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "06:39:24",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 375,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "never",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "out_pkts": 163,
# "out_octets": 14018,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Vlan126": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "autostate": True,
# "type": "Ethernet SVI",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.115.1/24": {
# "ip": "172.16.115.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 1000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "06:39:24",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 375,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "never",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "out_pkts": 163,
# "out_octets": 14018,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Vlan127": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "autostate": True,
# "type": "Ethernet SVI",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.66.1/24": {
# "ip": "172.16.66.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 1000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "06:39:24",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 375,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "never",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "out_pkts": 163,
# "out_octets": 14018,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Vlan128": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "autostate": True,
# "type": "Ethernet SVI",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.19.1/24": {
# "ip": "172.16.19.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 1000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "06:39:28",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 375,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "never",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "out_pkts": 163,
# "out_octets": 14018,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Vlan129": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "autostate": True,
# "type": "Ethernet SVI",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.229.1/24": {
# "ip": "172.16.229.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 1000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "06:39:28",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 375,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "never",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "out_pkts": 163,
# "out_octets": 14018,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Vlan130": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "autostate": True,
# "type": "Ethernet SVI",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.186.1/24": {
# "ip": "172.16.186.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 1000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "06:39:28",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 375,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "never",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "out_pkts": 163,
# "out_octets": 14018,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Vlan131": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "autostate": True,
# "type": "Ethernet SVI",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.145.1/24": {
# "ip": "172.16.145.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 1000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "06:39:28",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 375,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "never",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "out_pkts": 163,
# "out_octets": 14018,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Vlan132": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "autostate": True,
# "type": "Ethernet SVI",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.106.1/24": {
# "ip": "172.16.106.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 1000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "06:39:32",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 375,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "never",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "out_pkts": 163,
# "out_octets": 14018,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Vlan133": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "autostate": True,
# "type": "Ethernet SVI",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.69.1/24": {
# "ip": "172.16.69.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 1000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "06:39:32",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 375,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "never",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "out_pkts": 163,
# "out_octets": 14018,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Vlan134": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "autostate": True,
# "type": "Ethernet SVI",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.34.1/24": {
# "ip": "172.16.34.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 1000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "06:39:32",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 375,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "never",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "out_pkts": 163,
# "out_octets": 14018,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Vlan135": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "autostate": True,
# "type": "Ethernet SVI",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.1.1/24": {
# "ip": "172.16.1.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 1000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "06:39:32",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 375,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "never",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "out_pkts": 163,
# "out_octets": 14018,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Vlan136": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "autostate": True,
# "type": "Ethernet SVI",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.225.1/24": {
# "ip": "172.16.225.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 1000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "06:39:37",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 375,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "never",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "out_pkts": 163,
# "out_octets": 14018,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Vlan137": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "autostate": True,
# "type": "Ethernet SVI",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.196.1/24": {
# "ip": "172.16.196.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 1000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "06:39:37",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 375,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "never",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "out_pkts": 163,
# "out_octets": 14018,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Vlan138": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "autostate": True,
# "type": "Ethernet SVI",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.169.1/24": {
# "ip": "172.16.169.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 1000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "06:39:37",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 375,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "never",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "out_pkts": 163,
# "out_octets": 14018,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Vlan139": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "autostate": True,
# "type": "Ethernet SVI",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.144.1/24": {
# "ip": "172.16.144.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 1000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "06:39:41",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 375,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "never",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "out_pkts": 163,
# "out_octets": 14018,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Vlan140": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "autostate": True,
# "type": "Ethernet SVI",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.121.1/24": {
# "ip": "172.16.121.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 1000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "06:39:41",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 375,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "never",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "out_pkts": 163,
# "out_octets": 14018,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "GigabitEthernet0/0": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "type": "RP management port",
# "mac_address": "70b3.17ff.6560",
# "phys_address": "70b3.17ff.6560",
# "ipv4": {
# "10.9.1.20/16": {
# "ip": "10.9.1.20",
# "prefix_length": "16"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 1000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "keepalive": 10,
# "duplex_mode": "full",
#             "port_speed": "1000mbps",
# "link_type": "auto",
# "auto_negotiate": True,
# "media_type": "RJ45",
# "flow_control": {
# "receive": False,
# "send": False
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "00:00:00",
# "last_output": "00:00:15",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 75,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 818000,
# "in_rate_pkts": 675,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "never",
# "in_pkts": 10341900,
# "in_octets": 2319228471,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_watchdog": 0,
# "in_mac_pause_frames": 0,
# "out_pkts": 8840,
# "out_octets": 993196,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 0,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_babble": 0,
# "out_late_collision": 0,
# "out_deferred": 0,
# "out_lost_carrier": 0,
# "out_no_carrier": 0,
# "out_mac_pause_frames": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "FortyGigabitEthernet1/0/1": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "connected": False,
# "type": "Forty Gigabit Ethernet",
# "mac_address": "70b3.17ff.6560",
# "phys_address": "70b3.17ff.6560",
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 40000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "keepalive": 10,
# "duplex_mode": "auto",
# "port_speed": "auto",
# "link_type": "auto",
# "auto_negotiate": True,
# "media_type": "unknown",
# "flow_control": {
# "receive": True,
# "send": False
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "never",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 2000,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "20:01:20",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_watchdog": 0,
# "in_mac_pause_frames": 0,
# "in_with_dribble": 0,
# "out_pkts": 0,
# "out_octets": 0,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_babble": 0,
# "out_late_collision": 0,
# "out_deferred": 0,
# "out_lost_carrier": 0,
# "out_no_carrier": 0,
# "out_mac_pause_frames": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "FortyGigabitEthernet1/0/2": {
# "port_channel": {
# "port_channel_member": True,
# "port_channel_int": "Port-channel2"
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "connected": True,
#             "auto_negotiate": False,
#             "duplex_mode": "full",
#             "link_type": "force-up",
#             "port_speed": "40gb/s",
#             "media_type": "QSFP 40G SR4 SFP",
# "type": "Forty Gigabit Ethernet",
# "mac_address": "70b3.17ff.6561",
# "phys_address": "70b3.17ff.6561",
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 40000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "keepalive": 10,
# "flow_control": {
# "receive": True,
# "send": False
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "00:00:00",
# "last_output": "00:00:03",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 2000,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 293000,
# "in_rate_pkts": 454,
# "out_rate": 58000,
# "out_rate_pkts": 104
# },
# "last_clear": "20:01:24",
# "in_pkts": 32521304,
# "in_octets": 2684387777,
# "in_no_buffer": 0,
# "in_multicast_pkts": 1476582,
# "in_broadcast_pkts": 1476582,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_watchdog": 0,
# "in_mac_pause_frames": 0,
# "in_with_dribble": 0,
# "out_pkts": 7498024,
# "out_octets": 525513005,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_collision": 0,
# "out_unknown_protocl_drops": 2,
# "out_babble": 0,
# "out_late_collision": 0,
# "out_deferred": 0,
# "out_lost_carrier": 0,
# "out_no_carrier": 0,
# "out_mac_pause_frames": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "FortyGigabitEthernet1/0/3": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "connected": False,
# "type": "Forty Gigabit Ethernet",
# "mac_address": "70b3.17ff.6562",
# "phys_address": "70b3.17ff.6562",
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 40000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "keepalive": 10,
# "duplex_mode": "auto",
# "port_speed": "auto",
# "link_type": "auto",
# "auto_negotiate": True,
# "media_type": "unknown",
# "flow_control": {
# "receive": True,
# "send": False
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "never",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 2000,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "20:01:24",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_watchdog": 0,
# "in_mac_pause_frames": 0,
# "in_with_dribble": 0,
# "out_pkts": 0,
# "out_octets": 0,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_babble": 0,
# "out_late_collision": 0,
# "out_deferred": 0,
# "out_lost_carrier": 0,
# "out_no_carrier": 0,
# "out_mac_pause_frames": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "FortyGigabitEthernet1/0/4": {
# "port_channel": {
# "port_channel_member": True,
# "port_channel_int": "Port-channel2"
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "connected": True,
#             "auto_negotiate": False,
#             "duplex_mode": "full",
#             "link_type": "force-up",
#             "port_speed": "40gb/s",
#             "media_type": "QSFP 40G SR BD SFP",
# "type": "Forty Gigabit Ethernet",
# "mac_address": "70b3.17ff.6563",
# "phys_address": "70b3.17ff.6563",
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 40000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "keepalive": 10,
# "flow_control": {
# "receive": True,
# "send": False
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "00:00:15",
# "last_output": "00:00:03",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 2000,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 102000,
# "in_rate_pkts": 186,
# "out_rate": 329000,
# "out_rate_pkts": 524
# },
# "last_clear": "20:01:24",
# "in_pkts": 13376239,
# "in_octets": 910225278,
# "in_no_buffer": 0,
# "in_multicast_pkts": 6304,
# "in_broadcast_pkts": 6304,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_watchdog": 0,
# "in_mac_pause_frames": 0,
# "in_with_dribble": 0,
# "out_pkts": 37674953,
# "out_octets": 3020267756,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_babble": 0,
# "out_late_collision": 0,
# "out_deferred": 0,
# "out_lost_carrier": 0,
# "out_no_carrier": 0,
# "out_mac_pause_frames": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "FortyGigabitEthernet1/0/5": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "connected": False,
# "type": "Forty Gigabit Ethernet",
# "mac_address": "70b3.17ff.6564",
# "phys_address": "70b3.17ff.6564",
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 40000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "keepalive": 10,
# "duplex_mode": "auto",
# "port_speed": "auto",
# "link_type": "auto",
# "auto_negotiate": True,
# "media_type": "unknown",
# "flow_control": {
# "receive": True,
# "send": False
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "never",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 2000,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "20:01:28",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_watchdog": 0,
# "in_mac_pause_frames": 0,
# "in_with_dribble": 0,
# "out_pkts": 0,
# "out_octets": 0,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_babble": 0,
# "out_late_collision": 0,
# "out_deferred": 0,
# "out_lost_carrier": 0,
# "out_no_carrier": 0,
# "out_mac_pause_frames": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "FortyGigabitEthernet1/0/6": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "connected": False,
# "type": "Forty Gigabit Ethernet",
# "mac_address": "70b3.17ff.6565",
# "phys_address": "70b3.17ff.6565",
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 40000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "keepalive": 10,
# "duplex_mode": "auto",
# "port_speed": "auto",
# "link_type": "auto",
# "auto_negotiate": True,
# "media_type": "unknown",
# "flow_control": {
# "receive": True,
# "send": False
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "never",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 2000,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "20:01:28",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_watchdog": 0,
# "in_mac_pause_frames": 0,
# "in_with_dribble": 0,
# "out_pkts": 0,
# "out_octets": 0,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_babble": 0,
# "out_late_collision": 0,
# "out_deferred": 0,
# "out_lost_carrier": 0,
# "out_no_carrier": 0,
# "out_mac_pause_frames": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "FortyGigabitEthernet1/0/7": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "connected": False,
# "type": "Forty Gigabit Ethernet",
# "mac_address": "70b3.17ff.6566",
# "phys_address": "70b3.17ff.6566",
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 40000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "keepalive": 10,
# "duplex_mode": "auto",
# "port_speed": "auto",
# "link_type": "auto",
# "auto_negotiate": True,
# "media_type": "unknown",
# "flow_control": {
# "receive": True,
# "send": False
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "never",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 2000,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "20:01:33",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_watchdog": 0,
# "in_mac_pause_frames": 0,
# "in_with_dribble": 0,
# "out_pkts": 0,
# "out_octets": 0,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_babble": 0,
# "out_late_collision": 0,
# "out_deferred": 0,
# "out_lost_carrier": 0,
# "out_no_carrier": 0,
# "out_mac_pause_frames": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "FortyGigabitEthernet1/0/8": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "connected": False,
# "type": "Forty Gigabit Ethernet",
# "mac_address": "70b3.17ff.6567",
# "phys_address": "70b3.17ff.6567",
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 40000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "keepalive": 10,
# "duplex_mode": "auto",
# "port_speed": "auto",
# "link_type": "auto",
# "auto_negotiate": True,
# "media_type": "unknown",
# "flow_control": {
# "receive": True,
# "send": False
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "never",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 2000,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "20:01:33",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_watchdog": 0,
# "in_mac_pause_frames": 0,
# "in_with_dribble": 0,
# "out_pkts": 0,
# "out_octets": 0,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_babble": 0,
# "out_late_collision": 0,
# "out_deferred": 0,
# "out_lost_carrier": 0,
# "out_no_carrier": 0,
# "out_mac_pause_frames": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "FortyGigabitEthernet1/0/9": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "connected": False,
# "type": "Forty Gigabit Ethernet",
# "mac_address": "70b3.17ff.6568",
# "phys_address": "70b3.17ff.6568",
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 40000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "keepalive": 10,
# "duplex_mode": "auto",
# "port_speed": "auto",
# "link_type": "auto",
# "auto_negotiate": True,
# "media_type": "unknown",
# "flow_control": {
# "receive": True,
# "send": False
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "never",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 2000,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "20:01:33",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_watchdog": 0,
# "in_mac_pause_frames": 0,
# "in_with_dribble": 0,
# "out_pkts": 0,
# "out_octets": 0,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_babble": 0,
# "out_late_collision": 0,
# "out_deferred": 0,
# "out_lost_carrier": 0,
# "out_no_carrier": 0,
# "out_mac_pause_frames": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "FortyGigabitEthernet1/0/10": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "connected": False,
# "type": "Forty Gigabit Ethernet",
# "mac_address": "70b3.17ff.6569",
# "phys_address": "70b3.17ff.6569",
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 40000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "keepalive": 10,
# "duplex_mode": "auto",
# "port_speed": "auto",
# "link_type": "auto",
# "auto_negotiate": True,
# "media_type": "unknown",
# "flow_control": {
# "receive": True,
# "send": False
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "never",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 2000,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "20:01:37",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_watchdog": 0,
# "in_mac_pause_frames": 0,
# "in_with_dribble": 0,
# "out_pkts": 0,
# "out_octets": 0,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_babble": 0,
# "out_late_collision": 0,
# "out_deferred": 0,
# "out_lost_carrier": 0,
# "out_no_carrier": 0,
# "out_mac_pause_frames": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "FortyGigabitEthernet1/0/11": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "connected": False,
# "type": "Forty Gigabit Ethernet",
# "mac_address": "70b3.17ff.656a",
# "phys_address": "70b3.17ff.656a",
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 40000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "keepalive": 10,
# "duplex_mode": "auto",
# "port_speed": "auto",
# "link_type": "auto",
# "auto_negotiate": True,
# "media_type": "unknown",
# "flow_control": {
# "receive": True,
# "send": False
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "never",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 2000,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "20:01:37",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_watchdog": 0,
# "in_mac_pause_frames": 0,
# "in_with_dribble": 0,
# "out_pkts": 0,
# "out_octets": 0,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_babble": 0,
# "out_late_collision": 0,
# "out_deferred": 0,
# "out_lost_carrier": 0,
# "out_no_carrier": 0,
# "out_mac_pause_frames": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "FortyGigabitEthernet1/0/12": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "connected": False,
# "type": "Forty Gigabit Ethernet",
# "mac_address": "70b3.17ff.656b",
# "phys_address": "70b3.17ff.656b",
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 40000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "keepalive": 10,
# "duplex_mode": "auto",
# "port_speed": "auto",
# "link_type": "auto",
# "auto_negotiate": True,
# "media_type": "unknown",
# "flow_control": {
# "receive": True,
# "send": False
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "never",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 2000,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "20:01:37",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_watchdog": 0,
# "in_mac_pause_frames": 0,
# "in_with_dribble": 0,
# "out_pkts": 0,
# "out_octets": 0,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_babble": 0,
# "out_late_collision": 0,
# "out_deferred": 0,
# "out_lost_carrier": 0,
# "out_no_carrier": 0,
# "out_mac_pause_frames": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "FortyGigabitEthernet1/0/13": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "connected": False,
# "type": "Forty Gigabit Ethernet",
# "mac_address": "70b3.17ff.656c",
# "phys_address": "70b3.17ff.656c",
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 40000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "keepalive": 10,
# "duplex_mode": "auto",
# "port_speed": "auto",
# "link_type": "auto",
# "auto_negotiate": True,
# "media_type": "unknown",
# "flow_control": {
# "receive": True,
# "send": False
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "never",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 2000,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "20:01:41",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_watchdog": 0,
# "in_mac_pause_frames": 0,
# "in_with_dribble": 0,
# "out_pkts": 0,
# "out_octets": 0,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_babble": 0,
# "out_late_collision": 0,
# "out_deferred": 0,
# "out_lost_carrier": 0,
# "out_no_carrier": 0,
# "out_mac_pause_frames": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "FortyGigabitEthernet1/0/14": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "connected": False,
# "type": "Forty Gigabit Ethernet",
# "mac_address": "70b3.17ff.656d",
# "phys_address": "70b3.17ff.656d",
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 40000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "keepalive": 10,
# "duplex_mode": "auto",
# "port_speed": "auto",
# "link_type": "auto",
# "auto_negotiate": True,
# "media_type": "unknown",
# "flow_control": {
# "receive": True,
# "send": False
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "never",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 2000,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "20:01:41",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_watchdog": 0,
# "in_mac_pause_frames": 0,
# "in_with_dribble": 0,
# "out_pkts": 0,
# "out_octets": 0,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_babble": 0,
# "out_late_collision": 0,
# "out_deferred": 0,
# "out_lost_carrier": 0,
# "out_no_carrier": 0,
# "out_mac_pause_frames": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "FortyGigabitEthernet1/0/15": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "connected": False,
# "type": "Forty Gigabit Ethernet",
# "mac_address": "70b3.17ff.656e",
# "phys_address": "70b3.17ff.656e",
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 40000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "keepalive": 10,
# "duplex_mode": "auto",
# "port_speed": "auto",
# "link_type": "auto",
# "auto_negotiate": True,
# "media_type": "unknown",
# "flow_control": {
# "receive": True,
# "send": False
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "never",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 2000,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "20:01:41",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_watchdog": 0,
# "in_mac_pause_frames": 0,
# "in_with_dribble": 0,
# "out_pkts": 0,
# "out_octets": 0,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_babble": 0,
# "out_late_collision": 0,
# "out_deferred": 0,
# "out_lost_carrier": 0,
# "out_no_carrier": 0,
# "out_mac_pause_frames": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "FortyGigabitEthernet1/0/16": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "connected": False,
# "type": "Forty Gigabit Ethernet",
# "mac_address": "70b3.17ff.656f",
# "phys_address": "70b3.17ff.656f",
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 40000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "keepalive": 10,
# "duplex_mode": "auto",
# "port_speed": "auto",
# "link_type": "auto",
# "auto_negotiate": True,
# "media_type": "unknown",
# "flow_control": {
# "receive": True,
# "send": False
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "never",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 2000,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "20:01:45",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_watchdog": 0,
# "in_mac_pause_frames": 0,
# "in_with_dribble": 0,
# "out_pkts": 0,
# "out_octets": 0,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_babble": 0,
# "out_late_collision": 0,
# "out_deferred": 0,
# "out_lost_carrier": 0,
# "out_no_carrier": 0,
# "out_mac_pause_frames": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "FortyGigabitEthernet1/0/17": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "connected": False,
# "type": "Forty Gigabit Ethernet",
# "mac_address": "70b3.17ff.6570",
# "phys_address": "70b3.17ff.6570",
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 40000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "keepalive": 10,
# "duplex_mode": "auto",
# "port_speed": "auto",
# "link_type": "auto",
# "auto_negotiate": True,
# "media_type": "unknown",
# "flow_control": {
# "receive": True,
# "send": False
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "never",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 2000,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "20:01:45",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_watchdog": 0,
# "in_mac_pause_frames": 0,
# "in_with_dribble": 0,
# "out_pkts": 0,
# "out_octets": 0,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_babble": 0,
# "out_late_collision": 0,
# "out_deferred": 0,
# "out_lost_carrier": 0,
# "out_no_carrier": 0,
# "out_mac_pause_frames": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "FortyGigabitEthernet1/0/18": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "connected": False,
# "type": "Forty Gigabit Ethernet",
# "mac_address": "70b3.17ff.6571",
# "phys_address": "70b3.17ff.6571",
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 40000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "keepalive": 10,
# "duplex_mode": "auto",
# "port_speed": "auto",
# "link_type": "auto",
# "auto_negotiate": True,
# "media_type": "unknown",
# "flow_control": {
# "receive": True,
# "send": False
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "never",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 2000,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "20:01:45",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_watchdog": 0,
# "in_mac_pause_frames": 0,
# "in_with_dribble": 0,
# "out_pkts": 0,
# "out_octets": 0,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_babble": 0,
# "out_late_collision": 0,
# "out_deferred": 0,
# "out_lost_carrier": 0,
# "out_no_carrier": 0,
# "out_mac_pause_frames": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "FortyGigabitEthernet1/0/19": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "connected": False,
# "type": "Forty Gigabit Ethernet",
# "mac_address": "70b3.17ff.6572",
# "phys_address": "70b3.17ff.6572",
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 40000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "keepalive": 10,
# "duplex_mode": "auto",
# "port_speed": "auto",
# "link_type": "auto",
# "auto_negotiate": True,
# "media_type": "unknown",
# "flow_control": {
# "receive": True,
# "send": False
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "never",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 2000,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "20:01:50",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_watchdog": 0,
# "in_mac_pause_frames": 0,
# "in_with_dribble": 0,
# "out_pkts": 0,
# "out_octets": 0,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_babble": 0,
# "out_late_collision": 0,
# "out_deferred": 0,
# "out_lost_carrier": 0,
# "out_no_carrier": 0,
# "out_mac_pause_frames": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "FortyGigabitEthernet1/0/20": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "connected": False,
# "type": "Forty Gigabit Ethernet",
# "mac_address": "70b3.17ff.6573",
# "phys_address": "70b3.17ff.6573",
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 40000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "keepalive": 10,
# "duplex_mode": "auto",
# "port_speed": "auto",
# "link_type": "auto",
# "auto_negotiate": True,
# "media_type": "unknown",
# "flow_control": {
# "receive": True,
# "send": False
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "never",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 2000,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "20:01:50",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_watchdog": 0,
# "in_mac_pause_frames": 0,
# "in_with_dribble": 0,
# "out_pkts": 0,
# "out_octets": 0,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_babble": 0,
# "out_late_collision": 0,
# "out_deferred": 0,
# "out_lost_carrier": 0,
# "out_no_carrier": 0,
# "out_mac_pause_frames": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "FortyGigabitEthernet1/0/21": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "connected": False,
# "type": "Forty Gigabit Ethernet",
# "mac_address": "70b3.17ff.6574",
# "phys_address": "70b3.17ff.6574",
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 40000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "keepalive": 10,
# "duplex_mode": "auto",
# "port_speed": "auto",
# "link_type": "auto",
# "auto_negotiate": True,
# "media_type": "unknown",
# "flow_control": {
# "receive": True,
# "send": False
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "never",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 2000,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "20:01:50",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_watchdog": 0,
# "in_mac_pause_frames": 0,
# "in_with_dribble": 0,
# "out_pkts": 0,
# "out_octets": 0,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_babble": 0,
# "out_late_collision": 0,
# "out_deferred": 0,
# "out_lost_carrier": 0,
# "out_no_carrier": 0,
# "out_mac_pause_frames": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "FortyGigabitEthernet1/0/22": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "connected": False,
# "type": "Forty Gigabit Ethernet",
# "mac_address": "70b3.17ff.6575",
# "phys_address": "70b3.17ff.6575",
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 40000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "keepalive": 10,
# "duplex_mode": "auto",
# "port_speed": "auto",
# "link_type": "auto",
# "auto_negotiate": True,
# "media_type": "unknown",
# "flow_control": {
# "receive": True,
# "send": False
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "never",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 2000,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "20:01:54",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_watchdog": 0,
# "in_mac_pause_frames": 0,
# "in_with_dribble": 0,
# "out_pkts": 0,
# "out_octets": 0,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_babble": 0,
# "out_late_collision": 0,
# "out_deferred": 0,
# "out_lost_carrier": 0,
# "out_no_carrier": 0,
# "out_mac_pause_frames": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "FortyGigabitEthernet1/0/23": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "connected": False,
# "type": "Forty Gigabit Ethernet",
# "mac_address": "70b3.17ff.6576",
# "phys_address": "70b3.17ff.6576",
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 40000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "keepalive": 10,
# "duplex_mode": "auto",
# "port_speed": "auto",
# "link_type": "auto",
# "auto_negotiate": True,
# "media_type": "unknown",
# "flow_control": {
# "receive": True,
# "send": False
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "never",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 2000,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "20:01:54",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_watchdog": 0,
# "in_mac_pause_frames": 0,
# "in_with_dribble": 0,
# "out_pkts": 0,
# "out_octets": 0,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_babble": 0,
# "out_late_collision": 0,
# "out_deferred": 0,
# "out_lost_carrier": 0,
# "out_no_carrier": 0,
# "out_mac_pause_frames": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "FortyGigabitEthernet1/0/24": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "connected": False,
# "type": "Forty Gigabit Ethernet",
# "mac_address": "70b3.17ff.6577",
# "phys_address": "70b3.17ff.6577",
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 40000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "keepalive": 10,
# "duplex_mode": "auto",
# "port_speed": "auto",
# "link_type": "auto",
# "auto_negotiate": True,
# "media_type": "unknown",
# "flow_control": {
# "receive": True,
# "send": False
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "never",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 2000,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "20:01:54",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_watchdog": 0,
# "in_mac_pause_frames": 0,
# "in_with_dribble": 0,
# "out_pkts": 0,
# "out_octets": 0,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_babble": 0,
# "out_late_collision": 0,
# "out_deferred": 0,
# "out_lost_carrier": 0,
# "out_no_carrier": 0,
# "out_mac_pause_frames": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "FortyGigabitEthernet1/0/25": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "connected": False,
# "type": "Forty Gigabit Ethernet",
# "mac_address": "70b3.17ff.6578",
# "phys_address": "70b3.17ff.6578",
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 40000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "keepalive": 10,
# "duplex_mode": "auto",
# "port_speed": "auto",
# "link_type": "auto",
# "auto_negotiate": True,
# "media_type": "unknown",
# "flow_control": {
# "receive": True,
# "send": False
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "never",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 2000,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "20:01:58",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_watchdog": 0,
# "in_mac_pause_frames": 0,
# "in_with_dribble": 0,
# "out_pkts": 0,
# "out_octets": 0,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_babble": 0,
# "out_late_collision": 0,
# "out_deferred": 0,
# "out_lost_carrier": 0,
# "out_no_carrier": 0,
# "out_mac_pause_frames": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "FortyGigabitEthernet1/0/26": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "connected": False,
# "type": "Forty Gigabit Ethernet",
# "mac_address": "70b3.17ff.6579",
# "phys_address": "70b3.17ff.6579",
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 40000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "keepalive": 10,
# "duplex_mode": "auto",
# "port_speed": "auto",
# "link_type": "auto",
# "auto_negotiate": True,
# "media_type": "unknown",
# "flow_control": {
# "receive": True,
# "send": False
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "never",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 2000,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "20:01:58",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_watchdog": 0,
# "in_mac_pause_frames": 0,
# "in_with_dribble": 0,
# "out_pkts": 0,
# "out_octets": 0,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_babble": 0,
# "out_late_collision": 0,
# "out_deferred": 0,
# "out_lost_carrier": 0,
# "out_no_carrier": 0,
# "out_mac_pause_frames": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "FortyGigabitEthernet1/0/27": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "connected": False,
# "type": "Forty Gigabit Ethernet",
# "mac_address": "70b3.17ff.657a",
# "phys_address": "70b3.17ff.657a",
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 40000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "keepalive": 10,
# "duplex_mode": "auto",
# "port_speed": "auto",
# "link_type": "auto",
# "auto_negotiate": True,
# "media_type": "unknown",
# "flow_control": {
# "receive": True,
# "send": False
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "never",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 2000,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "20:01:58",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_watchdog": 0,
# "in_mac_pause_frames": 0,
# "in_with_dribble": 0,
# "out_pkts": 0,
# "out_octets": 0,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_babble": 0,
# "out_late_collision": 0,
# "out_deferred": 0,
# "out_lost_carrier": 0,
# "out_no_carrier": 0,
# "out_mac_pause_frames": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "FortyGigabitEthernet1/0/28": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "connected": False,
# "type": "Forty Gigabit Ethernet",
# "mac_address": "70b3.17ff.657b",
# "phys_address": "70b3.17ff.657b",
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 40000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "keepalive": 10,
# "duplex_mode": "auto",
# "port_speed": "auto",
# "link_type": "auto",
# "auto_negotiate": True,
# "media_type": "unknown",
# "flow_control": {
# "receive": True,
# "send": False
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "never",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 2000,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "20:02:02",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_watchdog": 0,
# "in_mac_pause_frames": 0,
# "in_with_dribble": 0,
# "out_pkts": 0,
# "out_octets": 0,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_babble": 0,
# "out_late_collision": 0,
# "out_deferred": 0,
# "out_lost_carrier": 0,
# "out_no_carrier": 0,
# "out_mac_pause_frames": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "FortyGigabitEthernet1/0/29": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "connected": False,
# "type": "Forty Gigabit Ethernet",
# "mac_address": "70b3.17ff.657c",
# "phys_address": "70b3.17ff.657c",
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 40000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "keepalive": 10,
# "duplex_mode": "auto",
# "port_speed": "auto",
# "link_type": "auto",
# "auto_negotiate": True,
# "media_type": "unknown",
# "flow_control": {
# "receive": True,
# "send": False
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "never",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 2000,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "20:02:02",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_watchdog": 0,
# "in_mac_pause_frames": 0,
# "in_with_dribble": 0,
# "out_pkts": 0,
# "out_octets": 0,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_babble": 0,
# "out_late_collision": 0,
# "out_deferred": 0,
# "out_lost_carrier": 0,
# "out_no_carrier": 0,
# "out_mac_pause_frames": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "FortyGigabitEthernet1/0/30": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "connected": False,
# "type": "Forty Gigabit Ethernet",
# "mac_address": "70b3.17ff.657d",
# "phys_address": "70b3.17ff.657d",
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 40000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "keepalive": 10,
# "duplex_mode": "auto",
# "port_speed": "auto",
# "link_type": "auto",
# "auto_negotiate": True,
# "media_type": "unknown",
# "flow_control": {
# "receive": True,
# "send": False
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "never",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 2000,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "20:02:02",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_watchdog": 0,
# "in_mac_pause_frames": 0,
# "in_with_dribble": 0,
# "out_pkts": 0,
# "out_octets": 0,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_babble": 0,
# "out_late_collision": 0,
# "out_deferred": 0,
# "out_lost_carrier": 0,
# "out_no_carrier": 0,
# "out_mac_pause_frames": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "FortyGigabitEthernet1/0/31": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "connected": False,
# "type": "Forty Gigabit Ethernet",
# "mac_address": "70b3.17ff.657e",
# "phys_address": "70b3.17ff.657e",
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 40000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "keepalive": 10,
# "duplex_mode": "auto",
# "port_speed": "auto",
# "link_type": "auto",
# "auto_negotiate": True,
# "media_type": "unknown",
# "flow_control": {
# "receive": True,
# "send": False
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "never",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 2000,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "20:02:07",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_watchdog": 0,
# "in_mac_pause_frames": 0,
# "in_with_dribble": 0,
# "out_pkts": 0,
# "out_octets": 0,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_babble": 0,
# "out_late_collision": 0,
# "out_deferred": 0,
# "out_lost_carrier": 0,
# "out_no_carrier": 0,
# "out_mac_pause_frames": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "FortyGigabitEthernet1/0/32": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "connected": False,
# "type": "Forty Gigabit Ethernet",
# "mac_address": "70b3.17ff.657f",
# "phys_address": "70b3.17ff.657f",
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 40000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "keepalive": 10,
# "duplex_mode": "auto",
# "port_speed": "auto",
# "link_type": "auto",
# "auto_negotiate": True,
# "media_type": "unknown",
# "flow_control": {
# "receive": True,
# "send": False
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "never",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 2000,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "20:02:07",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_watchdog": 0,
# "in_mac_pause_frames": 0,
# "in_with_dribble": 0,
# "out_pkts": 0,
# "out_octets": 0,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_babble": 0,
# "out_late_collision": 0,
# "out_deferred": 0,
# "out_lost_carrier": 0,
# "out_no_carrier": 0,
# "out_mac_pause_frames": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "HundredGigE1/0/33": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "connected": False,
# "type": "Hundred Gigabit Ethernet",
# "mac_address": "70b3.17ff.6580",
# "phys_address": "70b3.17ff.6580",
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "keepalive": 10,
# "duplex_mode": "auto",
# "port_speed": "auto",
# "link_type": "auto",
# "auto_negotiate": True,
# "media_type": "unknown",
# "flow_control": {
# "receive": True,
# "send": False
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "never",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 2000,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "20:02:07",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_watchdog": 0,
# "in_mac_pause_frames": 0,
# "in_with_dribble": 0,
# "out_pkts": 0,
# "out_octets": 0,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_babble": 0,
# "out_late_collision": 0,
# "out_deferred": 0,
# "out_lost_carrier": 0,
# "out_no_carrier": 0,
# "out_mac_pause_frames": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "HundredGigE1/0/34": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "connected": False,
# "type": "Hundred Gigabit Ethernet",
# "mac_address": "70b3.17ff.6581",
# "phys_address": "70b3.17ff.6581",
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "keepalive": 10,
# "duplex_mode": "auto",
# "port_speed": "auto",
# "link_type": "auto",
# "auto_negotiate": True,
# "media_type": "unknown",
# "flow_control": {
# "receive": True,
# "send": False
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "never",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 2000,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "20:02:11",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_watchdog": 0,
# "in_mac_pause_frames": 0,
# "in_with_dribble": 0,
# "out_pkts": 0,
# "out_octets": 0,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_babble": 0,
# "out_late_collision": 0,
# "out_deferred": 0,
# "out_lost_carrier": 0,
# "out_no_carrier": 0,
# "out_mac_pause_frames": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "HundredGigE1/0/35": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
#             "auto_negotiate": False,
#             "oper_status": "up",
#             "connected": True,
#             "duplex_mode": "full",
#             "link_type": "force-up",
#             "media_type": "QSFP 100G SR4",
#             "type": "Hundred Gigabit Ethernet",
#             "port_speed": "100gb/s",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "description": "connected to Ixia 1/6",
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "dot1q",
# "first_dot1q": "1"
# },
# "keepalive": 10,
# "flow_control": {
# "receive": True,
# "send": False
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "00:00:18",
# "last_output": "00:00:00",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 375,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 25000,
# "out_rate_pkts": 15
# },
# "last_clear": "20:02:11",
# "in_pkts": 550971,
# "in_octets": 121771829,
# "in_no_buffer": 0,
# "in_multicast_pkts": 172604,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 206,
# "in_throttles": 0,
# "in_errors": 206,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_watchdog": 0,
# "in_mac_pause_frames": 0,
# "in_with_dribble": 0,
# "out_pkts": 1536769,
# "out_octets": 437624881,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 33,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_babble": 0,
# "out_late_collision": 0,
# "out_deferred": 0,
# "out_lost_carrier": 0,
# "out_no_carrier": 0,
# "out_mac_pause_frames": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "HundredGigE1/0/35.1": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "connected": True,
#             "counters": {
#                 "in_octets": 2503842,
#                 "in_pkts": 13266,
#                 "out_octets": 2168924,
#                 "out_pkts": 13769
# },
# "type": "Hundred Gigabit Ethernet",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.19.1/24": {
# "ip": "172.16.19.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "dot1q",
# "first_dot1q": "501"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "keepalive": 10
# },
# "HundredGigE1/0/35.2": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "connected": True,
#             "counters": {
#                 "in_octets": 2501935,
#                 "in_pkts": 13254,
#                 "out_octets": 2170079,
#                 "out_pkts": 13784
# },
# "type": "Hundred Gigabit Ethernet",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.76.1/24": {
# "ip": "172.16.76.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "dot1q",
# "first_dot1q": "502"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "keepalive": 10
# },
# "HundredGigE1/0/35.3": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "connected": True,
#             "counters": {
#                 "in_octets": 2505791,
#                 "in_pkts": 13281,
#                 "out_octets": 2169079,
#                 "out_pkts": 13764
# },
# "type": "Hundred Gigabit Ethernet",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.135.1/24": {
# "ip": "172.16.135.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "dot1q",
# "first_dot1q": "503"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "keepalive": 10
# },
# "HundredGigE1/0/35.4": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "connected": True,
#             "counters": {
#                 "in_octets": 2500301,
#                 "in_pkts": 13273,
#                 "out_octets": 2168845,
#                 "out_pkts": 13766
# },
# "type": "Hundred Gigabit Ethernet",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.196.1/24": {
# "ip": "172.16.196.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "dot1q",
# "first_dot1q": "504"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "keepalive": 10
# },
# "HundredGigE1/0/35.5": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "connected": True,
#             "counters": {
#                 "in_octets": 2497502,
#                 "in_pkts": 13253,
#                 "out_octets": 2167640,
#                 "out_pkts": 13750
# },
# "type": "Hundred Gigabit Ethernet",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.4.1/24": {
# "ip": "172.16.4.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "dot1q",
# "first_dot1q": "505"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "keepalive": 10
# },
# "HundredGigE1/0/35.6": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "connected": True,
#             "counters": {
#                 "in_octets": 2502193,
#                 "in_pkts": 13261,
#                 "out_octets": 2167636,
#                 "out_pkts": 13744
# },
# "type": "Hundred Gigabit Ethernet",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.69.1/24": {
# "ip": "172.16.69.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "dot1q",
# "first_dot1q": "506"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "keepalive": 10
# },
# "HundredGigE1/0/35.7": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "connected": True,
#             "counters": {
#                 "in_octets": 2513375,
#                 "in_pkts": 13350,
#                 "out_octets": 2169851,
#                 "out_pkts": 13781
# },
# "type": "Hundred Gigabit Ethernet",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.136.1/24": {
# "ip": "172.16.136.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "dot1q",
# "first_dot1q": "507"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "keepalive": 10
# },
# "HundredGigE1/0/35.8": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "connected": True,
#             "counters": {
#                 "in_octets": 2510082,
#                 "in_pkts": 13292,
#                 "out_octets": 2169702,
#                 "out_pkts": 13777
# },
# "type": "Hundred Gigabit Ethernet",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.205.1/24": {
# "ip": "172.16.205.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "dot1q",
# "first_dot1q": "508"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "keepalive": 10
# },
# "HundredGigE1/0/35.9": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "connected": True,
#             "counters": {
#                 "in_octets": 2511802,
#                 "in_pkts": 13332,
#                 "out_octets": 2169056,
#                 "out_pkts": 13770
# },
# "type": "Hundred Gigabit Ethernet",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.21.1/24": {
# "ip": "172.16.21.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "dot1q",
# "first_dot1q": "509"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "keepalive": 10
# },
# "HundredGigE1/0/35.10": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "connected": True,
#             "counters": {
#                 "in_octets": 2502910,
#                 "in_pkts": 13282,
#                 "out_octets": 2168425,
#                 "out_pkts": 13777
# },
# "type": "Hundred Gigabit Ethernet",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.94.1/24": {
# "ip": "172.16.94.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "dot1q",
# "first_dot1q": "510"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "keepalive": 10
# },
# "HundredGigE1/0/35.11": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "connected": True,
#             "counters": {
#                 "in_octets": 2501618,
#                 "in_pkts": 13281,
#                 "out_octets": 2168163,
#                 "out_pkts": 13756
# },
# "type": "Hundred Gigabit Ethernet",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.169.1/24": {
# "ip": "172.16.169.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "dot1q",
# "first_dot1q": "511"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "keepalive": 10
# },
# "HundredGigE1/0/35.12": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "connected": True,
#             "counters": {
#                 "in_octets": 2502717,
#                 "in_pkts": 13255,
#                 "out_octets": 2168956,
#                 "out_pkts": 13765
# },
# "type": "Hundred Gigabit Ethernet",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.246.1/24": {
# "ip": "172.16.246.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "dot1q",
# "first_dot1q": "512"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "keepalive": 10
# },
# "HundredGigE1/0/35.13": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "connected": True,
#             "counters": {
#                 "in_octets": 2502358,
#                 "in_pkts": 13266,
#                 "out_octets": 2169451,
#                 "out_pkts": 13773
# },
# "type": "Hundred Gigabit Ethernet",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.70.1/24": {
# "ip": "172.16.70.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "dot1q",
# "first_dot1q": "513"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "keepalive": 10
# },
# "HundredGigE1/0/35.14": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "connected": True,
#             "counters": {
#                 "in_octets": 2513180,
#                 "in_pkts": 13347,
#                 "out_octets": 2171050,
#                 "out_pkts": 13794
# },
# "type": "Hundred Gigabit Ethernet",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.151.1/24": {
# "ip": "172.16.151.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "dot1q",
# "first_dot1q": "514"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "keepalive": 10
# },
# "HundredGigE1/0/35.15": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "connected": True,
#             "counters": {
#                 "in_octets": 2497442,
#                 "in_pkts": 13260,
#                 "out_octets": 2169487,
#                 "out_pkts": 13787
# },
# "type": "Hundred Gigabit Ethernet",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.234.1/24": {
# "ip": "172.16.234.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "dot1q",
# "first_dot1q": "515"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "keepalive": 10
# },
# "HundredGigE1/0/35.16": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "connected": True,
#             "counters": {
#                 "in_octets": 2512146,
#                 "in_pkts": 13336,
#                 "out_octets": 2169512,
#                 "out_pkts": 13773
# },
# "type": "Hundred Gigabit Ethernet",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.64.1/24": {
# "ip": "172.16.64.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "dot1q",
# "first_dot1q": "516"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "keepalive": 10
# },
# "HundredGigE1/0/35.17": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "connected": True,
#             "counters": {
#                 "in_octets": 2505612,
#                 "in_pkts": 13287,
#                 "out_octets": 2170930,
#                 "out_pkts": 13796
# },
# "type": "Hundred Gigabit Ethernet",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.151.1/24": {
# "ip": "172.16.151.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "dot1q",
# "first_dot1q": "517"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "keepalive": 10
# },
# "HundredGigE1/0/35.18": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "connected": True,
#             "counters": {
#                 "in_octets": 2502019,
#                 "in_pkts": 13263,
#                 "out_octets": 2169941,
#                 "out_pkts": 13780
# },
# "type": "Hundred Gigabit Ethernet",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.240.1/24": {
# "ip": "172.16.240.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "dot1q",
# "first_dot1q": "518"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "keepalive": 10
# },
# "HundredGigE1/0/35.19": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "connected": True,
#             "counters": {
#                 "in_octets": 2509614,
#                 "in_pkts": 13353,
#                 "out_octets": 2170375,
#                 "out_pkts": 13787
# },
# "type": "Hundred Gigabit Ethernet",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.76.1/24": {
# "ip": "172.16.76.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "dot1q",
# "first_dot1q": "519"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "keepalive": 10
# },
# "HundredGigE1/0/35.20": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "connected": True,
#             "counters": {
#                 "in_octets": 2504934,
#                 "in_pkts": 13280,
#                 "out_octets": 2169331,
#                 "out_pkts": 13772
# },
# "type": "Hundred Gigabit Ethernet",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.169.1/24": {
# "ip": "172.16.169.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "dot1q",
# "first_dot1q": "520"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "keepalive": 10
# },
# "HundredGigE1/0/35.101": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "connected": True,
#             "counters": {
#                 "in_octets": 2598532,
#                 "in_pkts": 25478,
#                 "out_octets": 0,
#                 "out_pkts": 0
# },
# "type": "Hundred Gigabit Ethernet",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "192.168.9.1/24": {
# "ip": "192.168.9.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "dot1q",
# "first_dot1q": "101"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "keepalive": 10
# },
# "HundredGigE1/0/35.102": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "connected": True,
#             "counters": {
#                 "in_octets": 2598430,
#                 "in_pkts": 25477,
#                 "out_octets": 0,
#                 "out_pkts": 0
# },
# "type": "Hundred Gigabit Ethernet",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "192.168.106.1/24": {
# "ip": "192.168.106.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "dot1q",
# "first_dot1q": "102"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "keepalive": 10
# },
# "HundredGigE1/0/35.103": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "connected": True,
#             "counters": {
#                 "in_octets": 2598634,
#                 "in_pkts": 25479,
#                 "out_octets": 0,
#                 "out_pkts": 0
# },
# "type": "Hundred Gigabit Ethernet",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "192.168.205.1/24": {
# "ip": "192.168.205.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "dot1q",
# "first_dot1q": "103"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "keepalive": 10
# },
# "HundredGigE1/0/35.104": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "connected": True,
#             "counters": {
#                 "in_octets": 2598634,
#                 "in_pkts": 25479,
#                 "out_octets": 0,
#                 "out_pkts": 0
# },
# "type": "Hundred Gigabit Ethernet",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "192.168.51.1/24": {
# "ip": "192.168.51.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "dot1q",
# "first_dot1q": "104"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "keepalive": 10
# },
# "HundredGigE1/0/35.105": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "connected": True,
#             "counters": {
#                 "in_octets": 2598430,
#                 "in_pkts": 25477,
#                 "out_octets": 0,
#                 "out_pkts": 0
# },
# "type": "Hundred Gigabit Ethernet",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "192.168.154.1/24": {
# "ip": "192.168.154.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "dot1q",
# "first_dot1q": "105"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "keepalive": 10
# },
# "HundredGigE1/0/35.106": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "connected": True,
#             "counters": {
#                 "in_octets": 2598634,
#                 "in_pkts": 25479,
#                 "out_octets": 0,
#                 "out_pkts": 0
# },
# "type": "Hundred Gigabit Ethernet",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "192.168.4.1/24": {
# "ip": "192.168.4.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "dot1q",
# "first_dot1q": "106"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "keepalive": 10
# },
# "HundredGigE1/0/35.107": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "connected": True,
#             "counters": {
#                 "in_octets": 2598344,
#                 "in_pkts": 25476,
#                 "out_octets": 0,
#                 "out_pkts": 0
# },
# "type": "Hundred Gigabit Ethernet",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "192.168.111.1/24": {
# "ip": "192.168.111.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "dot1q",
# "first_dot1q": "107"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "keepalive": 10
# },
# "HundredGigE1/0/35.108": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "connected": True,
#             "counters": {
#                 "in_octets": 2598532,
#                 "in_pkts": 25478,
#                 "out_octets": 0,
#                 "out_pkts": 0
# },
# "type": "Hundred Gigabit Ethernet",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "192.168.220.1/24": {
# "ip": "192.168.220.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "dot1q",
# "first_dot1q": "108"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "keepalive": 10
# },
# "HundredGigE1/0/35.109": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "connected": True,
#             "counters": {
#                 "in_octets": 2598532,
#                 "in_pkts": 25478,
#                 "out_octets": 0,
#                 "out_pkts": 0
# },
# "type": "Hundred Gigabit Ethernet",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "192.168.76.1/24": {
# "ip": "192.168.76.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "dot1q",
# "first_dot1q": "109"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "keepalive": 10
# },
# "HundredGigE1/0/35.110": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "connected": True,
#             "counters": {
#                 "in_octets": 2598532,
#                 "in_pkts": 25478,
#                 "out_octets": 0,
#                 "out_pkts": 0
# },
# "type": "Hundred Gigabit Ethernet",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "192.168.189.1/24": {
# "ip": "192.168.189.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "dot1q",
# "first_dot1q": "110"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "keepalive": 10
# },
# "HundredGigE1/0/36": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "connected": False,
# "type": "Hundred Gigabit Ethernet",
# "mac_address": "70b3.17ff.6583",
# "phys_address": "70b3.17ff.6583",
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "keepalive": 10,
# "duplex_mode": "auto",
# "port_speed": "auto",
# "link_type": "auto",
# "auto_negotiate": True,
# "media_type": "unknown",
# "flow_control": {
# "receive": True,
# "send": False
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "never",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 2000,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "20:02:32",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_watchdog": 0,
# "in_mac_pause_frames": 0,
# "in_with_dribble": 0,
# "out_pkts": 0,
# "out_octets": 0,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_babble": 0,
# "out_late_collision": 0,
# "out_deferred": 0,
# "out_lost_carrier": 0,
# "out_no_carrier": 0,
# "out_mac_pause_frames": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "HundredGigE1/0/37": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "connected": False,
# "type": "Hundred Gigabit Ethernet",
# "mac_address": "70b3.17ff.6584",
# "phys_address": "70b3.17ff.6584",
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "keepalive": 10,
# "duplex_mode": "auto",
# "port_speed": "auto",
# "link_type": "auto",
# "auto_negotiate": True,
# "media_type": "unknown",
# "flow_control": {
# "receive": True,
# "send": False
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "never",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 2000,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "20:02:32",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_watchdog": 0,
# "in_mac_pause_frames": 0,
# "in_with_dribble": 0,
# "out_pkts": 0,
# "out_octets": 0,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_babble": 0,
# "out_late_collision": 0,
# "out_deferred": 0,
# "out_lost_carrier": 0,
# "out_no_carrier": 0,
# "out_mac_pause_frames": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "HundredGigE1/0/38": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "connected": False,
# "type": "Hundred Gigabit Ethernet",
# "mac_address": "70b3.17ff.6585",
# "phys_address": "70b3.17ff.6585",
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "keepalive": 10,
# "duplex_mode": "auto",
# "port_speed": "auto",
# "link_type": "auto",
# "auto_negotiate": True,
# "media_type": "unknown",
# "flow_control": {
# "receive": True,
# "send": False
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "never",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 2000,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "20:02:32",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_watchdog": 0,
# "in_mac_pause_frames": 0,
# "in_with_dribble": 0,
# "out_pkts": 0,
# "out_octets": 0,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_babble": 0,
# "out_late_collision": 0,
# "out_deferred": 0,
# "out_lost_carrier": 0,
# "out_no_carrier": 0,
# "out_mac_pause_frames": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "HundredGigE1/0/39": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "connected": False,
# "type": "Hundred Gigabit Ethernet",
# "mac_address": "70b3.17ff.6586",
# "phys_address": "70b3.17ff.6586",
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "keepalive": 10,
# "duplex_mode": "auto",
# "port_speed": "auto",
# "link_type": "auto",
# "auto_negotiate": True,
# "media_type": "unknown",
# "flow_control": {
# "receive": True,
# "send": False
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "never",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 2000,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "20:02:37",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_watchdog": 0,
# "in_mac_pause_frames": 0,
# "in_with_dribble": 0,
# "out_pkts": 0,
# "out_octets": 0,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_babble": 0,
# "out_late_collision": 0,
# "out_deferred": 0,
# "out_lost_carrier": 0,
# "out_no_carrier": 0,
# "out_mac_pause_frames": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "HundredGigE1/0/40": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "connected": False,
# "type": "Hundred Gigabit Ethernet",
# "mac_address": "70b3.17ff.6587",
# "phys_address": "70b3.17ff.6587",
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "keepalive": 10,
# "duplex_mode": "auto",
# "port_speed": "auto",
# "link_type": "auto",
# "auto_negotiate": True,
# "media_type": "unknown",
# "flow_control": {
# "receive": True,
# "send": False
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "never",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 2000,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "20:02:37",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_watchdog": 0,
# "in_mac_pause_frames": 0,
# "in_with_dribble": 0,
# "out_pkts": 0,
# "out_octets": 0,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_babble": 0,
# "out_late_collision": 0,
# "out_deferred": 0,
# "out_lost_carrier": 0,
# "out_no_carrier": 0,
# "out_mac_pause_frames": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "HundredGigE1/0/41": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "connected": True,
#         "duplex_mode": "full",
#         "auto_negotiate": False,
#         "link_type": "force-up",
#         "type": "Hundred Gigabit Ethernet",
#         "media_type": "QSFP 100G SR4",
#         "port_speed": "100gb/s",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.4.2/24": {
# "ip": "172.16.4.2",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "keepalive": 10,
# "flow_control": {
# "receive": True,
# "send": False
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "00:00:01",
# "last_output": "00:00:00",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 375,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 30,
# "in_rate": 39000,
# "in_rate_pkts": 50,
# "out_rate": 35000,
# "out_rate_pkts": 48
# },
# "last_clear": "20:02:37",
# "in_pkts": 3581103,
# "in_octets": 340490834,
# "in_no_buffer": 0,
# "in_multicast_pkts": 20089,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_watchdog": 0,
# "in_mac_pause_frames": 0,
# "in_with_dribble": 0,
# "out_pkts": 3494815,
# "out_octets": 323841840,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 5,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_babble": 0,
# "out_late_collision": 0,
# "out_deferred": 0,
# "out_lost_carrier": 0,
# "out_no_carrier": 0,
# "out_mac_pause_frames": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "HundredGigE1/0/42": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "connected": False,
# "type": "Hundred Gigabit Ethernet",
# "mac_address": "70b3.17ff.6589",
# "phys_address": "70b3.17ff.6589",
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "keepalive": 10,
# "duplex_mode": "auto",
# "port_speed": "auto",
# "link_type": "auto",
# "auto_negotiate": True,
# "media_type": "unknown",
# "flow_control": {
# "receive": True,
# "send": False
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "never",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 2000,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "20:02:41",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_watchdog": 0,
# "in_mac_pause_frames": 0,
# "in_with_dribble": 0,
# "out_pkts": 0,
# "out_octets": 0,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_babble": 0,
# "out_late_collision": 0,
# "out_deferred": 0,
# "out_lost_carrier": 0,
# "out_no_carrier": 0,
# "out_mac_pause_frames": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "HundredGigE1/0/43": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "connected": False,
# "type": "Hundred Gigabit Ethernet",
# "mac_address": "70b3.17ff.658a",
# "phys_address": "70b3.17ff.658a",
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "keepalive": 10,
# "duplex_mode": "auto",
# "port_speed": "auto",
# "link_type": "auto",
# "auto_negotiate": True,
# "media_type": "unknown",
# "flow_control": {
# "receive": True,
# "send": False
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "never",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 2000,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "20:02:41",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_watchdog": 0,
# "in_mac_pause_frames": 0,
# "in_with_dribble": 0,
# "out_pkts": 0,
# "out_octets": 0,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_babble": 0,
# "out_late_collision": 0,
# "out_deferred": 0,
# "out_lost_carrier": 0,
# "out_no_carrier": 0,
# "out_mac_pause_frames": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "HundredGigE1/0/44": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "connected": False,
# "type": "Hundred Gigabit Ethernet",
# "mac_address": "70b3.17ff.658b",
# "phys_address": "70b3.17ff.658b",
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "keepalive": 10,
# "duplex_mode": "auto",
# "port_speed": "auto",
# "link_type": "auto",
# "auto_negotiate": True,
# "media_type": "unknown",
# "flow_control": {
# "receive": True,
# "send": False
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "never",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 2000,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "20:02:41",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_watchdog": 0,
# "in_mac_pause_frames": 0,
# "in_with_dribble": 0,
# "out_pkts": 0,
# "out_octets": 0,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_babble": 0,
# "out_late_collision": 0,
# "out_deferred": 0,
# "out_lost_carrier": 0,
# "out_no_carrier": 0,
# "out_mac_pause_frames": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "HundredGigE1/0/45": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "connected": False,
# "type": "Hundred Gigabit Ethernet",
# "mac_address": "70b3.17ff.658c",
# "phys_address": "70b3.17ff.658c",
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "keepalive": 10,
# "duplex_mode": "auto",
# "port_speed": "auto",
# "link_type": "auto",
# "auto_negotiate": True,
# "media_type": "unknown",
# "flow_control": {
# "receive": True,
# "send": False
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "never",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 2000,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "20:02:45",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_watchdog": 0,
# "in_mac_pause_frames": 0,
# "in_with_dribble": 0,
# "out_pkts": 0,
# "out_octets": 0,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_babble": 0,
# "out_late_collision": 0,
# "out_deferred": 0,
# "out_lost_carrier": 0,
# "out_no_carrier": 0,
# "out_mac_pause_frames": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "HundredGigE1/0/46": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "connected": False,
# "type": "Hundred Gigabit Ethernet",
# "mac_address": "70b3.17ff.658d",
# "phys_address": "70b3.17ff.658d",
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "keepalive": 10,
# "duplex_mode": "auto",
# "port_speed": "auto",
# "link_type": "auto",
# "auto_negotiate": True,
# "media_type": "unknown",
# "flow_control": {
# "receive": True,
# "send": False
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "never",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 2000,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "20:02:45",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_watchdog": 0,
# "in_mac_pause_frames": 0,
# "in_with_dribble": 0,
# "out_pkts": 0,
# "out_octets": 0,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_babble": 0,
# "out_late_collision": 0,
# "out_deferred": 0,
# "out_lost_carrier": 0,
# "out_no_carrier": 0,
# "out_mac_pause_frames": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "HundredGigE1/0/47": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "down",
# "oper_status": "down",
# "connected": False,
# "type": "Hundred Gigabit Ethernet",
# "mac_address": "70b3.17ff.658e",
# "phys_address": "70b3.17ff.658e",
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "keepalive": 10,
# "duplex_mode": "auto",
# "port_speed": "auto",
# "link_type": "auto",
# "auto_negotiate": True,
# "media_type": "unknown",
# "flow_control": {
# "receive": True,
# "send": False
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "never",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 2000,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "20:02:45",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_watchdog": 0,
# "in_mac_pause_frames": 0,
# "in_with_dribble": 0,
# "out_pkts": 0,
# "out_octets": 0,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_babble": 0,
# "out_late_collision": 0,
# "out_deferred": 0,
# "out_lost_carrier": 0,
# "out_no_carrier": 0,
# "out_mac_pause_frames": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "HundredGigE1/0/48": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
#         "auto_negotiate": False,
#         "connected": True,
#         "duplex_mode": "full",
#         "link_type": "force-up",
#         "media_type": "QSFP 100G SR4",
#         "port_speed": "100gb/s",
# "type": "Hundred Gigabit Ethernet",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "172.16.94.2/24": {
# "ip": "172.16.94.2",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "dot1q",
# "first_dot1q": "1"
# },
# "keepalive": 10,
# "flow_control": {
# "receive": True,
# "send": False
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "00:00:00",
# "last_output": "00:00:00",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 3,
# "input_queue_max": 375,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 30,
# "in_rate": 330000,
# "in_rate_pkts": 550,
# "out_rate": 301000,
# "out_rate_pkts": 547
# },
# "last_clear": "20:02:49",
# "in_pkts": 39665255,
# "in_octets": 3012714995,
# "in_no_buffer": 0,
# "in_multicast_pkts": 548066,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_watchdog": 0,
# "in_mac_pause_frames": 0,
# "in_with_dribble": 0,
# "out_pkts": 39424533,
# "out_octets": 2729787452,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 16,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_babble": 0,
# "out_late_collision": 0,
# "out_deferred": 0,
# "out_lost_carrier": 0,
# "out_no_carrier": 0,
# "out_mac_pause_frames": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "HundredGigE1/0/48.1": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "connected": True,
#         "counters": {
#             "in_octets": 222402380,
#             "in_pkts": 3426695,
#             "out_octets": 112615606,
#             "out_pkts": 1729535
#         },
# "type": "Hundred Gigabit Ethernet",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "192.168.51.1/24": {
# "ip": "192.168.51.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "dot1q",
# "first_dot1q": "201"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "keepalive": 10
# },
# "HundredGigE1/0/48.2": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "connected": True,
#         "counters": {
#             "in_octets": 222808882,
#             "in_pkts": 3430077,
#             "out_octets": 113033370,
#             "out_pkts": 1733061
#         },
# "type": "Hundred Gigabit Ethernet",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "192.168.205.1/24": {
# "ip": "192.168.205.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "dot1q",
# "first_dot1q": "202"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "keepalive": 10
# },
# "HundredGigE1/0/48.3": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "connected": True,
#         "counters": {
#             "in_octets": 222402736,
#             "in_pkts": 3426685,
#             "out_octets": 112614680,
#             "out_pkts": 1729514
#         },
# "type": "Hundred Gigabit Ethernet",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "192.168.106.1/24": {
# "ip": "192.168.106.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "dot1q",
# "first_dot1q": "203"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "keepalive": 10
# },
# "HundredGigE1/0/48.4": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "connected": True,
#         "counters": {
#             "in_octets": 222417026,
#             "in_pkts": 3426926,
#             "out_octets": 112627684,
#             "out_pkts": 1729722
#         },
# "type": "Hundred Gigabit Ethernet",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "192.168.9.1/24": {
# "ip": "192.168.9.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "dot1q",
# "first_dot1q": "204"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "keepalive": 10
# },
# "HundredGigE1/0/48.5": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "connected": True,
#         "counters": {
#             "in_octets": 222416748,
#             "in_pkts": 3426916,
#             "out_octets": 112626186,
#             "out_pkts": 1729694
#         },
# "type": "Hundred Gigabit Ethernet",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "192.168.169.1/24": {
# "ip": "192.168.169.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "dot1q",
# "first_dot1q": "205"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "keepalive": 10
# },
# "HundredGigE1/0/48.6": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "connected": True,
#         "counters": {
#             "in_octets": 222434908,
#             "in_pkts": 3427206,
#             "out_octets": 112633620,
#             "out_pkts": 1729813
#         },
# "type": "Hundred Gigabit Ethernet",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "192.168.76.1/24": {
# "ip": "192.168.76.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "dot1q",
# "first_dot1q": "206"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "keepalive": 10
# },
# "HundredGigE1/0/48.7": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "connected": True,
#         "counters": {
#             "in_octets": 222419906,
#             "in_pkts": 3426971,
#             "out_octets": 112634178,
#             "out_pkts": 1729823
#         },
# "type": "Hundred Gigabit Ethernet",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "192.168.240.1/24": {
# "ip": "192.168.240.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "dot1q",
# "first_dot1q": "207"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "keepalive": 10
# },
# "HundredGigE1/0/48.8": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "connected": True,
#         "counters": {
#             "in_octets": 222419256,
#             "in_pkts": 3426971,
#             "out_octets": 112634398,
#             "out_pkts": 1729821
#         },
# "type": "Hundred Gigabit Ethernet",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "192.168.151.1/24": {
# "ip": "192.168.151.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "dot1q",
# "first_dot1q": "208"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "keepalive": 10
# },
# "HundredGigE1/0/48.9": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "connected": True,
#         "counters": {
#             "in_octets": 222412094,
#             "in_pkts": 3426848,
#             "out_octets": 112626654,
#             "out_pkts": 1729707
#         },
# "type": "Hundred Gigabit Ethernet",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "192.168.64.1/24": {
# "ip": "192.168.64.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "dot1q",
# "first_dot1q": "209"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "keepalive": 10
# },
# "HundredGigE1/0/48.10": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "connected": True,
#         "counters": {
#             "in_octets": 222430124,
#             "in_pkts": 3427137,
#             "out_octets": 112632450,
#             "out_pkts": 1729798
#         },
# "type": "Hundred Gigabit Ethernet",
# "mac_address": "70b3.17ff.6500",
# "phys_address": "70b3.17ff.6500",
# "ipv4": {
# "192.168.234.1/24": {
# "ip": "192.168.234.1",
# "prefix_length": "24"
# }
# },
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 100000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "dot1q",
# "first_dot1q": "210"
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "keepalive": 10
# },
# "Bluetooth0/4": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": False,
# "line_protocol": "down",
# "oper_status": "down",
# "type": "BT management port",
# "mac_address": "70b3.17ff.6560",
# "phys_address": "70b3.17ff.6560",
# "delay": 10,
# "mtu": 1500,
# "bandwidth": 1000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "keepalive": 10,
# "flow_control": {
# "receive": False,
# "send": False
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "never",
# "last_output": "never",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 75,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "never",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_watchdog": 0,
# "in_mac_pause_frames": 0,
# "out_pkts": 0,
# "out_octets": 0,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 0,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_babble": 0,
# "out_late_collision": 0,
# "out_deferred": 0,
# "out_lost_carrier": 1,
# "out_no_carrier": 0,
# "out_mac_pause_frames": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Port-channel2": {
# "port_channel": {
# "port_channel_member": True,
# "port_channel_member_intfs": [
# "FortyGigabitEthernet1/0/2",
# "FortyGigabitEthernet1/0/4"
# ]
# },
#         "port_speed": "40gb/s",
#         "enabled": True,
#         "line_protocol": "up",
#         "oper_status": "up",
#         "connected": True,
#         "auto_negotiate": False,
#         "type": "EtherChannel",
#         "mac_address": "70b3.17ff.6561",
#         "phys_address": "70b3.17ff.6561",
#         "delay": 10,
#         "duplex_mode": "full",
#         "link_type": "force-up",
#         "media_type": "N/A",
# "mtu": 1500,
# "bandwidth": 80000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "keepalive": 10,
# "flow_control": {
# "receive": True,
# "send": False
# },
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "00:00:00",
# "last_output": "00:04:37",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 2000,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 389000,
# "in_rate_pkts": 630,
# "out_rate": 385000,
# "out_rate_pkts": 622
# },
# "last_clear": "never",
# "in_pkts": 45955737,
# "in_octets": 3599101746,
# "in_no_buffer": 0,
# "in_multicast_pkts": 1484746,
# "in_broadcast_pkts": 1484746,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_watchdog": 0,
# "in_mac_pause_frames": 0,
# "in_with_dribble": 0,
# "out_pkts": 45228880,
# "out_octets": 3550088514,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 1,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_babble": 0,
# "out_late_collision": 0,
# "out_deferred": 0,
# "out_lost_carrier": 0,
# "out_no_carrier": 0,
# "out_mac_pause_frames": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Loopback1": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "type": "Loopback",
# "ipv4": {
# "192.168.154.1/32": {
# "ip": "192.168.154.1",
# "prefix_length": "32"
# }
# },
# "delay": 5000,
# "mtu": 1514,
# "bandwidth": 8000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "loopback"
# },
# "keepalive": 10,
# "last_input": "00:00:43",
# "last_output": "never",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 75,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 0
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "never",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_abort": 0,
# "out_pkts": 1383,
# "out_octets": 33608,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 0,
# "out_collision": 0,
# "out_unknown_protocl_drops": 1375,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Loopback10": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "type": "Loopback",
# "delay": 5000,
# "mtu": 1514,
# "bandwidth": 8000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "loopback"
# },
# "keepalive": 10,
# "last_input": "never",
# "last_output": "never",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 75,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 0
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "never",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_abort": 0,
# "out_pkts": 0,
# "out_octets": 0,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 0,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Loopback101": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "type": "Loopback",
# "ipv4": {
# "10.204.1.2/32": {
# "ip": "10.204.1.2",
# "prefix_length": "32"
# }
# },
# "delay": 5000,
# "mtu": 1514,
# "bandwidth": 8000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "loopback"
# },
# "keepalive": 10,
# "last_input": "00:00:22",
# "last_output": "never",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 75,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 0
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "never",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_abort": 0,
# "out_pkts": 1338,
# "out_octets": 159232,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 0,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Loopback102": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "type": "Loopback",
# "ipv4": {
# "10.154.1.2/32": {
# "ip": "10.154.1.2",
# "prefix_length": "32"
# }
# },
# "delay": 5000,
# "mtu": 1514,
# "bandwidth": 8000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "loopback"
# },
# "keepalive": 10,
# "last_input": "00:00:16",
# "last_output": "never",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 75,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 0
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "never",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_abort": 0,
# "out_pkts": 1343,
# "out_octets": 160112,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 0,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Tunnel0": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "type": "Tunnel",
# "description": "Pim Register Tunnel (Encap) for Embedded RP",
# "delay": 50000,
# "mtu": 1452,
# "bandwidth": 100,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "tunnel"
# },
# "last_input": "never",
# "last_output": "never",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 75,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 0
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "20:03:05",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_abort": 0,
# "out_pkts": 0,
# "out_octets": 0,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 0,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Tunnel1": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "type": "Tunnel",
# "ipv4": {
# "172.16.25.1/24": {
# "ip": "172.16.25.1",
# "prefix_length": "24"
# }
# },
# "delay": 50000,
# "mtu": 17868,
# "bandwidth": 100,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "tunnel"
# },
# "last_input": "never",
# "last_output": "00:00:00",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 75,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 11176,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 0
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "20:03:11",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_abort": 0,
# "out_pkts": 27765,
# "out_octets": 2695512,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 0,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Tunnel2": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "type": "Tunnel",
# "ipv4": {
# "172.16.121.1/24": {
# "ip": "172.16.121.1",
# "prefix_length": "24"
# }
# },
# "delay": 50000,
# "mtu": 17868,
# "bandwidth": 100,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "tunnel"
# },
# "last_input": "never",
# "last_output": "00:00:04",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 75,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 11178,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 0
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "20:03:11",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_abort": 0,
# "out_pkts": 27789,
# "out_octets": 2697642,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 0,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Tunnel3": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "type": "Tunnel",
# "ipv4": {
# "172.16.219.1/24": {
# "ip": "172.16.219.1",
# "prefix_length": "24"
# }
# },
# "delay": 50000,
# "mtu": 17868,
# "bandwidth": 100,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "tunnel"
# },
# "last_input": "never",
# "last_output": "00:00:01",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 75,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 11179,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 0
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "20:03:11",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_abort": 0,
# "out_pkts": 27780,
# "out_octets": 2696882,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 0,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Tunnel4": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "type": "Tunnel",
# "ipv4": {
# "172.16.64.1/24": {
# "ip": "172.16.64.1",
# "prefix_length": "24"
# }
# },
# "delay": 50000,
# "mtu": 17868,
# "bandwidth": 100,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "tunnel"
# },
# "last_input": "never",
# "last_output": "00:00:01",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 75,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 11180,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 0
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "20:03:15",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_abort": 0,
# "out_pkts": 27765,
# "out_octets": 2695606,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 0,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Tunnel5": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "type": "Tunnel",
# "ipv4": {
# "172.16.166.1/24": {
# "ip": "172.16.166.1",
# "prefix_length": "24"
# }
# },
# "delay": 50000,
# "mtu": 17868,
# "bandwidth": 100,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "tunnel"
# },
# "last_input": "never",
# "last_output": "00:00:01",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 75,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 11176,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 0
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "20:03:15",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_abort": 0,
# "out_pkts": 27769,
# "out_octets": 2695894,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 0,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Tunnel6": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "type": "Tunnel",
# "ipv4": {
# "172.16.15.1/24": {
# "ip": "172.16.15.1",
# "prefix_length": "24"
# }
# },
# "delay": 50000,
# "mtu": 17868,
# "bandwidth": 100,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "tunnel"
# },
# "last_input": "never",
# "last_output": "00:00:02",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 75,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 11172,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 0
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "20:03:19",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_abort": 0,
# "out_pkts": 27752,
# "out_octets": 2694338,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 0,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Tunnel7": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "type": "Tunnel",
# "ipv4": {
# "172.16.121.1/24": {
# "ip": "172.16.121.1",
# "prefix_length": "24"
# }
# },
# "delay": 50000,
# "mtu": 17868,
# "bandwidth": 100,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "tunnel"
# },
# "last_input": "never",
# "last_output": "00:00:02",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 75,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 11176,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 0
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "20:03:19",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_abort": 0,
# "out_pkts": 27778,
# "out_octets": 2696668,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 0,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Tunnel8": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "type": "Tunnel",
# "ipv4": {
# "172.16.229.1/24": {
# "ip": "172.16.229.1",
# "prefix_length": "24"
# }
# },
# "delay": 50000,
# "mtu": 17868,
# "bandwidth": 100,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "tunnel"
# },
# "last_input": "never",
# "last_output": "00:00:02",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 75,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 11176,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 0
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "20:03:19",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_abort": 0,
# "out_pkts": 27756,
# "out_octets": 2694776,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 0,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Tunnel9": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "type": "Tunnel",
# "ipv4": {
# "172.16.84.1/24": {
# "ip": "172.16.84.1",
# "prefix_length": "24"
# }
# },
# "delay": 50000,
# "mtu": 17868,
# "bandwidth": 100,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "tunnel"
# },
# "last_input": "never",
# "last_output": "00:00:00",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 75,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 11176,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 0
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "20:03:23",
# "in_pkts": 0,
# "in_octets": 0,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_abort": 0,
# "out_pkts": 27775,
# "out_octets": 2696372,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 0,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# },
# "Tunnel10": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "type": "Tunnel",
# "ipv4": {
# "172.16.186.1/24": {
# "ip": "172.16.186.1",
# "prefix_length": "24"
# }
# },
# "delay": 50000,
# "mtu": 17868,
# "bandwidth": 100,
# "reliability": "255/255",
# "txload": "40/255",
# "rxload": "135/255",
# "encapsulations": {
# "encapsulation": "tunnel"
# },
# "last_input": "00:00:00",
# "last_output": "00:00:00",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 75,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 34678,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 0
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 53000,
# "in_rate_pkts": 94,
# "out_rate": 16000,
# "out_rate_pkts": 23
# },
# "last_clear": "20:03:23",
# "in_pkts": 6832599,
# "in_octets": 479845002,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_abort": 0,
# "out_pkts": 1674895,
# "out_octets": 151072685,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 0,
# "out_collision": 0,
# "out_unknown_protocl_drops": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# }
# }
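# The fixture above is a plain dict keyed by interface name, which makes
# derived views one comprehension away. A minimal sketch of a helper for
# that shape (hypothetical, not part of the original test module) that
# collects the configured IPv4 prefixes per interface:
#
#     def interfaces_with_ipv4(parsed):
#         """Map interface name -> sorted IPv4 prefixes, skipping entries
#         (shut ports, loopbacks without addresses) that carry no 'ipv4'
#         block."""
#         return {intf: sorted(data["ipv4"])
#                 for intf, data in parsed.items()
#                 if "ipv4" in data}
#
# Applied to the dict above it would yield, for example,
# "HundredGigE1/0/41" -> ["172.16.4.2/24"] and
# "HundredGigE1/0/48.1" -> ["192.168.51.1/24"].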
# golden_parsed_interface_output_2 = {
# "TenGigabitEthernet0/2/0": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "type": "SPA-1X10GE-L-V2",
# "mac_address": "006b.f1ff.bef5",
# "phys_address": "006b.f1ff.bef5",
# "description": "toP",
# "ipv4": {
# "10.169.197.94/30": {
# "ip": "10.169.197.94",
# "prefix_length": "30"
# }
# },
# "delay": 10,
# "mtu": 1552,
# "bandwidth": 10000000,
# "reliability": "255/255",
# "txload": "2/255",
# "rxload": "2/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "flow_control": {
# "receive": True,
# "send": True
# },
# "carrier_delay_up": 2,
# "carrier_delay_down": 10,
# "arp_type": "arpa",
#         "auto_negotiate": False,
#         "duplex_mode": "full",
#         "link_type": "force-up",
#         "media_type": "10GBase-SR/SW",
#         "port_speed": "10000mbps",
# "arp_timeout": "04:00:00",
# "last_input": "00:07:19",
# "last_output": "03:51:33",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 375,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 79676000,
# "in_rate_pkts": 9999,
# "out_rate": 79998000,
# "out_rate_pkts": 9999
# },
# "last_clear": "never",
# "in_pkts": 1779405333,
# "in_octets": 1772200805652,
# "in_no_buffer": 0,
# "in_multicast_pkts": 60322,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_watchdog": 0,
# "in_mac_pause_frames": 0,
# "out_pkts": 1791189623,
# "out_octets": 1790956453417,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_collision": 0,
# "out_unknown_protocl_drops": 291,
# "out_babble": 0,
# "out_late_collision": 0,
# "out_deferred": 0,
# "out_lost_carrier": 0,
# "out_no_carrier": 0,
# "out_mac_pause_frames": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# }
# }
# golden_interface_output_2 = {'execute.return_value': '''
# PE1>show interfaces TenGigabitEthernet 0/2/0
# Load for five secs: 3%/0%; one minute: 3%; five minutes: 3%
# Time source is NTP, 17:32:09.532 EST Tue Apr 23 2019
# TenGigabitEthernet0/2/0 is up, line protocol is up
# Hardware is SPA-1X10GE-L-V2, address is 006b.f1ff.bef5 (bia 006b.f1ff.bef5)
# Description: toP
# Internet address is 10.169.197.94/30
# MTU 1552 bytes, BW 10000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 2/255, rxload 2/255
# Encapsulation ARPA, loopback not set
# Keepalive not supported
# Full Duplex, 10000Mbps, link type is force-up, media type is 10GBase-SR/SW
# output flow-control is on, input flow-control is on
# Asymmetric Carrier-Delay Up Timer is 2 sec
# Asymmetric Carrier-Delay Down Timer is 10 sec
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input 00:07:19, output 03:51:33, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 79676000 bits/sec, 9999 packets/sec
# 5 minute output rate 79998000 bits/sec, 9999 packets/sec
# 1779405333 packets input, 1772200805652 bytes, 0 no buffer
# Received 3 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 60322 multicast, 0 pause input
# 1791189623 packets output, 1790956453417 bytes, 0 underruns
# 0 output errors, 0 collisions, 2 interface resets
# 291 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# PE1>
# '''}
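# The txload/rxload strings in these fixtures are not independent data: IOS
# derives them from the current rate and the configured bandwidth, roughly
# as load = rate_bps * 255 // (bandwidth_kbps * 1000). A self-contained
# sketch (the formula is an approximation of IOS behaviour, not taken from
# the parser):
#
#     def load_fraction(rate_bps, bandwidth_kbps):
#         """Approximate the IOS "x/255" load figure for a given bit rate."""
#         return "%d/255" % (rate_bps * 255 // (bandwidth_kbps * 1000))
#
#     # TenGigabitEthernet0/2/0 above: BW 10000000 Kbit/sec, 5 minute input
#     # rate 79676000 bits/sec and output rate 79998000 bits/sec, matching
#     # the "txload 2/255, rxload 2/255" line in the raw output.
#     assert load_fraction(79676000, 10000000) == "2/255"   # rxload
#     assert load_fraction(79998000, 10000000) == "2/255"   # txload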
# golden_parsed_interface_output_3 = {
# "GigabitEthernet3": {
# "port_channel": {
# "port_channel_member": False
# },
# "enabled": True,
# "line_protocol": "up",
# "oper_status": "up",
# "type": "CSR vNIC",
# "mac_address": "fa16.3eff.8a36",
# "phys_address": "fa16.3eff.8a36",
# "ipv4": {
# "10.0.2.1/24": {
# "ip": "10.0.2.1",
# "prefix_length": "24"
# }
# },
# "delay": 600,
# "mtu": 1500,
# "bandwidth": 1000000,
# "reliability": "255/255",
# "txload": "1/255",
# "rxload": "1/255",
# "encapsulations": {
# "encapsulation": "arpa"
# },
# "keepalive": 10,
# "duplex_mode": "full",
#         "port_speed": "1000mbps",
# "link_type": "auto",
# "auto_negotiate": True,
# "media_type": "Virtual",
# "flow_control": {
# "receive": False,
# "send": False
# },
# "carrier_delay": 10,
# "arp_type": "arpa",
# "arp_timeout": "04:00:00",
# "last_input": "00:00:00",
# "last_output": "00:00:00",
# "output_hang": "never",
# "queues": {
# "input_queue_size": 0,
# "input_queue_max": 375,
# "input_queue_drops": 0,
# "input_queue_flushes": 0,
# "total_output_drop": 0,
# "queue_strategy": "fifo",
# "output_queue_size": 0,
# "output_queue_max": 40
# },
# "counters": {
# "rate": {
# "load_interval": 300,
# "in_rate": 0,
# "in_rate_pkts": 0,
# "out_rate": 0,
# "out_rate_pkts": 0
# },
# "last_clear": "never",
# "in_pkts": 101744,
# "in_octets": 9327436,
# "in_no_buffer": 0,
# "in_multicast_pkts": 0,
# "in_broadcast_pkts": 0,
# "in_runts": 0,
# "in_giants": 0,
# "in_throttles": 0,
# "in_errors": 0,
# "in_crc_errors": 0,
# "in_frame": 0,
# "in_overrun": 0,
# "in_ignored": 0,
# "in_watchdog": 0,
# "in_mac_pause_frames": 0,
# "out_pkts": 65026,
# "out_octets": 7387154,
# "out_underruns": 0,
# "out_errors": 0,
# "out_interface_resets": 1,
# "out_collision": 0,
# "out_unknown_protocl_drops": 10110,
# "out_babble": 0,
# "out_late_collision": 0,
# "out_deferred": 0,
# "out_lost_carrier": 0,
# "out_no_carrier": 0,
# "out_mac_pause_frames": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0
# }
# }
# }
# golden_interface_output_3 = {'execute.return_value': '''
# [2019-04-23 10:53:38,979] +++ csr1000v-1: executing command 'show interfaces GigabitEthernet3' +++
# show interfaces GigabitEthernet3
# GigabitEthernet3 is up, line protocol is up
# Hardware is CSR vNIC, address is fa16.3eff.8a36 (bia fa16.3eff.8a36)
# Internet address is 10.0.2.1/24
# MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 600 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive set (10 sec)
# Full Duplex, 1000Mbps, link type is auto, media type is Virtual
# output flow-control is unsupported, input flow-control is unsupported
# Carrier delay is 10 sec
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input 00:00:00, output 00:00:00, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 101744 packets input, 9327436 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 0 multicast, 0 pause input
# 65026 packets output, 7387154 bytes, 0 underruns
# 0 output errors, 0 collisions, 1 interface resets
# 10110 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# csr1000v-1#
# '''}
# golden_interface_output_4 = { 'execute.return_value': '''
# GigabitEthernet0/0/0 is up, line protocol is up
# Hardware is BUILT-IN-EPA-8x1G, address is 1ca1.88ff.c119 (bia 1ca1.88ff.c119)
# Description: Genie to Genie-next for L2 Fiber lines ***
# MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 7/255, rxload 2/255
# Encapsulation 802.1Q Virtual LAN, Vlan ID 1., loopback not set
# Keepalive not supported
# Full Duplex, 1000Mbps, link type is auto, media type is T
# output flow-control is on, input flow-control is on
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input 00:00:00, output 00:00:00, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/9483/6181 (size/max/drops/flushes); Total output drops: 85587314
# Queueing strategy: Class-based queueing
# Output queue: 0/40 (size/max)
# 5 minute input rate 10684000 bits/sec, 5031 packets/sec
# 5 minute output rate 28954000 bits/sec, 5003 packets/sec
# 37252955968 packets input, 25781698415464 bytes, 0 no buffer
# Received 110594334 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 1050221981 multicast, 0 pause input
# 35433262342 packets output, 29981591557915 bytes, 0 underruns
# 0 output errors, 0 collisions, 2 interface resets
# 11976504 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# GigabitEthernet0/0/0.105 is up, line protocol is up
# Hardware is BUILT-IN-EPA-8x1G, address is 1ca1.88ff.c119 (bia 1ca1.88ff.c119)
# Description: Another Genie L2 connection
# Internet address is 10.95.2.252/24
# MTU 1500 bytes, BW 100000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 7/255, rxload 2/255
# Encapsulation 802.1Q Virtual LAN, Vlan ID 105.
# ARP type: ARPA, ARP Timeout 04:00:00
# Keepalive not supported
# Last clearing of "show interface" counters never
# GigabitEthernet0/0/0.1761524 is up, line protocol is up
# Hardware is BUILT-IN-EPA-8x1G, address is 1ca1.88ff.c119 (bia 1ca1.88ff.c119)
# Description: *** Genie VLAN ***
# Internet address is 10.121.113.98/27
# MTU 1500 bytes, BW 100000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 7/255, rxload 2/255
# Encapsulation QinQ Virtual LAN, outer ID 176, inner ID 1524
# ARP type: ARPA, ARP Timeout 04:00:00
# Keepalive not supported
# Last clearing of "show interface" counters never
# '''
# }
# golden_parsed_interface_output_4 = {
# "GigabitEthernet0/0/0": {
# "arp_timeout": "04:00:00",
# "arp_type": "arpa",
# "auto_negotiate": True,
# "bandwidth": 1000000,
# "counters": {
# "in_broadcast_pkts": 0,
# "in_crc_errors": 0,
# "in_errors": 0,
# "in_frame": 0,
# "in_giants": 0,
# "in_ignored": 0,
# "in_mac_pause_frames": 0,
# "in_multicast_pkts": 1050221981,
# "in_no_buffer": 0,
# "in_octets": 25781698415464,
# "in_overrun": 0,
# "in_pkts": 37252955968,
# "in_runts": 0,
# "in_throttles": 0,
# "in_watchdog": 0,
# "last_clear": "never",
# "out_babble": 0,
# "out_buffer_failure": 0,
# "out_buffers_swapped": 0,
# "out_collision": 0,
# "out_deferred": 0,
# "out_errors": 0,
# "out_interface_resets": 2,
# "out_late_collision": 0,
# "out_lost_carrier": 0,
# "out_mac_pause_frames": 0,
# "out_no_carrier": 0,
# "out_octets": 29981591557915,
# "out_pkts": 35433262342,
# "out_underruns": 0,
# "out_unknown_protocl_drops": 11976504,
# "rate": {
# "in_rate": 10684000,
# "in_rate_pkts": 5031,
# "load_interval": 300,
# "out_rate": 28954000,
# "out_rate_pkts": 5003
# }
# },
# "delay": 10,
# "description": "Genie to Genie-next for L2 Fiber lines ***",
# "duplex_mode": "full",
# "enabled": True,
# "encapsulations": {
# "encapsulation": "dot1q",
# "first_dot1q": "1"
# },
# "flow_control": {
# "receive": True,
# "send": True
# },
# "last_input": "00:00:00",
# "last_output": "00:00:00",
# "line_protocol": "up",
# "link_type": "auto",
# "mac_address": "1ca1.88ff.c119",
# "media_type": "T",
# "mtu": 1500,
# "oper_status": "up",
# "output_hang": "never",
# "phys_address": "1ca1.88ff.c119",
# "port_channel": {
# "port_channel_member": False
# },
# 'port_speed': '1000mbps',
# "queues": {
# "input_queue_drops": 9483,
# "input_queue_flushes": 6181,
# "input_queue_max": 375,
# "input_queue_size": 0,
# "output_queue_max": 40,
# "output_queue_size": 0,
# 'queue_strategy': 'Class-based',
# "total_output_drop": 85587314
# },
# "reliability": "255/255",
# "rxload": "2/255",
# "txload": "7/255",
# "type": "BUILT-IN-EPA-8x1G"
# },
# "GigabitEthernet0/0/0.105": {
# "arp_timeout": "04:00:00",
# "arp_type": "arpa",
# "bandwidth": 100000,
# "delay": 10,
# "description": "Another Genie L2 connection",
# "enabled": True,
# "encapsulations": {
# "encapsulation": "dot1q",
# "first_dot1q": "105"
# },
# "ipv4": {
# "10.95.2.252/24": {
# "ip": "10.95.2.252",
# "prefix_length": "24"
# }
# },
# "line_protocol": "up",
# "mac_address": "1ca1.88ff.c119",
# "mtu": 1500,
# "oper_status": "up",
# "phys_address": "1ca1.88ff.c119",
# "port_channel": {
# "port_channel_member": False
# },
# "reliability": "255/255",
# "rxload": "2/255",
# "txload": "7/255",
# "type": "BUILT-IN-EPA-8x1G"
# },
# "GigabitEthernet0/0/0.1761524": {
# "arp_timeout": "04:00:00",
# "arp_type": "arpa",
# "bandwidth": 100000,
# "delay": 10,
# "description": "*** Genie VLAN ***",
# "enabled": True,
# "encapsulations": {
# "encapsulation": "qinq virtual lan",
# "first_dot1q": "176",
# "second_dot1q": "1524"
# },
# "ipv4": {
# "10.121.113.98/27": {
# "ip": "10.121.113.98",
# "prefix_length": "27"
# }
# },
# "line_protocol": "up",
# "mac_address": "1ca1.88ff.c119",
# "mtu": 1500,
# "oper_status": "up",
# "phys_address": "1ca1.88ff.c119",
# "port_channel": {
# "port_channel_member": False
# },
# "reliability": "255/255",
# "rxload": "2/255",
# "txload": "7/255",
# "type": "BUILT-IN-EPA-8x1G"
# },
# }
# def test_empty(self):
# self.device = Mock(**self.empty_output)
# interface_obj = ShowInterfaces(device=self.device)
# with self.assertRaises(SchemaEmptyParserError):
# parsed_output = interface_obj.parse()
# def test_golden(self):
# self.device = Mock(**self.golden_output)
# interface_obj = ShowInterfaces(device=self.device)
# parsed_output = interface_obj.parse()
# self.maxDiff = None
# self.assertEqual(parsed_output,self.golden_parsed_output)
# def test_show_interfaces(self):
# self.device = Mock(**self.golden_interface_output)
# interface_obj = ShowInterfaces(device=self.device)
# parsed_output = interface_obj.parse(interface='GigabitEthernet1')
# self.maxDiff = None
# self.assertEqual(parsed_output,self.golden_parsed_interface_output)
# def test_show_interfaces_2(self):
# self.device = Mock(**self.golden_interface_output_2)
# interface_obj = ShowInterfaces(device=self.device)
# parsed_output = interface_obj.parse(interface='TenGigabitEthernet0/2/0')
# self.maxDiff = None
# self.assertEqual(parsed_output,self.golden_parsed_interface_output_2)
# def test_show_interfaces_3(self):
# self.device = Mock(**self.golden_interface_output_3)
# interface_obj = ShowInterfaces(device=self.device)
# parsed_output = interface_obj.parse(interface='GigabitEthernet3')
# self.maxDiff = None
# self.assertEqual(parsed_output,self.golden_parsed_interface_output_3)
# def test_show_interfaces_4(self):
# self.device = Mock(**self.golden_interface_output_4)
# interface_obj = ShowInterfaces(device=self.device)
# parsed_output = interface_obj.parse(interface='GigabitEthernet0/0/0')
# self.maxDiff = None
# self.assertEqual(parsed_output,self.golden_parsed_interface_output_4)
# golden_interface_output_1 = {'execute.return_value' : '''
# Port-channel10 is up, line protocol is up
# Hardware is GEChannel, address is 006b.f1ff.be9f (bia 006b.f1ff.be9f)
# MTU 1500 bytes, BW 2000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation 802.1Q Virtual LAN, Vlan ID 1., loopback not set
# Keepalive set (10 sec)
# ARP type: ARPA, ARP Timeout 04:00:00
# No. of active members in this channel: 2
# Member 0 : GigabitEthernet0/0/0 , Full-duplex, 1000Mb/s
# Member 1 : GigabitEthernet0/0/1 , Full-duplex, 1000Mb/s
# No. of PF_JUMBO supported members in this channel : 2
# Last input 00:00:01, output 00:00:00, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/750/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/80 (size/max)
# 5 minute input rate 27204000 bits/sec, 6797 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 73017 packets input, 5016308 bytes, 0 no buffer
# Received 12871133 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 4557345 multicast, 0 pause input
# 5887 packets output, 377641 bytes, 0 underruns
# 0 output errors, 0 collisions, 0 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# 0 carrier transitions
# '''}
# golden_parsed_interface_output_1 = {
# 'Port-channel10': {
# 'port_channel': {
# 'port_channel_member': True,
# 'active_members': 2,
# 'port_channel_member_intfs': ['GigabitEthernet0/0/0', 'GigabitEthernet0/0/1'],
# 'num_of_pf_jumbo_supported_members': 2,
# },
# 'enabled': True,
# 'line_protocol': 'up',
# 'oper_status': 'up',
# 'type': 'GEChannel',
# 'mac_address': '006b.f1ff.be9f',
# 'phys_address': '006b.f1ff.be9f',
# 'delay': 10,
# 'mtu': 1500,
# 'bandwidth': 2000000,
# 'reliability': '255/255',
# 'txload': '1/255',
# 'rxload': '1/255',
# 'encapsulations': {
# 'encapsulation': 'dot1q',
# 'first_dot1q': '1',
# },
# 'keepalive': 10,
# 'arp_type': 'arpa',
# 'arp_timeout': '04:00:00',
# 'last_input': '00:00:01',
# 'last_output': '00:00:00',
# 'output_hang': 'never',
# 'queues': {
# 'input_queue_size': 0,
# 'input_queue_max': 750,
# 'input_queue_drops': 0,
# 'input_queue_flushes': 0,
# 'total_output_drop': 0,
# 'queue_strategy': 'fifo',
# 'output_queue_size': 0,
# 'output_queue_max': 80,
# },
# 'counters': {
# 'rate': {
# 'load_interval': 300,
# 'in_rate': 27204000,
# 'in_rate_pkts': 6797,
# 'out_rate': 0,
# 'out_rate_pkts': 0,
# },
# 'last_clear': 'never',
# 'in_pkts': 73017,
# 'in_octets': 5016308,
# 'in_no_buffer': 0,
# 'in_multicast_pkts': 4557345,
# 'in_broadcast_pkts': 0,
# 'in_runts': 0,
# 'in_giants': 0,
# 'in_throttles': 0,
# 'in_errors': 0,
# 'in_crc_errors': 0,
# 'in_frame': 0,
# 'in_overrun': 0,
# 'in_ignored': 0,
# 'in_watchdog': 0,
# 'in_mac_pause_frames': 0,
# 'out_pkts': 5887,
# 'out_octets': 377641,
# 'out_underruns': 0,
# 'out_errors': 0,
# 'out_interface_resets': 0,
# 'out_collision': 0,
# 'out_unknown_protocl_drops': 0,
# 'out_babble': 0,
# 'out_late_collision': 0,
# 'out_deferred': 0,
# 'out_lost_carrier': 0,
# 'out_no_carrier': 0,
# 'out_mac_pause_frames': 0,
# 'out_buffer_failure': 0,
# 'out_buffers_swapped': 0,
# },
# }
# }
# def test_show_interfaces_10(self):
# self.device = Mock(**self.golden_interface_output_1)
# interface_obj = ShowInterfaces(device=self.device)
# parsed_output = interface_obj.parse(interface='Port-channel10')
# self.maxDiff = None
# self.assertEqual(parsed_output,self.golden_parsed_interface_output_1)
# def test_golden2(self):
# self.device = Mock(**self.golden_output2)
# interface_obj = ShowInterfaces(device=self.device)
# parsed_output = interface_obj.parse()
# self.maxDiff = None
# self.assertEqual(parsed_output,self.golden_parsed_output2)
# golden_output_1 = {'execute.return_value': '''
# BDI105 is up, line protocol is up
# Hardware is BDI, address is 2c33.11ff.fbc7(bia 2c33.11ff.fbc7)
# Description: PXMS Connexion Explore CWS L2 / Primary VLAN for CHRH
# Internet address is 10.95.2.253/24
# MTU 1500 bytes, BW 100000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation 802.1Q VLAN, Vlan ID 105, loopback not set
# Keepalive not supported
# ARP type: ARPA, ARP Timeout 04: 00: 00
# Last input never, output 01: 20: 01, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes)
# Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 2000 bits/sec, 2 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 35283845 packets input, 3929639714 bytes, 0 no buffer
# Received 0 broadcasts(0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 1933865 packets output, 287424110 bytes, 0 underruns
# 0 output errors, 0 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# TenGigabitEthernet0/1/86 is administratively down, line protocol is down
# Hardware is BUILT-IN-EPA-8x10G, address is 2c33.11ff.311f (bia 2c33.11ff.311f)
# MTU 1500 bytes, BW 10000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive not supported
# Full Duplex, 10000Mbps, link type is force-up, media type is unknown media type
# output flow-control is unsupported, input flow-control is unsupported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output never, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 0 multicast, 0 pause input
# 0 packets output, 0 bytes, 0 underruns
# 0 output errors, 0 collisions, 1 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# TenGigabitEthernet0/1/6 is administratively down, line protocol is down
# Hardware is BUILT-IN-EPA-8x10G, address is 2c33.11ff.311f (bia 2c33.11ff.311f)
# MTU 1500 bytes, BW 10000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive not supported
# Full Duplex, 10000Mbps, link type is force-up, media type is unknown media type
# output flow-control is unsupported, input flow-control is unsupported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output never, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 0 multicast, 0 pause input
# 0 packets output, 0 bytes, 0 underruns
# 0 output errors, 0 collisions, 1 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# TenGigabitEthernet0/1/7 is administratively down, line protocol is down
# Hardware is BUILT-IN-EPA-8x10G, address is 2c33.11ff.3120 (bia 2c33.11ff.3120)
# MTU 1500 bytes, BW 10000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive not supported
# Full Duplex, 10000Mbps, link type is force-up, media type is unknown media type
# output flow-control is unsupported, input flow-control is unsupported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output never, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 0 multicast, 0 pause input
# 0 packets output, 0 bytes, 0 underruns
# 0 output errors, 0 collisions, 1 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# GigabitEthernet0 is up, line protocol is up
# Hardware is RP management port, address is 2c33.11ff.3149 (bia 2c33.11ff.3149)
# Internet address is 172.31.0.24/16
# MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive set (10 sec)
# Full Duplex, 1000Mbps, link type is auto, media type is RJ45
# output flow-control is unsupported, input flow-control is unsupported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input 00:00:00, output 00:00:00, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/75/0/2586 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 5000 bits/sec, 6 packets/sec
# 5 minute output rate 3000 bits/sec, 4 packets/sec
# 246659819 packets input, 31345442345 bytes, 0 no buffer
# Received 21865326 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 0 multicast, 0 pause input
# 191782907 packets output, 24622021354 bytes, 0 underruns
# 0 output errors, 0 collisions, 0 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 1 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# BDI106 is up, line protocol is up
# Hardware is BDI, address is 2c33.11ff.32c7 (bia 2c33.11ff.32c7)
# Description: PXMS connexion Explore CWS L2 / Backup VLAN for CHRH
# Internet address is 10.1.2.43/24
# MTU 1500 bytes, BW 100000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation 802.1Q VLAN, Vlan ID 106, loopback not set
# Keepalive not supported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output 25w2d, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 24 packets input, 1729 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 4930792 packets output, 442723849 bytes, 0 underruns
# 0 output errors, 0 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Dialer1 is up (spoofing), line protocol is up (spoofing)
# Hardware is Unknown
# MTU 1492 bytes, BW 56 Kbit/sec, DLY 20000 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation HDLC, loopback not set
# Keepalive set (10 sec)
# DTR is pulsed for 1 seconds on reset
# Last input never, output never, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: weighted fair
# Output queue: 0/1000/64/0 (size/max total/threshold/drops)
# Conversations 0/0/16 (active/max active/max total)
# Reserved Conversations 0/0 (allocated/max allocated)
# Available Bandwidth 42 kilobits/sec
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes
# 0 packets output, 0 bytes
# Loopback50998 is up, line protocol is up
# Hardware is Loopback
# Internet address is 10.1.2.32/32
# MTU 1514 bytes, BW 8000000 Kbit/sec, DLY 5000 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation LOOPBACK, loopback not set
# Keepalive set (10 sec)
# Last input never, output never, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/75/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/0 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored, 0 abort
# 0 packets output, 0 bytes, 0 underruns
# 0 output errors, 0 collisions, 0 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# Tunnel1754 is up, line protocol is up
# Hardware is Tunnel
# Description: *** PXMS TUNNEL FGTB-Hornu - CID 102338277687
# Internet address is 10.210.226.13/30
# MTU 9976 bytes, BW 20000 Kbit/sec, DLY 20000 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation TUNNEL, loopback not set
# Keepalive not set
# Tunnel linestate evaluation up
# Tunnel source 172.16.121.201 (GigabitEthernet0/0/1.91), destination 172.16.64.36
# Tunnel Subblocks:
# src-track:
# Tunnel1754 source tracking subblock associated with GigabitEthernet0/0/1.91
# Set of tunnels with source GigabitEthernet0/0/1.91, 314 members (includes iterators), on interface <OK>
# Tunnel protocol/transport GRE/IP
# Key disabled, sequencing disabled
# Checksumming of packets disabled
# Tunnel TTL 255, Fast tunneling enabled
# Tunnel transport MTU 1468 bytes
# Tunnel transmit bandwidth 8000 (kbps)
# Tunnel receive bandwidth 8000 (kbps)
# Last input never, output never, output hang never
# Last clearing of "show interface" counters 25w2d
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/0 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 7105513 packets input, 2633533316 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored, 0 abort
# 3442669 packets output, 409215038 bytes, 0 underruns
# 0 output errors, 0 collisions, 0 interface resets
# 0 unknown protocol drops
# 0 output buffer failures, 0 output buffers swapped out
# '''
# }
# golden_parsed_output_1 = {
# 'BDI105': {
# 'bandwidth': 100000,
# 'counters': {
# 'in_crc_errors': 0,
# 'in_errors': 0,
# 'in_frame': 0,
# 'in_giants': 0,
# 'in_ignored': 0,
# 'in_no_buffer': 0,
# 'in_octets': 3929639714,
# 'in_overrun': 0,
# 'in_pkts': 35283845,
# 'in_runts': 0,
# 'in_throttles': 0,
# 'last_clear': 'never',
# 'out_buffer_failure': 0,
# 'out_buffers_swapped': 0,
# 'out_errors': 0,
# 'out_interface_resets': 0,
# 'out_octets': 287424110,
# 'out_pkts': 1933865,
# 'out_underruns': 0,
# 'out_unknown_protocl_drops': 0,
# 'rate': {
# 'in_rate': 2000,
# 'in_rate_pkts': 2,
# 'load_interval': 300,
# 'out_rate': 0,
# 'out_rate_pkts': 0
# }
# },
# 'delay': 10,
# 'description': 'PXMS Connexion Explore CWS L2 / Primary VLAN for CHRH',
# 'enabled': True,
# 'encapsulations': {
# 'encapsulation': '802.1q vlan',
# 'first_dot1q': '105'
# },
# 'ipv4': {
# '10.95.2.253/24': {
# 'ip': '10.95.2.253',
# 'prefix_length': '24'
# }
# },
# 'line_protocol': 'up',
# 'mac_address': '2c33.11ff.fbc7',
# 'mtu': 1500,
# 'oper_status': 'up',
# 'phys_address': '2c33.11ff.fbc7',
# 'port_channel': {
# 'port_channel_member': False
# },
# 'queues': {
# 'output_queue_max': 40,
# 'output_queue_size': 0,
# 'queue_strategy': 'fifo'
# },
# 'reliability': '255/255',
# 'rxload': '1/255',
# 'txload': '1/255',
# 'type': 'BDI'
# },
# 'BDI106': {
# 'arp_timeout': '04:00:00',
# 'arp_type': 'arpa',
# 'bandwidth': 100000,
# 'counters': {
# 'in_broadcast_pkts': 0,
# 'in_crc_errors': 0,
# 'in_errors': 0,
# 'in_frame': 0,
# 'in_giants': 0,
# 'in_ignored': 0,
# 'in_multicast_pkts': 0,
# 'in_no_buffer': 0,
# 'in_octets': 1729,
# 'in_overrun': 0,
# 'in_pkts': 24,
# 'in_runts': 0,
# 'in_throttles': 0,
# 'last_clear': 'never',
# 'out_buffer_failure': 0,
# 'out_buffers_swapped': 0,
# 'out_errors': 0,
# 'out_interface_resets': 0,
# 'out_octets': 442723849,
# 'out_pkts': 4930792,
# 'out_underruns': 0,
# 'out_unknown_protocl_drops': 0,
# 'rate': {
# 'in_rate': 0,
# 'in_rate_pkts': 0,
# 'load_interval': 300,
# 'out_rate': 0,
# 'out_rate_pkts': 0
# }
# },
# 'delay': 10,
# 'description': 'PXMS connexion Explore CWS L2 / Backup VLAN for CHRH',
# 'enabled': True,
# 'encapsulations': {
# 'encapsulation': '802.1q vlan',
# 'first_dot1q': '106'
# },
# 'ipv4': {
# '10.1.2.43/24': {
# 'ip': '10.1.2.43',
# 'prefix_length': '24'
# }
# },
# 'last_input': 'never',
# 'last_output': '25w2d',
# 'line_protocol': 'up',
# 'mac_address': '2c33.11ff.32c7',
# 'mtu': 1500,
# 'oper_status': 'up',
# 'output_hang': 'never',
# 'phys_address': '2c33.11ff.32c7',
# 'port_channel': {
# 'port_channel_member': False
# },
# 'queues': {
# 'input_queue_drops': 0,
# 'input_queue_flushes': 0,
# 'input_queue_max': 375,
# 'input_queue_size': 0,
# 'output_queue_max': 40,
# 'output_queue_size': 0,
# 'queue_strategy': 'fifo',
# 'total_output_drop': 0
# },
# 'reliability': '255/255',
# 'rxload': '1/255',
# 'txload': '1/255',
# 'type': 'BDI'
# },
# 'Dialer1': {
# 'bandwidth': 56,
# 'connected': False,
# 'counters': {
# 'in_octets': 0,
# 'in_pkts': 0,
# 'last_clear': 'never',
# 'out_octets': 0,
# 'out_pkts': 0,
# 'rate': {
# 'in_rate': 0,
# 'in_rate_pkts': 0,
# 'load_interval': 300,
# 'out_rate': 0,
# 'out_rate_pkts': 0
# }
# },
# 'delay': 20000,
# 'enabled': True,
# 'encapsulations': {
# 'encapsulation': 'hdlc'
# },
# 'keepalive': 10,
# 'dtr_pulsed': '1',
# 'last_input': 'never',
# 'last_output': 'never',
# 'line_protocol': 'up',
# 'mtu': 1492,
# 'oper_status': 'up',
# 'output_hang': 'never',
# 'port_channel': {
# 'port_channel_member': False
# },
# 'queues': {
# 'input_queue_drops': 0,
# 'input_queue_flushes': 0,
# 'input_queue_max': 375,
# 'input_queue_size': 0,
# 'queue_strategy': 'weighted',
# 'total_output_drop': 0,
# 'output_queue_max': 1000,
# 'output_queue_size': 0,
# 'threshold': 64,
# 'drops': 0,
# },
# 'reliability': '255/255',
# 'rxload': '1/255',
# 'txload': '1/255',
# 'type': 'Unknown'
# },
# 'GigabitEthernet0': {
# 'arp_timeout': '04:00:00',
# 'arp_type': 'arpa',
# 'auto_negotiate': True,
# 'bandwidth': 1000000,
# 'counters': {
# 'in_broadcast_pkts': 0,
# 'in_crc_errors': 0,
# 'in_errors': 0,
# 'in_frame': 0,
# 'in_giants': 0,
# 'in_ignored': 0,
# 'in_mac_pause_frames': 0,
# 'in_multicast_pkts': 0,
# 'in_no_buffer': 0,
# 'in_octets': 31345442345,
# 'in_overrun': 0,
# 'in_pkts': 246659819,
# 'in_runts': 0,
# 'in_throttles': 0,
# 'in_watchdog': 0,
# 'last_clear': 'never',
# 'out_babble': 0,
# 'out_buffer_failure': 0,
# 'out_buffers_swapped': 0,
# 'out_collision': 0,
# 'out_deferred': 0,
# 'out_errors': 0,
# 'out_interface_resets': 0,
# 'out_late_collision': 0,
# 'out_lost_carrier': 1,
# 'out_mac_pause_frames': 0,
# 'out_no_carrier': 0,
# 'out_octets': 24622021354,
# 'out_pkts': 191782907,
# 'out_underruns': 0,
# 'out_unknown_protocl_drops': 0,
# 'rate': {
# 'in_rate': 5000,
# 'in_rate_pkts': 6,
# 'load_interval': 300,
# 'out_rate': 3000,
# 'out_rate_pkts': 4
# }
# },
# 'delay': 10,
# 'duplex_mode': 'full',
# 'enabled': True,
# 'encapsulations': {
# 'encapsulation': 'arpa'
# },
# 'flow_control': {
# 'receive': False, 'send': False
# },
# 'ipv4': {
# '172.31.0.24/16': {
# 'ip': '172.31.0.24',
# 'prefix_length': '16'
# }
# },
# 'keepalive': 10,
# 'last_input': '00:00:00',
# 'last_output': '00:00:00',
# 'line_protocol': 'up',
# 'link_type': 'auto',
# 'mac_address': '2c33.11ff.3149',
# 'media_type': 'RJ45',
# 'mtu': 1500,
# 'oper_status': 'up',
# 'output_hang': 'never',
# 'phys_address': '2c33.11ff.3149',
# 'port_channel': {
# 'port_channel_member': False
# },
# 'port_speed': '1000mbps',
# 'queues': {
# 'input_queue_drops': 0,
# 'input_queue_flushes': 2586,
# 'input_queue_max': 75,
# 'input_queue_size': 0,
# 'output_queue_max': 40,
# 'output_queue_size': 0,
# 'queue_strategy': 'fifo',
# 'total_output_drop': 0
# },
# 'reliability': '255/255',
# 'rxload': '1/255',
# 'txload': '1/255',
# 'type': 'RP management port'
# },
# 'Loopback50998': {
# 'bandwidth': 8000000,
# 'counters': {
# 'in_abort': 0,
# 'in_broadcast_pkts': 0,
# 'in_crc_errors': 0,
# 'in_errors': 0,
# 'in_frame': 0,
# 'in_giants': 0,
# 'in_ignored': 0,
# 'in_multicast_pkts': 0,
# 'in_no_buffer': 0,
# 'in_octets': 0,
# 'in_overrun': 0,
# 'in_pkts': 0,
# 'in_runts': 0,
# 'in_throttles': 0,
# 'last_clear': 'never',
# 'out_buffer_failure': 0,
# 'out_buffers_swapped': 0,
# 'out_collision': 0,
# 'out_errors': 0,
# 'out_interface_resets': 0,
# 'out_octets': 0,
# 'out_pkts': 0,
# 'out_underruns': 0,
# 'out_unknown_protocl_drops': 0,
# 'rate': {
# 'in_rate': 0,
# 'in_rate_pkts': 0,
# 'load_interval': 300,
# 'out_rate': 0,
# 'out_rate_pkts': 0
# }
# },
# 'delay': 5000,
# 'enabled': True,
# 'encapsulations': {
# 'encapsulation': 'loopback'
# },
# 'ipv4': {
# '10.1.2.32/32': {
# 'ip': '10.1.2.32',
# 'prefix_length': '32'
# }
# },
# 'keepalive': 10,
# 'last_input': 'never',
# 'last_output': 'never',
# 'line_protocol': 'up',
# 'mtu': 1514,
# 'oper_status': 'up',
# 'output_hang': 'never',
# 'port_channel': {
# 'port_channel_member': False
# },
# 'queues': {
# 'input_queue_drops': 0,
# 'input_queue_flushes': 0,
# 'input_queue_max': 75,
# 'input_queue_size': 0,
# 'output_queue_max': 0,
# 'output_queue_size': 0,
# 'queue_strategy': 'fifo',
# 'total_output_drop': 0
# },
# 'reliability': '255/255',
# 'rxload': '1/255',
# 'txload': '1/255',
# 'type': 'Loopback'
# },
# 'TenGigabitEthernet0/1/6': {
# 'arp_timeout': '04:00:00',
# 'arp_type': 'arpa',
# 'bandwidth': 10000000,
# 'auto_negotiate': False,
# 'counters': {
# 'in_broadcast_pkts': 0,
# 'in_crc_errors': 0,
# 'in_errors': 0,
# 'in_frame': 0,
# 'in_giants': 0,
# 'in_ignored': 0,
# 'in_mac_pause_frames': 0,
# 'in_multicast_pkts': 0,
# 'in_no_buffer': 0,
# 'in_octets': 0,
# 'in_overrun': 0,
# 'in_pkts': 0,
# 'in_runts': 0,
# 'in_throttles': 0,
# 'in_watchdog': 0,
# 'last_clear': 'never',
# 'out_babble': 0,
# 'out_buffer_failure': 0,
# 'out_buffers_swapped': 0,
# 'out_collision': 0,
# 'out_deferred': 0,
# 'out_errors': 0,
# 'out_interface_resets': 1,
# 'out_late_collision': 0,
# 'out_lost_carrier': 0,
# 'out_mac_pause_frames': 0,
# 'out_no_carrier': 0,
# 'out_octets': 0,
# 'out_pkts': 0,
# 'out_underruns': 0,
# 'out_unknown_protocl_drops': 0,
# 'rate': {
# 'in_rate': 0,
# 'in_rate_pkts': 0,
# 'load_interval': 300,
# 'out_rate': 0,
# 'out_rate_pkts': 0
# }
# },
# 'delay': 10,
# 'duplex_mode': 'full',
# 'link_type': 'force-up',
# 'media_type': 'unknown',
# 'enabled': False,
# 'port_speed': '10000mbps',
# 'encapsulations': {
# 'encapsulation': 'arpa'
# },
# 'flow_control': {
# 'receive': False, 'send': False
# },
# 'last_input': 'never',
# 'last_output': 'never',
# 'line_protocol': 'down',
# 'mac_address': '2c33.11ff.311f',
# 'mtu': 1500,
# 'oper_status': 'down',
# 'output_hang': 'never',
# 'phys_address': '2c33.11ff.311f',
# 'port_channel': {
# 'port_channel_member': False
# },
# 'queues': {
# 'input_queue_drops': 0,
# 'input_queue_flushes': 0,
# 'input_queue_max': 375,
# 'input_queue_size': 0,
# 'output_queue_max': 40,
# 'output_queue_size': 0,
# 'queue_strategy': 'fifo',
# 'total_output_drop': 0
# },
# 'reliability': '255/255',
# 'rxload': '1/255',
# 'txload': '1/255',
# 'type': 'BUILT-IN-EPA-8x10G'
# },
# 'TenGigabitEthernet0/1/7': {
# 'arp_timeout': '04:00:00',
# 'arp_type': 'arpa',
# 'bandwidth': 10000000,
# 'auto_negotiate': False,
# 'counters': {
# 'in_broadcast_pkts': 0,
# 'in_crc_errors': 0,
# 'in_errors': 0,
# 'in_frame': 0,
# 'in_giants': 0,
# 'in_ignored': 0,
# 'in_mac_pause_frames': 0,
# 'in_multicast_pkts': 0,
# 'in_no_buffer': 0,
# 'in_octets': 0,
# 'in_overrun': 0,
# 'in_pkts': 0,
# 'in_runts': 0,
# 'in_throttles': 0,
# 'in_watchdog': 0,
# 'last_clear': 'never',
# 'out_babble': 0,
# 'out_buffer_failure': 0,
# 'out_buffers_swapped': 0,
# 'out_collision': 0,
# 'out_deferred': 0,
# 'out_errors': 0,
# 'out_interface_resets': 1,
# 'out_late_collision': 0,
# 'out_lost_carrier': 0,
# 'out_mac_pause_frames': 0,
# 'out_no_carrier': 0,
# 'out_octets': 0,
# 'out_pkts': 0,
# 'out_underruns': 0,
# 'out_unknown_protocl_drops': 0,
# 'rate': {
# 'in_rate': 0,
# 'in_rate_pkts': 0,
# 'load_interval': 300,
# 'out_rate': 0,
# 'out_rate_pkts': 0
# }
# },
# 'delay': 10,
# 'duplex_mode': 'full',
# 'enabled': False,
# 'encapsulations': {
# 'encapsulation': 'arpa'
# },
# 'flow_control': {
# 'receive': False, 'send': False
# },
# 'last_input': 'never',
# 'last_output': 'never',
# 'line_protocol': 'down',
# 'link_type': 'force-up',
# 'media_type': 'unknown',
# 'mac_address': '2c33.11ff.3120',
# 'mtu': 1500,
# 'oper_status': 'down',
# 'output_hang': 'never',
# 'port_speed': '10000mbps',
# 'phys_address': '2c33.11ff.3120',
# 'port_channel': {
# 'port_channel_member': False
# },
# 'queues': {
# 'input_queue_drops': 0,
# 'input_queue_flushes': 0,
# 'input_queue_max': 375,
# 'input_queue_size': 0,
# 'output_queue_max': 40,
# 'output_queue_size': 0,
# 'queue_strategy': 'fifo',
# 'total_output_drop': 0
# },
# 'reliability': '255/255',
# 'rxload': '1/255',
# 'txload': '1/255',
# 'type': 'BUILT-IN-EPA-8x10G'
# },
# 'TenGigabitEthernet0/1/86': {
# 'arp_timeout': '04:00:00',
# 'arp_type': 'arpa',
# 'bandwidth': 10000000,
# 'auto_negotiate': False,
# 'counters': {
# 'in_broadcast_pkts': 0,
# 'in_crc_errors': 0,
# 'in_errors': 0,
# 'in_frame': 0,
# 'in_giants': 0,
# 'in_ignored': 0,
# 'in_mac_pause_frames': 0,
# 'in_multicast_pkts': 0,
# 'in_no_buffer': 0,
# 'in_octets': 0,
# 'in_overrun': 0,
# 'in_pkts': 0,
# 'in_runts': 0,
# 'in_throttles': 0,
# 'in_watchdog': 0,
# 'last_clear': 'never',
# 'out_babble': 0,
# 'out_buffer_failure': 0,
# 'out_buffers_swapped': 0,
# 'out_collision': 0,
# 'out_deferred': 0,
# 'out_errors': 0,
# 'out_interface_resets': 1,
# 'out_late_collision': 0,
# 'out_lost_carrier': 0,
# 'out_mac_pause_frames': 0,
# 'out_no_carrier': 0,
# 'out_octets': 0,
# 'out_pkts': 0,
# 'out_underruns': 0,
# 'out_unknown_protocl_drops': 0,
# 'rate': {
# 'in_rate': 0,
# 'in_rate_pkts': 0,
# 'load_interval': 300,
# 'out_rate': 0,
# 'out_rate_pkts': 0
# }
# },
# 'delay': 10,
# 'duplex_mode': 'full',
# 'enabled': False,
# 'encapsulations': {
# 'encapsulation': 'arpa'
# },
# 'flow_control': {
# 'receive': False, 'send': False
# },
# 'last_input': 'never',
# 'last_output': 'never',
# 'line_protocol': 'down',
# 'link_type': 'force-up',
# 'media_type': 'unknown',
# 'port_speed': '10000mbps',
# 'mac_address': '2c33.11ff.311f',
# 'mtu': 1500,
# 'oper_status': 'down',
# 'output_hang': 'never',
# 'phys_address': '2c33.11ff.311f',
# 'port_channel': {
# 'port_channel_member': False
# },
# 'queues': {
# 'input_queue_drops': 0,
# 'input_queue_flushes': 0,
# 'input_queue_max': 375,
# 'input_queue_size': 0,
# 'output_queue_max': 40,
# 'output_queue_size': 0,
# 'queue_strategy': 'fifo',
# 'total_output_drop': 0
# },
# 'reliability': '255/255',
# 'rxload': '1/255',
# 'txload': '1/255',
# 'type': 'BUILT-IN-EPA-8x10G'
# },
# 'Tunnel1754': {
# 'bandwidth': 20000,
# 'counters': {
# 'in_abort': 0,
# 'in_broadcast_pkts': 0,
# 'in_crc_errors': 0,
# 'in_errors': 0,
# 'in_frame': 0,
# 'in_giants': 0,
# 'in_ignored': 0,
# 'in_multicast_pkts': 0,
# 'in_no_buffer': 0,
# 'in_octets': 2633533316,
# 'in_overrun': 0,
# 'in_pkts': 7105513,
# 'in_runts': 0,
# 'in_throttles': 0,
# 'last_clear': '25w2d',
# 'out_buffer_failure': 0,
# 'out_buffers_swapped': 0,
# 'out_collision': 0,
# 'out_errors': 0,
# 'out_interface_resets': 0,
# 'out_octets': 409215038,
# 'out_pkts': 3442669,
# 'out_underruns': 0,
# 'out_unknown_protocl_drops': 0,
# 'rate': {
# 'in_rate': 0,
# 'in_rate_pkts': 0,
# 'load_interval': 300,
# 'out_rate': 0,
# 'out_rate_pkts': 0
# }
# },
# 'delay': 20000,
# 'description': '*** PXMS TUNNEL FGTB-Hornu - CID 102338277687',
# 'enabled': True,
# 'encapsulations': {
# 'encapsulation': 'tunnel'
# },
# 'ipv4': {
# '10.210.226.13/30': {
# 'ip': '10.210.226.13',
# 'prefix_length': '30'
# }
# },
# 'last_input': 'never',
# 'last_output': 'never',
# 'line_protocol': 'up',
# 'mtu': 9976,
# 'oper_status': 'up',
# 'output_hang': 'never',
# 'port_channel': {
# 'port_channel_member': False
# },
# 'queues': {
# 'input_queue_drops': 0,
# 'input_queue_flushes': 0,
# 'input_queue_max': 375,
# 'input_queue_size': 0,
# 'output_queue_max': 0,
# 'output_queue_size': 0,
# 'queue_strategy': 'fifo',
# 'total_output_drop': 0
# },
# 'reliability': '255/255',
# 'rxload': '1/255',
# 'txload': '1/255',
# 'type': 'Tunnel'
# }
# }
# golden_output_2 = {'execute.return_value': '''
# TenGigabitEthernet0/1/0 is up, line protocol is up
# Hardware is BUILT-IN-EPA-8x10G, address is 2c33.11ff.fa19 (bia 2c33.11ff.fa19)
# Internet address is 10.209.98.103/31
# MTU 4000 bytes, BW 10000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive not supported
# Full Duplex, 10000Mbps, link type is force-up, media type is SFP-LR
# output flow-control is on, input flow-control is on
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input 02:29:25, output 02:29:25, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 49998000 bits/sec, 6546 packets/sec
# 5 minute output rate 8598000 bits/sec, 1638 packets/sec
# 173550579294 packets input, 146338033143374 bytes, 0 no buffer
# Received 7 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 5592817 multicast, 0 pause input
# 39328190625 packets output, 16525140785118 bytes, 0 underruns
# 0 output errors, 0 collisions, 2 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# TenGigabitEthernet0/1/5 is administratively down, line protocol is down
# Hardware is BUILT-IN-EPA-8x10G, address is 2c33.11ff.fa1e (bia 2c33.11ff.fa1e)
# MTU 1500 bytes, BW 10000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive not supported
# Full Duplex, 10000Mbps, link type is force-up, media type is unknown media type
# output flow-control is unsupported, input flow-control is unsupported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output never, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 0 multicast, 0 pause input
# 0 packets output, 0 bytes, 0 underruns
# 0 output errors, 0 collisions, 1 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# TenGigabitEthernet0/1/6 is administratively down, line protocol is down
# Hardware is BUILT-IN-EPA-8x10G, address is 2c33.11ff.fa1f (bia 2c33.11ff.fa1f)
# MTU 1500 bytes, BW 10000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation ARPA, loopback not set
# Keepalive not supported
# Full Duplex, 10000Mbps, link type is force-up, media type is unknown media type
# output flow-control is unsupported, input flow-control is unsupported
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input never, output never, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: fifo
# Output queue: 0/40 (size/max)
# 5 minute input rate 0 bits/sec, 0 packets/sec
# 5 minute output rate 0 bits/sec, 0 packets/sec
# 0 packets input, 0 bytes, 0 no buffer
# Received 0 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 0 multicast, 0 pause input
# 0 packets output, 0 bytes, 0 underruns
# 0 output errors, 0 collisions, 1 interface resets
# 0 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# TenGigabitEthernet0/1/1 is up, line protocol is up
# Hardware is BUILT-IN-EPA-8x10G, address is 2c33.11ff.fa1a (bia 2c33.11ff.fa1a)
# Description: Internet OUT Link (Through ASA or Direct)
# MTU 1500 bytes, BW 10000000 Kbit/sec, DLY 10 usec,
# reliability 255/255, txload 1/255, rxload 1/255
# Encapsulation 802.1Q Virtual LAN, Vlan ID 1., loopback not set
# Keepalive not supported
# Full Duplex, 10000Mbps, link type is force-up, media type is SFP-LR
# output flow-control is on, input flow-control is on
# ARP type: ARPA, ARP Timeout 04:00:00
# Last input 00:00:00, output 00:00:00, output hang never
# Last clearing of "show interface" counters never
# Input queue: 0/375/1873/1370 (size/max/drops/flushes); Total output drops: 0
# Queueing strategy: Class-based queueing
# Output queue: 0/40 (size/max)
# 5 minute input rate 24128000 bits/sec, 2898 packets/sec
# 5 minute output rate 104000 bits/sec, 122 packets/sec
# 112310736139 packets input, 107581463084138 bytes, 0 no buffer
# Received 98185589 broadcasts (0 IP multicasts)
# 0 runts, 0 giants, 0 throttles
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 watchdog, 96415788 multicast, 0 pause input
# 1539949004 packets output, 174533887805 bytes, 0 underruns
# 0 output errors, 0 collisions, 2 interface resets
# 81113 unknown protocol drops
# 0 babbles, 0 late collision, 0 deferred
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 output buffer failures, 0 output buffers swapped out
# '''
# }
# golden_parsed_output_2 = {
# 'TenGigabitEthernet0/1/0': {
# 'arp_timeout': '04:00:00',
# 'arp_type': 'arpa',
# 'auto_negotiate': False,
# 'bandwidth': 10000000,
# 'counters': {
# 'in_broadcast_pkts': 0,
# 'in_crc_errors': 0,
# 'in_errors': 0,
# 'in_frame': 0,
# 'in_giants': 0,
# 'in_ignored': 0,
# 'in_mac_pause_frames': 0,
# 'in_multicast_pkts': 5592817,
# 'in_no_buffer': 0,
# 'in_octets': 146338033143374,
# 'in_overrun': 0,
# 'in_pkts': 173550579294,
# 'in_runts': 0,
# 'in_throttles': 0,
# 'in_watchdog': 0,
# 'last_clear': 'never',
# 'out_babble': 0,
# 'out_buffer_failure': 0,
# 'out_buffers_swapped': 0,
# 'out_collision': 0,
# 'out_deferred': 0,
# 'out_errors': 0,
# 'out_interface_resets': 2,
# 'out_late_collision': 0,
# 'out_lost_carrier': 0,
# 'out_mac_pause_frames': 0,
# 'out_no_carrier': 0,
# 'out_octets': 16525140785118,
# 'out_pkts': 39328190625,
# 'out_underruns': 0,
# 'out_unknown_protocl_drops': 0,
# 'rate': {
# 'in_rate': 49998000,
# 'in_rate_pkts': 6546,
# 'load_interval': 300,
# 'out_rate': 8598000,
# 'out_rate_pkts': 1638
# }
# },
# 'delay': 10,
# 'duplex_mode': 'full',
# 'enabled': True,
# 'encapsulations': {
# 'encapsulation': 'arpa'
# },
# 'flow_control': {
# 'receive': True,
# 'send': True
# },
# 'ipv4': {
# '10.209.98.103/31': {
# 'ip': '10.209.98.103',
# 'prefix_length': '31'
# }
# },
# 'last_input': '02:29:25',
# 'last_output': '02:29:25',
# 'line_protocol': 'up',
# 'link_type': 'force-up',
# 'mac_address': '2c33.11ff.fa19',
# 'media_type': 'SFP-LR',
# 'mtu': 4000,
# 'oper_status': 'up',
# 'output_hang': 'never',
# 'phys_address': '2c33.11ff.fa19',
# 'port_channel': {
# 'port_channel_member': False
# },
# 'port_speed': '10000mbps',
# 'queues': {
# 'input_queue_drops': 0,
# 'input_queue_flushes': 0,
# 'input_queue_max': 375,
# 'input_queue_size': 0,
# 'output_queue_max': 40,
# 'output_queue_size': 0,
# 'queue_strategy': 'fifo',
# 'total_output_drop': 0
# },
# 'reliability': '255/255',
# 'rxload': '1/255',
# 'txload': '1/255',
# 'type': 'BUILT-IN-EPA-8x10G'
# },
# 'TenGigabitEthernet0/1/1': {
# 'arp_timeout': '04:00:00',
# 'arp_type': 'arpa',
# 'auto_negotiate': False,
# 'bandwidth': 10000000,
# 'counters': {
# 'in_broadcast_pkts': 0,
# 'in_crc_errors': 0,
# 'in_errors': 0,
# 'in_frame': 0,
# 'in_giants': 0,
# 'in_ignored': 0,
# 'in_mac_pause_frames': 0,
# 'in_multicast_pkts': 96415788,
# 'in_no_buffer': 0,
# 'in_octets': 107581463084138,
# 'in_overrun': 0,
# 'in_pkts': 112310736139,
# 'in_runts': 0,
# 'in_throttles': 0,
# 'in_watchdog': 0,
# 'last_clear': 'never',
# 'out_babble': 0,
# 'out_buffer_failure': 0,
# 'out_buffers_swapped': 0,
# 'out_collision': 0,
# 'out_deferred': 0,
# 'out_errors': 0,
# 'out_interface_resets': 2,
# 'out_late_collision': 0,
# 'out_lost_carrier': 0,
# 'out_mac_pause_frames': 0,
# 'out_no_carrier': 0,
# 'out_octets': 174533887805,
# 'out_pkts': 1539949004,
# 'out_underruns': 0,
# 'out_unknown_protocl_drops': 81113,
# 'rate': {
# 'in_rate': 24128000,
# 'in_rate_pkts': 2898,
# 'load_interval': 300,
# 'out_rate': 104000,
# 'out_rate_pkts': 122
# }
# },
# 'delay': 10,
# 'description': 'Internet OUT Link (Through ASA or '
# 'Direct)',
# 'duplex_mode': 'full',
# 'enabled': True,
# 'encapsulations': {
# 'encapsulation': 'dot1q',
# 'first_dot1q': '1'
# },
# 'flow_control': {
# 'receive': True,
# 'send': True
# },
# 'last_input': '00:00:00',
# 'last_output': '00:00:00',
# 'line_protocol': 'up',
# 'link_type': 'force-up',
# 'mac_address': '2c33.11ff.fa1a',
# 'media_type': 'SFP-LR',
# 'mtu': 1500,
# 'oper_status': 'up',
# 'output_hang': 'never',
# 'phys_address': '2c33.11ff.fa1a',
# 'port_channel': {
# 'port_channel_member': False
# },
# 'port_speed': '10000mbps',
# 'queues': {
# 'input_queue_drops': 1873,
# 'input_queue_flushes': 1370,
# 'input_queue_max': 375,
# 'input_queue_size': 0,
# 'output_queue_max': 40,
# 'output_queue_size': 0,
# 'queue_strategy': 'Class-based',
# 'total_output_drop': 0
# },
# 'reliability': '255/255',
# 'rxload': '1/255',
# 'txload': '1/255',
# 'type': 'BUILT-IN-EPA-8x10G'
# },
# 'TenGigabitEthernet0/1/5': {
# 'arp_timeout': '04:00:00',
# 'arp_type': 'arpa',
# 'auto_negotiate': False,
# 'bandwidth': 10000000,
# 'counters': {
# 'in_broadcast_pkts': 0,
# 'in_crc_errors': 0,
# 'in_errors': 0,
# 'in_frame': 0,
# 'in_giants': 0,
# 'in_ignored': 0,
# 'in_mac_pause_frames': 0,
# 'in_multicast_pkts': 0,
# 'in_no_buffer': 0,
# 'in_octets': 0,
# 'in_overrun': 0,
# 'in_pkts': 0,
# 'in_runts': 0,
# 'in_throttles': 0,
# 'in_watchdog': 0,
# 'last_clear': 'never',
# 'out_babble': 0,
# 'out_buffer_failure': 0,
# 'out_buffers_swapped': 0,
# 'out_collision': 0,
# 'out_deferred': 0,
# 'out_errors': 0,
# 'out_interface_resets': 1,
# 'out_late_collision': 0,
# 'out_lost_carrier': 0,
# 'out_mac_pause_frames': 0,
# 'out_no_carrier': 0,
# 'out_octets': 0,
# 'out_pkts': 0,
# 'out_underruns': 0,
# 'out_unknown_protocl_drops': 0,
# 'rate': {
# 'in_rate': 0,
# 'in_rate_pkts': 0,
# 'load_interval': 300,
# 'out_rate': 0,
# 'out_rate_pkts': 0
# }
# },
# 'delay': 10,
# 'duplex_mode': 'full',
# 'enabled': False,
# 'encapsulations': {'encapsulation': 'arpa'},
# 'flow_control': {'receive': False, 'send': False},
# 'last_input': 'never',
# 'last_output': 'never',
# 'line_protocol': 'down',
# 'link_type': 'force-up',
# 'mac_address': '2c33.11ff.fa1e',
# 'media_type': 'unknown',
# 'mtu': 1500,
# 'oper_status': 'down',
# 'output_hang': 'never',
# 'phys_address': '2c33.11ff.fa1e',
# 'port_channel': {'port_channel_member': False},
# 'port_speed': '10000mbps',
# 'queues': {
# 'input_queue_drops': 0,
# 'input_queue_flushes': 0,
# 'input_queue_max': 375,
# 'input_queue_size': 0,
# 'output_queue_max': 40,
# 'output_queue_size': 0,
# 'queue_strategy': 'fifo',
# 'total_output_drop': 0
# },
# 'reliability': '255/255',
# 'rxload': '1/255',
# 'txload': '1/255',
# 'type': 'BUILT-IN-EPA-8x10G'
# },
# 'TenGigabitEthernet0/1/6': {
# 'arp_timeout': '04:00:00',
# 'arp_type': 'arpa',
# 'auto_negotiate': False,
# 'bandwidth': 10000000,
# 'counters': {
# 'in_broadcast_pkts': 0,
# 'in_crc_errors': 0,
# 'in_errors': 0,
# 'in_frame': 0,
# 'in_giants': 0,
# 'in_ignored': 0,
# 'in_mac_pause_frames': 0,
# 'in_multicast_pkts': 0,
# 'in_no_buffer': 0,
# 'in_octets': 0,
# 'in_overrun': 0,
# 'in_pkts': 0,
# 'in_runts': 0,
# 'in_throttles': 0,
# 'in_watchdog': 0,
# 'last_clear': 'never',
# 'out_babble': 0,
# 'out_buffer_failure': 0,
# 'out_buffers_swapped': 0,
# 'out_collision': 0,
# 'out_deferred': 0,
# 'out_errors': 0,
# 'out_interface_resets': 1,
# 'out_late_collision': 0,
# 'out_lost_carrier': 0,
# 'out_mac_pause_frames': 0,
# 'out_no_carrier': 0,
# 'out_octets': 0,
# 'out_pkts': 0,
# 'out_underruns': 0,
# 'out_unknown_protocl_drops': 0,
# 'rate': {
# 'in_rate': 0,
# 'in_rate_pkts': 0,
# 'load_interval': 300,
# 'out_rate': 0,
# 'out_rate_pkts': 0
# }
# },
# 'delay': 10,
# 'duplex_mode': 'full',
# 'enabled': False,
# 'encapsulations': {
# 'encapsulation': 'arpa'
# },
# 'flow_control': {
# 'receive': False,
# 'send': False
# },
# 'last_input': 'never',
# 'last_output': 'never',
# 'line_protocol': 'down',
# 'link_type': 'force-up',
# 'mac_address': '2c33.11ff.fa1f',
# 'media_type': 'unknown',
# 'mtu': 1500,
# 'oper_status': 'down',
# 'output_hang': 'never',
# 'phys_address': '2c33.11ff.fa1f',
# 'port_channel': {
# 'port_channel_member': False
# },
# 'port_speed': '10000mbps',
# 'queues': {
# 'input_queue_drops': 0,
# 'input_queue_flushes': 0,
# 'input_queue_max': 375,
# 'input_queue_size': 0,
# 'output_queue_max': 40,
# 'output_queue_size': 0,
# 'queue_strategy': 'fifo',
# 'total_output_drop': 0
# },
# 'reliability': '255/255',
# 'rxload': '1/255',
# 'txload': '1/255',
# 'type': 'BUILT-IN-EPA-8x10G'
# }
# }
# def test_golden_1(self):
# self.device = Mock(**self.golden_output_1)
# interface_obj = ShowInterfaces(device=self.device)
# parsed_output = interface_obj.parse()
# self.maxDiff = None
# self.assertEqual(parsed_output,self.golden_parsed_output_1)
# def test_golden_output_2(self):
# self.device = Mock(**self.golden_output_2)
# obj = ShowInterfaces(device = self.device)
# parsed_output = obj.parse()
# self.maxDiff = None
# self.assertEqual(parsed_output,self.golden_parsed_output_2)
#############################################################################
# unittest for Show ip interface
#############################################################################
class TestShowIpInterface(unittest.TestCase):
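    """Unit tests for the 'show ip interface' parser.

    Each test feeds canned CLI output to the parser through a mocked device
    (via execute.return_value) and compares the result against the matching
    golden_parsed_* dictionary.
    """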
device = Device(name='aDevice')
empty_output = {'execute.return_value': ''}
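    # Expected parsed structure for the golden CLI output defined further below.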
golden_parsed_output = {
"Vlan211": {
"security_level": "default",
"ip_route_cache_flags": [
"CEF",
"Fast"
],
"enabled": True,
"oper_status": "up",
"address_determined_by": "configuration file",
"router_discovery": False,
"ip_multicast_fast_switching": False,
"split_horizon": True,
"bgp_policy_mapping": False,
"ip_output_packet_accounting": False,
"mtu": 1500,
"policy_routing": False,
"local_proxy_arp": False,
"proxy_arp": True,
"network_address_translation": False,
"ip_cef_switching_turbo_vector": True,
"icmp": {
"redirects": "always sent",
"mask_replies": "never sent",
"unreachables": "always sent",
},
"ipv4": {
"192.168.76.1/24": {
"prefix_length": "24",
"ip": "192.168.76.1",
"secondary": False,
"broadcast_address": "255.255.255.255"
}
},
"ip_access_violation_accounting": False,
"ip_cef_switching": True,
"unicast_routing_topologies": {
"topology": {
"base": {
"status": "up"
}
},
},
"ip_null_turbo_vector": True,
"probe_proxy_name_replies": False,
"ip_fast_switching": True,
"ip_multicast_distributed_fast_switching": False,
"tcp_ip_header_compression": False,
"rtp_ip_header_compression": False,
"input_features": ["MCI Check"],
"directed_broadcast_forwarding": False,
"ip_flow_switching": False
},
"GigabitEthernet0/0": {
"security_level": "default",
            "address_determined_by": "setup command",
"ip_route_cache_flags": [
"CEF",
"Fast"
],
"enabled": True,
"oper_status": "up",
"router_discovery": False,
"ip_multicast_fast_switching": False,
"split_horizon": True,
"bgp_policy_mapping": False,
"ip_output_packet_accounting": False,
"mtu": 1500,
"policy_routing": False,
"local_proxy_arp": False,
"vrf": "Mgmt-vrf",
"proxy_arp": True,
"network_address_translation": False,
"ip_cef_switching_turbo_vector": True,
"icmp": {
"redirects": "always sent",
"mask_replies": "never sent",
"unreachables": "always sent",
},
"ipv4": {
"10.1.8.134/24": {
"prefix_length": "24",
"ip": "10.1.8.134",
"secondary": False,
"broadcast_address": "255.255.255.255"
}
},
"ip_access_violation_accounting": False,
"ip_cef_switching": True,
"unicast_routing_topologies": {
"topology": {
"base": {
"status": "up"
}
},
},
"ip_null_turbo_vector": True,
"probe_proxy_name_replies": False,
"ip_fast_switching": True,
"ip_multicast_distributed_fast_switching": False,
"tcp_ip_header_compression": False,
"rtp_ip_header_compression": False,
"input_features": ["MCI Check"],
"directed_broadcast_forwarding": False,
"ip_flow_switching": False
},
"GigabitEthernet2": {
"enabled": False,
"oper_status": "down"
},
"GigabitEthernet1/0/1": {
"security_level": "default",
            "address_determined_by": "setup command",
"ip_route_cache_flags": [
"CEF",
"Fast"
],
"enabled": False,
"oper_status": "down",
"router_discovery": False,
"ip_multicast_fast_switching": False,
"split_horizon": True,
"bgp_policy_mapping": False,
"ip_output_packet_accounting": False,
"mtu": 1500,
"policy_routing": False,
"local_proxy_arp": False,
"proxy_arp": True,
"network_address_translation": False,
"ip_cef_switching_turbo_vector": True,
"icmp": {
"redirects": "always sent",
"mask_replies": "never sent",
"unreachables": "always sent",
},
"ipv4": {
"10.1.1.1/24": {
"prefix_length": "24",
"ip": "10.1.1.1",
"secondary": False,
"broadcast_address": "255.255.255.255"
},
"10.2.2.2/24": {
"prefix_length": "24",
"ip": "10.2.2.2",
"secondary": True
},
},
"ip_access_violation_accounting": False,
"ip_cef_switching": True,
"unicast_routing_topologies": {
"topology": {
"base": {
"status": "up"
}
},
},
            "wccp": {
                "redirect_outbound": False,
                "redirect_inbound": False,
                "redirect_exclude": False,
            },
"ip_null_turbo_vector": True,
"probe_proxy_name_replies": False,
"ip_fast_switching": True,
"ip_multicast_distributed_fast_switching": False,
"tcp_ip_header_compression": False,
"rtp_ip_header_compression": False,
"directed_broadcast_forwarding": False,
"ip_flow_switching": False,
"input_features": ["MCI Check", "QoS Classification", "QoS Marking"],
}
}
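    # Raw 'show ip interface' output returned by the mocked device's execute().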
golden_output = {'execute.return_value': '''
Vlan211 is up, line protocol is up
Internet address is 192.168.76.1/24
Broadcast address is 255.255.255.255
Address determined by configuration file
MTU is 1500 bytes
Helper address is not set
Directed broadcast forwarding is disabled
Outgoing Common access list is not set
Outgoing access list is not set
Inbound Common access list is not set
Inbound access list is not set
Proxy ARP is enabled
Local Proxy ARP is disabled
Security level is default
Split horizon is enabled
ICMP redirects are always sent
ICMP unreachables are always sent
ICMP mask replies are never sent
IP fast switching is enabled
IP Flow switching is disabled
IP CEF switching is enabled
IP CEF switching turbo vector
IP Null turbo vector
Associated unicast routing topologies:
Topology "base", operation state is UP
IP multicast fast switching is disabled
IP multicast distributed fast switching is disabled
IP route-cache flags are Fast, CEF
Router Discovery is disabled
IP output packet accounting is disabled
IP access violation accounting is disabled
TCP/IP header compression is disabled
RTP/IP header compression is disabled
Probe proxy name replies are disabled
Policy routing is disabled
Network address translation is disabled
BGP Policy Mapping is disabled
Input features: MCI Check
GigabitEthernet0/0 is up, line protocol is up
Internet address is 10.1.8.134/24
Broadcast address is 255.255.255.255
Address determined by setup command
MTU is 1500 bytes
Helper address is not set
Directed broadcast forwarding is disabled
Outgoing Common access list is not set
Outgoing access list is not set
Inbound Common access list is not set
Inbound access list is not set
Proxy ARP is enabled
Local Proxy ARP is disabled
Security level is default
Split horizon is enabled
ICMP redirects are always sent
ICMP unreachables are always sent
ICMP mask replies are never sent
IP fast switching is enabled
IP Flow switching is disabled
IP CEF switching is enabled
IP CEF switching turbo vector
IP Null turbo vector
VPN Routing/Forwarding "Mgmt-vrf"
Associated unicast routing topologies:
Topology "base", operation state is UP
IP multicast fast switching is disabled
IP multicast distributed fast switching is disabled
IP route-cache flags are Fast, CEF
Router Discovery is disabled
IP output packet accounting is disabled
IP access violation accounting is disabled
TCP/IP header compression is disabled
RTP/IP header compression is disabled
Probe proxy name replies are disabled
Policy routing is disabled
Network address translation is disabled
BGP Policy Mapping is disabled
Input features: MCI Check
GigabitEthernet1/0/1 is administratively down, line protocol is down
Internet address is 10.1.1.1/24
Broadcast address is 255.255.255.255
Address determined by setup command
MTU is 1500 bytes
Helper address is not set
Directed broadcast forwarding is disabled
Secondary address 10.2.2.2/24
Outgoing Common access list is not set
Outgoing access list is not set
Inbound Common access list is not set
Inbound access list is not set
Proxy ARP is enabled
Local Proxy ARP is disabled
Security level is default
Split horizon is enabled
ICMP redirects are always sent
ICMP unreachables are always sent
ICMP mask replies are never sent
IP fast switching is enabled
IP Flow switching is disabled
IP CEF switching is enabled
IP CEF switching turbo vector
IP Null turbo vector
Associated unicast routing topologies:
Topology "base", operation state is UP
IP multicast fast switching is disabled
IP multicast distributed fast switching is disabled
IP route-cache flags are Fast, CEF
Router Discovery is disabled
IP output packet accounting is disabled
IP access violation accounting is disabled
TCP/IP header compression is disabled
RTP/IP header compression is disabled
Probe proxy name replies are disabled
Policy routing is disabled
Network address translation is disabled
BGP Policy Mapping is disabled
Input features: QoS Classification, QoS Marking, MCI Check
IPv4 WCCP Redirect outbound is disabled
IPv4 WCCP Redirect inbound is disabled
IPv4 WCCP Redirect exclude is disabled
GigabitEthernet2 is administratively down, line protocol is down
Internet protocol processing disabled
'''}
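    # Output for a single-interface query: 'show ip interface GigabitEthernet1'.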
golden_interface_output = {'execute.return_value':'''
CE1#show ip interface GigabitEthernet1
GigabitEthernet1 is up, line protocol is up
Internet address is 172.16.1.243/24
Broadcast address is 255.255.255.255
Address determined by DHCP
MTU is 1500 bytes
Helper address is not set
Directed broadcast forwarding is disabled
Outgoing Common access list is not set
Outgoing access list is not set
Inbound Common access list is not set
Inbound access list is not set
Proxy ARP is enabled
Local Proxy ARP is disabled
Security level is default
Split horizon is enabled
ICMP redirects are always sent
ICMP unreachables are always sent
ICMP mask replies are never sent
IP fast switching is enabled
IP Flow switching is disabled
IP CEF switching is enabled
IP CEF switching turbo vector
IP Null turbo vector
Associated unicast routing topologies:
Topology "base", operation state is UP
IP multicast fast switching is enabled
IP multicast distributed fast switching is disabled
IP route-cache flags are Fast, CEF
Router Discovery is disabled
IP output packet accounting is disabled
IP access violation accounting is disabled
TCP/IP header compression is disabled
RTP/IP header compression is disabled
Probe proxy name replies are disabled
Policy routing is disabled
Network address translation is disabled
BGP Policy Mapping is disabled
Input features: MCI Check
IPv4 WCCP Redirect outbound is disabled
IPv4 WCCP Redirect inbound is disabled
IPv4 WCCP Redirect exclude is disabled
'''
}
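    # Expected parser result for the single-interface output above.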
golden_parsed_interface_output = {
"GigabitEthernet1": {
"ip_multicast_fast_switching": True,
"oper_status": "up",
"ip_output_packet_accounting": False,
"address_determined_by": "DHCP",
"rtp_ip_header_compression": False,
"ip_multicast_distributed_fast_switching": False,
"wccp": {
"redirect_exclude": False,
"redirect_outbound": False,
"redirect_inbound": False
},
"unicast_routing_topologies": {
"topology": {
"base": {
"status": "up"
}
}
},
"router_discovery": False,
"tcp_ip_header_compression": False,
"probe_proxy_name_replies": False,
"local_proxy_arp": False,
"policy_routing": False,
"mtu": 1500,
"icmp": {
"mask_replies": "never sent",
"unreachables": "always sent",
"redirects": "always sent"
},
"enabled": True,
"ip_route_cache_flags": [
"CEF",
"Fast"
],
"ip_cef_switching": True,
"ip_fast_switching": True,
"security_level": "default",
"directed_broadcast_forwarding": False,
"proxy_arp": True,
"ip_null_turbo_vector": True,
"network_address_translation": False,
"input_features": [
"MCI Check"
],
"bgp_policy_mapping": False,
"split_horizon": True,
"ip_access_violation_accounting": False,
"ip_cef_switching_turbo_vector": True,
"ipv4": {
"172.16.1.243/24": {
"ip": "172.16.1.243",
"prefix_length": "24",
"broadcast_address": "255.255.255.255",
"secondary": False
}
},
"ip_flow_switching": False
}
}
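    # Expected result when the address is negotiated via DHCP and the interface
    # is administratively down.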
golden_parsed_output2 = {
'GigabitEthernet1': {
'bgp_policy_mapping': False,
'directed_broadcast_forwarding': False,
'enabled': False,
'icmp': {
'mask_replies': 'never sent',
'redirects': 'always sent',
'unreachables': 'always sent'},
'input_features': ['MCI Check'],
'ip_access_violation_accounting': False,
'ip_cef_switching': True,
'ip_cef_switching_turbo_vector': True,
'ip_fast_switching': True,
'ip_flow_switching': False,
'ip_multicast_distributed_fast_switching': False,
'ip_multicast_fast_switching': True,
'ip_null_turbo_vector': True,
'ip_output_packet_accounting': False,
'ip_route_cache_flags': ['CEF', 'Fast'],
'ipv4': {
'dhcp_negotiated': {
'broadcast_address': '255.255.255.255',
'ip': 'dhcp_negotiated'}},
'local_proxy_arp': False,
'mtu': 1500,
'network_address_translation': False,
'oper_status': 'down',
'policy_routing': False,
'probe_proxy_name_replies': False,
'proxy_arp': True,
'router_discovery': False,
'rtp_ip_header_compression': False,
'security_level': 'default',
'split_horizon': True,
'tcp_ip_header_compression': False,
'unicast_routing_topologies': {
'topology': {
'base': {
'status': 'down'}}},
'wccp': {
'redirect_exclude': False,
'redirect_inbound': False,
'redirect_outbound': False}}}
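    # Raw output containing 'Internet address will be negotiated using DHCP'.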
golden_output2 = {'execute.return_value': '''
GigabitEthernet1 is administratively down, line protocol is down
Internet address will be negotiated using DHCP
Broadcast address is 255.255.255.255
MTU is 1500 bytes
Helper address is not set
Directed broadcast forwarding is disabled
Outgoing Common access list is not set
Outgoing access list is not set
Inbound Common access list is not set
Inbound access list is not set
Proxy ARP is enabled
Local Proxy ARP is disabled
Security level is default
Split horizon is enabled
ICMP redirects are always sent
ICMP unreachables are always sent
ICMP mask replies are never sent
IP fast switching is enabled
IP Flow switching is disabled
IP CEF switching is enabled
IP CEF switching turbo vector
IP Null turbo vector
Associated unicast routing topologies:
Topology "base", operation state is DOWN
IP multicast fast switching is enabled
IP multicast distributed fast switching is disabled
IP route-cache flags are Fast, CEF
Router Discovery is disabled
IP output packet accounting is disabled
IP access violation accounting is disabled
TCP/IP header compression is disabled
RTP/IP header compression is disabled
Probe proxy name replies are disabled
Policy routing is disabled
Network address translation is disabled
BGP Policy Mapping is disabled
Input features: MCI Check
IPv4 WCCP Redirect outbound is disabled
IPv4 WCCP Redirect inbound is disabled
IPv4 WCCP Redirect exclude is disabled
'''}
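    # Expected result covering a physical interface and two dot1q subinterfaces,
    # including single and multiple helper addresses.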
golden_parsed_output4 = {
'GigabitEthernet0/0/0': {'address_determined_by': 'non-volatile memory',
'bgp_policy_mapping': False,
'directed_broadcast_forwarding': False,
'enabled': True,
'icmp': {'mask_replies': 'never sent',
'redirects': 'always sent',
'unreachables': 'always sent'},
'input_features': ['MCI Check'],
'ip_access_violation_accounting': False,
'ip_cef_switching': True,
'ip_cef_switching_turbo_vector': True,
'ip_fast_switching': True,
'ip_flow_switching': False,
'ip_multicast_distributed_fast_switching': False,
'ip_multicast_fast_switching': True,
'ip_null_turbo_vector': True,
'ip_output_packet_accounting': False,
'ip_route_cache_flags': ['CEF', 'Fast'],
'ipv4': {'10.236.74.233/30': {'broadcast_address': '255.255.255.255',
'ip': '10.236.74.233',
'prefix_length': '30',
'secondary': False}},
'local_proxy_arp': False,
'mtu': 1500,
'network_address_translation': False,
'oper_status': 'up',
'policy_routing': False,
'probe_proxy_name_replies': False,
'proxy_arp': True,
'router_discovery': False,
'rtp_ip_header_compression': False,
'security_level': 'default',
'split_horizon': True,
'tcp_ip_header_compression': False,
                             'unicast_routing_topologies': {'topology': {'base': {'status': 'up'}}},
'wccp': {'redirect_exclude': False,
'redirect_inbound': False,
'redirect_outbound': False}},
'GigabitEthernet0/0/1.110': {'address_determined_by': 'non-volatile memory',
'bgp_policy_mapping': False,
'directed_broadcast_forwarding': False,
'enabled': True,
'helper_address': ['10.212.2.37', '10.212.0.71'],
'icmp': {'mask_replies': 'never sent',
'redirects': 'always sent',
'unreachables': 'always sent'},
'input_features': ['MCI Check'],
'ip_access_violation_accounting': False,
'ip_cef_switching': True,
'ip_cef_switching_turbo_vector': True,
'ip_fast_switching': True,
'ip_flow_switching': False,
'ip_multicast_distributed_fast_switching': False,
'ip_multicast_fast_switching': True,
'ip_null_turbo_vector': True,
'ip_output_packet_accounting': False,
'ip_route_cache_flags': ['CEF', 'Fast'],
                                 'ipv4': {'10.99.235.1/25': {'broadcast_address': '255.255.255.255',
                                                              'ip': '10.99.235.1',
                                                              'prefix_length': '25',
                                                              'secondary': False}},
'local_proxy_arp': False,
'mtu': 1500,
'network_address_translation': False,
'oper_status': 'up',
'policy_routing': False,
'probe_proxy_name_replies': False,
'proxy_arp': True,
'router_discovery': False,
'rtp_ip_header_compression': False,
'security_level': 'default',
'split_horizon': True,
'tcp_ip_header_compression': False,
'unicast_routing_topologies': {'topology': {'base': {'status': 'up'}}},
'wccp': {'redirect_exclude': False,
'redirect_inbound': False,
'redirect_outbound': False}},
'GigabitEthernet0/0/1.304': {'address_determined_by': 'non-volatile memory',
'bgp_policy_mapping': False,
'directed_broadcast_forwarding': False,
'enabled': True,
'helper_address': ['10.99.216.4'],
'icmp': {'mask_replies': 'never sent',
'redirects': 'always sent',
'unreachables': 'always sent'},
'input_features': ['MCI Check'],
'ip_access_violation_accounting': False,
'ip_cef_switching': True,
'ip_cef_switching_turbo_vector': True,
'ip_fast_switching': True,
'ip_flow_switching': False,
'ip_multicast_distributed_fast_switching': False,
'ip_multicast_fast_switching': True,
'ip_null_turbo_vector': True,
'ip_output_packet_accounting': False,
'ip_route_cache_flags': ['CEF', 'Fast'],
                                 'ipv4': {'10.194.235.1/25': {'broadcast_address': '255.255.255.255',
                                                               'ip': '10.194.235.1',
                                                               'prefix_length': '25',
                                                               'secondary': False}},
'local_proxy_arp': False,
'mtu': 1500,
'network_address_translation': False,
'oper_status': 'up',
'policy_routing': False,
'probe_proxy_name_replies': False,
'proxy_arp': True,
'router_discovery': False,
'rtp_ip_header_compression': False,
'security_level': 'default',
'split_horizon': True,
'tcp_ip_header_compression': False,
'unicast_routing_topologies': {'topology': {'base': {'status': 'up'}}},
'wccp': {'redirect_exclude': False,
'redirect_inbound': False,
'redirect_outbound': False}}}
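    # Raw output matching golden_parsed_output4, built from implicitly
    # concatenated string literals rather than a triple-quoted block.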
golden_output4 = {
"execute.return_value":
'GigabitEthernet0/0/0 is up, line protocol is up\n'
' Internet address is 10.236.74.233/30\n'
' Broadcast address is 255.255.255.255\n'
' Address determined by non-volatile memory\n'
' MTU is 1500 bytes\n'
' Helper address is not set\n'
' Directed broadcast forwarding is disabled\n'
' Outgoing Common access list is not set\n'
' Outgoing access list is not set\n'
' Inbound Common access list is not set\n'
' Inbound access list is not set\n'
' Proxy ARP is enabled\n'
' Local Proxy ARP is disabled\n'
' Security level is default\n'
' Split horizon is enabled\n'
' ICMP redirects are always sent\n'
' ICMP unreachables are always sent\n'
' ICMP mask replies are never sent\n'
' IP fast switching is enabled\n'
' IP Flow switching is disabled\n'
' IP CEF switching is enabled\n'
' IP CEF switching turbo vector\n'
' IP Null turbo vector\n'
' Associated unicast routing topologies:\n'
' Topology "base", operation state is UP\n'
' IP multicast fast switching is enabled\n'
' IP multicast distributed fast switching is disabled\n'
' IP route-cache flags are Fast, CEF\n'
' Router Discovery is disabled\n'
' IP output packet accounting is disabled\n'
' IP access violation accounting is disabled\n'
' TCP/IP header compression is disabled\n'
' RTP/IP header compression is disabled\n'
' Probe proxy name replies are disabled\n'
' Policy routing is disabled\n'
' Network address translation is disabled\n'
' BGP Policy Mapping is disabled\n'
' Input features: MCI Check\n'
' IPv4 WCCP Redirect outbound is disabled\n'
' IPv4 WCCP Redirect inbound is disabled\n'
' IPv4 WCCP Redirect exclude is disabled\n'
'GigabitEthernet0/0/1.110 is up, line protocol is up\n'
' Internet address is 10.99.235.1/25\n'
' Broadcast address is 255.255.255.255\n'
' Address determined by non-volatile memory\n'
' MTU is 1500 bytes\n'
' Helper addresses are 10.212.2.37\n'
' 10.212.0.71\n'
' Directed broadcast forwarding is disabled\n'
' Outgoing Common access list is not set\n'
' Outgoing access list is not set\n'
' Inbound Common access list is not set\n'
' Inbound access list is not set\n'
' Proxy ARP is enabled\n'
' Local Proxy ARP is disabled\n'
' Security level is default\n'
' Split horizon is enabled\n'
' ICMP redirects are always sent\n'
' ICMP unreachables are always sent\n'
' ICMP mask replies are never sent\n'
' IP fast switching is enabled\n'
' IP Flow switching is disabled\n'
' IP CEF switching is enabled\n'
' IP CEF switching turbo vector\n'
' IP Null turbo vector\n'
' Associated unicast routing topologies:\n'
' Topology "base", operation state is UP\n'
' IP multicast fast switching is enabled\n'
' IP multicast distributed fast switching is disabled\n'
' IP route-cache flags are Fast, CEF\n'
' Router Discovery is disabled\n'
' IP output packet accounting is disabled\n'
' IP access violation accounting is disabled\n'
' TCP/IP header compression is disabled\n'
' RTP/IP header compression is disabled\n'
' Probe proxy name replies are disabled\n'
' Policy routing is disabled\n'
' Network address translation is disabled\n'
' BGP Policy Mapping is disabled\n'
' Input features: MCI Check\n'
' IPv4 WCCP Redirect outbound is disabled\n'
' IPv4 WCCP Redirect inbound is disabled\n'
' IPv4 WCCP Redirect exclude is disabled\n'
'GigabitEthernet0/0/1.304 is up, line protocol is up\n'
' Internet address is 10.194.235.1/25\n'
' Broadcast address is 255.255.255.255\n'
' Address determined by non-volatile memory\n'
' MTU is 1500 bytes\n'
' Helper address is 10.99.216.4\n'
' Directed broadcast forwarding is disabled\n'
' Outgoing Common access list is not set\n'
' Outgoing access list is not set\n'
' Inbound Common access list is not set\n'
' Inbound access list is not set\n'
' Proxy ARP is enabled\n'
' Local Proxy ARP is disabled\n'
' Security level is default\n'
' Split horizon is enabled\n'
' ICMP redirects are always sent\n'
' ICMP unreachables are always sent\n'
' ICMP mask replies are never sent\n'
' IP fast switching is enabled\n'
' IP Flow switching is disabled\n'
' IP CEF switching is enabled\n'
' IP CEF switching turbo vector\n'
' IP Null turbo vector\n'
' Associated unicast routing topologies:\n'
' Topology "base", operation state is UP\n'
' IP multicast fast switching is enabled\n'
' IP multicast distributed fast switching is disabled\n'
' IP route-cache flags are Fast, CEF\n'
' Router Discovery is disabled\n'
' IP output packet accounting is disabled\n'
' IP access violation accounting is disabled\n'
' TCP/IP header compression is disabled\n'
' RTP/IP header compression is disabled\n'
' Probe proxy name replies are disabled\n'
' Policy routing is disabled\n'
' Network address translation is disabled\n'
' BGP Policy Mapping is disabled\n'
' Input features: MCI Check\n'
' IPv4 WCCP Redirect outbound is disabled\n'
' IPv4 WCCP Redirect inbound is disabled\n'
' IPv4 WCCP Redirect exclude is disabled\n'
}
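    # Raw output exercising multi-line helper-address lists on SVIs in VRF "user".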
golden_output_helper = {'execute.return_value':
'''
Vlan1 is administratively down, line protocol is down
Internet protocol processing disabled
Vlan10 is up, line protocol is up
Internet address is 10.4.1.1/24
Broadcast address is 255.255.255.255
Address determined by non-volatile memory
MTU is 9154 bytes
Helper addresses are 10.1.1.1
10.2.2.2
10.3.3.3
10.4.4.4
Directed broadcast forwarding is disabled
Outgoing Common access list is not set
Outgoing access list is not set
Inbound Common access list is not set
Inbound access list is not set
Proxy ARP is enabled
Local Proxy ARP is disabled
Security level is default
Split horizon is enabled
ICMP redirects are always sent
ICMP unreachables are always sent
ICMP mask replies are never sent
IP fast switching is enabled
IP Flow switching is disabled
IP CEF switching is enabled
IP CEF switching turbo vector
IP Null turbo vector
VPN Routing/Forwarding "user"
Associated unicast routing topologies:
Topology "base", operation state is UP
IP multicast fast switching is enabled
IP multicast distributed fast switching is disabled
IP route-cache flags are Fast, CEF
Router Discovery is disabled
IP output packet accounting is disabled
IP access violation accounting is disabled
TCP/IP header compression is disabled
RTP/IP header compression is disabled
Probe proxy name replies are disabled
Policy routing is disabled
Network address translation is disabled
BGP Policy Mapping is disabled
Input features: MCI Check
IPv4 WCCP Redirect outbound is disabled
IPv4 WCCP Redirect inbound is disabled
IPv4 WCCP Redirect exclude is disabled
Vlan11 is up, line protocol is up
Internet address is 10.16.2.1/24
Broadcast address is 255.255.255.255
Address determined by non-volatile memory
MTU is 9154 bytes
Helper addresses are 10.1.1.1
10.2.2.2
10.3.3.3
10.4.4.4
Directed broadcast forwarding is disabled
Outgoing Common access list is not set
Outgoing access list is not set
Inbound Common access list is not set
Inbound access list is not set
Proxy ARP is enabled
Local Proxy ARP is disabled
Security level is default
Split horizon is enabled
ICMP redirects are always sent
ICMP unreachables are always sent
ICMP mask replies are never sent
IP fast switching is enabled
IP Flow switching is disabled
IP CEF switching is enabled
IP CEF switching turbo vector
IP Null turbo vector
VPN Routing/Forwarding "user"
Associated unicast routing topologies:
Topology "base", operation state is UP
IP multicast fast switching is enabled
IP multicast distributed fast switching is disabled
IP route-cache flags are Fast, CEF
Router Discovery is disabled
IP output packet accounting is disabled
IP access violation accounting is disabled
TCP/IP header compression is disabled
RTP/IP header compression is disabled
Probe proxy name replies are disabled
Policy routing is disabled
Network address translation is disabled
BGP Policy Mapping is disabled
Input features: MCI Check
IPv4 WCCP Redirect outbound is disabled
IPv4 WCCP Redirect inbound is disabled
IPv4 WCCP Redirect exclude is disabled
'''
}
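    # Expected parser result for the helper-address output above.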
golden_parsed_output_helper = {
'Vlan1': {'enabled': False, 'oper_status': 'down'},
'Vlan10': {'address_determined_by': 'non-volatile memory',
'bgp_policy_mapping': False,
'directed_broadcast_forwarding': False,
'enabled': True,
'helper_address': ['10.1.1.1', '10.2.2.2', '10.3.3.3', '10.4.4.4'],
'icmp': {'mask_replies': 'never sent',
'redirects': 'always sent',
'unreachables': 'always sent'},
'input_features': ['MCI Check'],
'ip_access_violation_accounting': False,
'ip_cef_switching': True,
'ip_cef_switching_turbo_vector': True,
'ip_fast_switching': True,
'ip_flow_switching': False,
'ip_multicast_distributed_fast_switching': False,
'ip_multicast_fast_switching': True,
'ip_null_turbo_vector': True,
'ip_output_packet_accounting': False,
'ip_route_cache_flags': ['CEF', 'Fast'],
'ipv4': {'10.4.1.1/24': {'broadcast_address': '255.255.255.255',
'ip': '10.4.1.1',
'prefix_length': '24',
'secondary': False}},
'local_proxy_arp': False,
'mtu': 9154,
'network_address_translation': False,
'oper_status': 'up',
'policy_routing': False,
'probe_proxy_name_replies': False,
'proxy_arp': True,
'router_discovery': False,
'rtp_ip_header_compression': False,
'security_level': 'default',
'split_horizon': True,
'tcp_ip_header_compression': False,
'unicast_routing_topologies': {'topology': {'base': {'status': 'up'}}},
'vrf': 'user',
'wccp': {'redirect_exclude': False,
'redirect_inbound': False,
'redirect_outbound': False}},
'Vlan11': {'address_determined_by': 'non-volatile memory',
'bgp_policy_mapping': False,
'directed_broadcast_forwarding': False,
'enabled': True,
'helper_address': ['10.1.1.1', '10.2.2.2', '10.3.3.3', '10.4.4.4'],
'icmp': {'mask_replies': 'never sent',
'redirects': 'always sent',
'unreachables': 'always sent'},
'input_features': ['MCI Check'],
'ip_access_violation_accounting': False,
'ip_cef_switching': True,
'ip_cef_switching_turbo_vector': True,
'ip_fast_switching': True,
'ip_flow_switching': False,
'ip_multicast_distributed_fast_switching': False,
'ip_multicast_fast_switching': True,
'ip_null_turbo_vector': True,
'ip_output_packet_accounting': False,
'ip_route_cache_flags': ['CEF', 'Fast'],
'ipv4': {'10.16.2.1/24': {'broadcast_address': '255.255.255.255',
'ip': '10.16.2.1',
'prefix_length': '24',
'secondary': False}},
'local_proxy_arp': False,
'mtu': 9154,
'network_address_translation': False,
'oper_status': 'up',
'policy_routing': False,
'probe_proxy_name_replies': False,
'proxy_arp': True,
'router_discovery': False,
'rtp_ip_header_compression': False,
'security_level': 'default',
'split_horizon': True,
'tcp_ip_header_compression': False,
'unicast_routing_topologies': {'topology': {'base': {'status': 'up'}}},
'vrf': 'user',
'wccp': {'redirect_exclude': False,
'redirect_inbound': False,
'redirect_outbound': False}}
}
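    # Large mixed output: IP-disabled interfaces, loopbacks, port-channel
    # subinterfaces, and tunnels, with multicast routing topologies and
    # WCCP redirect inbound enabled on several interfaces.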
golden_output5 = {'execute.return_value':
'''
GigabitEthernet0/0/0 is up, line protocol is up
Internet protocol processing disabled
GigabitEthernet0/0/1 is administratively down, line protocol is down
Internet protocol processing disabled
GigabitEthernet0/0/2 is administratively down, line protocol is down
Internet protocol processing disabled
GigabitEthernet0/1/0 is up, line protocol is up
Internet address is 10.70.8.18/29
Broadcast address is 255.255.255.255
Address determined by non-volatile memory
MTU is 1500 bytes
Helper address is not set
Directed broadcast forwarding is disabled
Multicast reserved groups joined: 224.0.0.1 224.0.0.2 224.0.0.22 224.0.0.13
Outgoing Common access list is not set
Outgoing access list is not set
Inbound Common access list is not set
Inbound access list is not set
Proxy ARP is disabled
Local Proxy ARP is disabled
Security level is default
Split horizon is enabled
ICMP redirects are never sent
ICMP unreachables are never sent
ICMP mask replies are never sent
IP fast switching is enabled
IP Flow switching is disabled
IP CEF switching is enabled
IP CEF switching turbo vector
IP Null turbo vector
Associated unicast routing topologies:
Topology "base", operation state is UP
Associated multicast routing topologies:
Topology "base", operation state is UP
IP multicast fast switching is enabled
IP multicast distributed fast switching is disabled
IP route-cache flags are Fast, CEF
Router Discovery is disabled
IP output packet accounting is disabled
IP access violation accounting is disabled
TCP/IP header compression is disabled
RTP/IP header compression is disabled
Probe proxy name replies are disabled
Policy routing is disabled
Network address translation is disabled
BGP Policy Mapping is disabled
Input features: Virtual Fragment Reassembly, IPSec input classification, WCCP, MCI Check
Output features: IPSec output classification, QoS Classification, QoS Marking, IPSec: to crypto engine, Post-encryption output features, MFIB Adjacency
IPv4 WCCP Redirect outbound is disabled
IPv4 WCCP Redirect inbound is enabled
IPv4 WCCP Redirect exclude is disabled
GigabitEthernet0/1/1 is administratively down, line protocol is down
Internet protocol processing disabled
Service-Engine0/2/0 is up, line protocol is up
Internet protocol processing disabled
Service-Engine0/3/0 is up, line protocol is up
Internet protocol processing disabled
Service-Engine0/4/0 is up, line protocol is up
Internet protocol processing disabled
GigabitEthernet0 is administratively down, line protocol is down
Internet protocol processing disabled
Loopback0 is up, line protocol is up
Internet address is 10.1.1.188/32
Broadcast address is 255.255.255.255
Address determined by non-volatile memory
MTU is 1514 bytes
Helper address is not set
Directed broadcast forwarding is disabled
Outgoing Common access list is not set
Outgoing access list is not set
Inbound Common access list is not set
Inbound access list is not set
Proxy ARP is enabled
Local Proxy ARP is disabled
Security level is default
Split horizon is enabled
ICMP redirects are always sent
ICMP unreachables are always sent
ICMP mask replies are never sent
IP fast switching is enabled
IP Flow switching is disabled
IP CEF switching is enabled
IP CEF switching turbo vector
IP Null turbo vector
Associated unicast routing topologies:
Topology "base", operation state is UP
IP multicast fast switching is enabled
IP multicast distributed fast switching is disabled
IP route-cache flags are Fast, CEF
Router Discovery is disabled
IP output packet accounting is disabled
IP access violation accounting is disabled
TCP/IP header compression is disabled
RTP/IP header compression is disabled
Probe proxy name replies are disabled
Policy routing is disabled
Network address translation is disabled
BGP Policy Mapping is disabled
Input features: MCI Check
IPv4 WCCP Redirect outbound is disabled
IPv4 WCCP Redirect inbound is disabled
IPv4 WCCP Redirect exclude is disabled
Loopback1 is up, line protocol is up
Internet address is 172.16.186.198/32
Broadcast address is 255.255.255.255
Address determined by non-volatile memory
MTU is 1514 bytes
Helper address is not set
Directed broadcast forwarding is disabled
Outgoing Common access list is not set
Outgoing access list is not set
Inbound Common access list is not set
Inbound access list is not set
Proxy ARP is enabled
Local Proxy ARP is disabled
Security level is default
Split horizon is enabled
ICMP redirects are always sent
ICMP unreachables are always sent
ICMP mask replies are never sent
IP fast switching is enabled
IP Flow switching is disabled
IP CEF switching is enabled
IP CEF switching turbo vector
IP Null turbo vector
Associated unicast routing topologies:
Topology "base", operation state is UP
IP multicast fast switching is enabled
IP multicast distributed fast switching is disabled
IP route-cache flags are Fast, CEF
Router Discovery is disabled
IP output packet accounting is disabled
IP access violation accounting is disabled
TCP/IP header compression is disabled
RTP/IP header compression is disabled
Probe proxy name replies are disabled
Policy routing is disabled
Network address translation is disabled
BGP Policy Mapping is disabled
Input features: MCI Check
IPv4 WCCP Redirect outbound is disabled
IPv4 WCCP Redirect inbound is disabled
IPv4 WCCP Redirect exclude is disabled
Loopback2 is up, line protocol is up
Internet address is 172.16.186.199/32
Broadcast address is 255.255.255.255
Address determined by non-volatile memory
MTU is 1514 bytes
Helper address is not set
Directed broadcast forwarding is disabled
Outgoing Common access list is not set
Outgoing access list is not set
Inbound Common access list is not set
Inbound access list is not set
Proxy ARP is enabled
Local Proxy ARP is disabled
Security level is default
Split horizon is enabled
ICMP redirects are always sent
ICMP unreachables are always sent
ICMP mask replies are never sent
IP fast switching is enabled
IP Flow switching is disabled
IP CEF switching is enabled
IP CEF switching turbo vector
IP Null turbo vector
Associated unicast routing topologies:
Topology "base", operation state is UP
IP multicast fast switching is enabled
IP multicast distributed fast switching is disabled
IP route-cache flags are Fast, CEF
Router Discovery is disabled
IP output packet accounting is disabled
IP access violation accounting is disabled
TCP/IP header compression is disabled
RTP/IP header compression is disabled
Probe proxy name replies are disabled
Policy routing is disabled
Network address translation is disabled
BGP Policy Mapping is disabled
Input features: MCI Check
IPv4 WCCP Redirect outbound is disabled
IPv4 WCCP Redirect inbound is disabled
IPv4 WCCP Redirect exclude is disabled
Port-channel1 is up, line protocol is up
Internet protocol processing disabled
Port-channel1.100 is up, line protocol is up
Internet address is 10.1.1.10/24
Broadcast address is 255.255.255.255
Address determined by non-volatile memory
MTU is 1500 bytes
Helper addresses are 10.1.2.129
10.1.3.129
Directed broadcast forwarding is disabled
Multicast reserved groups joined: 224.0.0.1 224.0.0.2 224.0.0.22 224.0.0.13
Outgoing Common access list is not set
Outgoing access list is not set
Inbound Common access list is not set
Inbound access list is not set
Proxy ARP is enabled
Local Proxy ARP is disabled
Security level is default
Split horizon is enabled
ICMP redirects are always sent
ICMP unreachables are always sent
ICMP mask replies are never sent
IP fast switching is enabled
IP Flow switching is disabled
IP CEF switching is enabled
IP CEF switching turbo vector
IP Null turbo vector
Associated unicast routing topologies:
Topology "base", operation state is UP
Associated multicast routing topologies:
Topology "base", operation state is UP
IP multicast fast switching is enabled
IP multicast distributed fast switching is disabled
IP route-cache flags are Fast, CEF
Router Discovery is disabled
IP output packet accounting is disabled
IP access violation accounting is disabled
TCP/IP header compression is disabled
RTP/IP header compression is disabled
Probe proxy name replies are disabled
Policy routing is disabled
Network address translation is disabled
BGP Policy Mapping is disabled
Input features: WCCP, MCI Check
Output features: MFIB Adjacency
IPv4 WCCP Redirect outbound is disabled
IPv4 WCCP Redirect inbound is enabled
IPv4 WCCP Redirect exclude is disabled
Port-channel1.101 is up, line protocol is up
Internet address is 10.35.189.10/24
Broadcast address is 255.255.255.255
Address determined by non-volatile memory
MTU is 1500 bytes
Helper addresses are 10.1.2.129
10.1.3.129
Directed broadcast forwarding is disabled
Outgoing Common access list is not set
Outgoing access list is not set
Inbound Common access list is not set
Inbound access list is not set
Proxy ARP is enabled
Local Proxy ARP is disabled
Security level is default
Split horizon is enabled
ICMP redirects are always sent
ICMP unreachables are always sent
ICMP mask replies are never sent
IP fast switching is enabled
IP Flow switching is disabled
IP CEF switching is enabled
IP CEF switching turbo vector
IP Null turbo vector
Associated unicast routing topologies:
Topology "base", operation state is UP
IP multicast fast switching is enabled
IP multicast distributed fast switching is disabled
IP route-cache flags are Fast, CEF
Router Discovery is disabled
IP output packet accounting is disabled
IP access violation accounting is disabled
TCP/IP header compression is disabled
RTP/IP header compression is disabled
Probe proxy name replies are disabled
Policy routing is disabled
Network address translation is disabled
BGP Policy Mapping is disabled
Input features: MCI Check
IPv4 WCCP Redirect outbound is disabled
IPv4 WCCP Redirect inbound is disabled
IPv4 WCCP Redirect exclude is disabled
Port-channel1.300 is up, line protocol is up
Internet address is 10.1.1.10/24
Broadcast address is 255.255.255.255
Address determined by non-volatile memory
MTU is 1500 bytes
Helper addresses are 10.1.2.129
10.1.3.129
Directed broadcast forwarding is disabled
Outgoing Common access list is not set
Outgoing access list is not set
Inbound Common access list is not set
Inbound access list is not set
Proxy ARP is enabled
Local Proxy ARP is disabled
Security level is default
Split horizon is enabled
ICMP redirects are always sent
ICMP unreachables are always sent
ICMP mask replies are never sent
IP fast switching is enabled
IP Flow switching is disabled
IP CEF switching is enabled
IP CEF switching turbo vector
IP Null turbo vector
Associated unicast routing topologies:
Topology "base", operation state is UP
IP multicast fast switching is enabled
IP multicast distributed fast switching is disabled
IP route-cache flags are Fast, CEF
Router Discovery is disabled
IP output packet accounting is disabled
IP access violation accounting is disabled
TCP/IP header compression is disabled
RTP/IP header compression is disabled
Probe proxy name replies are disabled
Policy routing is disabled
Network address translation is disabled
BGP Policy Mapping is disabled
Input features: WCCP, MCI Check
IPv4 WCCP Redirect outbound is disabled
IPv4 WCCP Redirect inbound is enabled
IPv4 WCCP Redirect exclude is disabled
Port-channel1.308 is up, line protocol is up
Internet address is 10.1.1.10/24
Broadcast address is 255.255.255.255
Address determined by non-volatile memory
MTU is 1500 bytes
Helper addresses are 10.1.2.129
10.1.3.129
Directed broadcast forwarding is disabled
Outgoing Common access list is not set
Outgoing access list is not set
Inbound Common access list is not set
Inbound access list is not set
Proxy ARP is enabled
Local Proxy ARP is disabled
Security level is default
Split horizon is enabled
ICMP redirects are always sent
ICMP unreachables are always sent
ICMP mask replies are never sent
IP fast switching is enabled
IP Flow switching is disabled
IP CEF switching is enabled
IP CEF switching turbo vector
IP Null turbo vector
Associated unicast routing topologies:
Topology "base", operation state is UP
IP multicast fast switching is enabled
IP multicast distributed fast switching is disabled
IP route-cache flags are Fast, CEF
Router Discovery is disabled
IP output packet accounting is disabled
IP access violation accounting is disabled
TCP/IP header compression is disabled
RTP/IP header compression is disabled
Probe proxy name replies are disabled
Policy routing is disabled
Network address translation is disabled
BGP Policy Mapping is disabled
Input features: MCI Check
IPv4 WCCP Redirect outbound is disabled
IPv4 WCCP Redirect inbound is disabled
IPv4 WCCP Redirect exclude is disabled
Port-channel1.324 is up, line protocol is up
Internet address is 10.1.1.10/24
Broadcast address is 255.255.255.255
Address determined by non-volatile memory
MTU is 1500 bytes
Helper addresses are 10.1.2.129
10.1.3.129
Directed broadcast forwarding is disabled
Outgoing Common access list is not set
Outgoing access list is not set
Inbound Common access list is not set
Inbound access list is not set
Proxy ARP is enabled
Local Proxy ARP is disabled
Security level is default
Split horizon is enabled
ICMP redirects are always sent
ICMP unreachables are always sent
ICMP mask replies are never sent
IP fast switching is enabled
IP Flow switching is disabled
IP CEF switching is enabled
IP CEF switching turbo vector
IP Null turbo vector
Associated unicast routing topologies:
Topology "base", operation state is UP
IP multicast fast switching is enabled
IP multicast distributed fast switching is disabled
IP route-cache flags are Fast, CEF
Router Discovery is disabled
IP output packet accounting is disabled
IP access violation accounting is disabled
TCP/IP header compression is disabled
RTP/IP header compression is disabled
Probe proxy name replies are disabled
Policy routing is disabled
Network address translation is disabled
BGP Policy Mapping is disabled
Input features: MCI Check
IPv4 WCCP Redirect outbound is disabled
IPv4 WCCP Redirect inbound is disabled
IPv4 WCCP Redirect exclude is disabled
Port-channel1.398 is up, line protocol is up
Internet address is 10.1.1.10/24
Broadcast address is 255.255.255.255
Address determined by non-volatile memory
MTU is 1500 bytes
Helper address is 10.1.2.129
Directed broadcast forwarding is disabled
Multicast reserved groups joined: 224.0.0.1 224.0.0.2 224.0.0.22 224.0.0.13
Outgoing Common access list is not set
Outgoing access list is not set
Inbound Common access list is not set
Inbound access list is not set
Proxy ARP is disabled
Local Proxy ARP is disabled
Security level is default
Split horizon is enabled
ICMP redirects are always sent
ICMP unreachables are always sent
ICMP mask replies are never sent
IP fast switching is enabled
IP Flow switching is disabled
IP CEF switching is enabled
IP CEF switching turbo vector
IP Null turbo vector
Associated unicast routing topologies:
Topology "base", operation state is UP
Associated multicast routing topologies:
Topology "base", operation state is UP
IP multicast fast switching is enabled
IP multicast distributed fast switching is disabled
IP route-cache flags are Fast, CEF
Router Discovery is disabled
IP output packet accounting is disabled
IP access violation accounting is disabled
TCP/IP header compression is disabled
RTP/IP header compression is disabled
Probe proxy name replies are disabled
Policy routing is disabled
Network address translation is disabled
BGP Policy Mapping is disabled
Input features: WCCP, MCI Check, TCP Adjust MSS
Output features: TCP Adjust MSS, MFIB Adjacency
IPv4 WCCP Redirect outbound is disabled
IPv4 WCCP Redirect inbound is enabled
IPv4 WCCP Redirect exclude is disabled
Tunnel10 is up, line protocol is up
Internet address is 172.16.186.198/30
Broadcast address is 255.255.255.255
Address determined by non-volatile memory
MTU is 1420 bytes
Helper address is not set
Directed broadcast forwarding is disabled
Multicast reserved groups joined: 224.0.0.1 224.0.0.2 224.0.0.22 224.0.0.13
Outgoing Common access list is not set
Outgoing access list is not set
Inbound Common access list is not set
Inbound access list is not set
Proxy ARP is enabled
Local Proxy ARP is disabled
Security level is default
Split horizon is enabled
ICMP redirects are always sent
ICMP unreachables are always sent
ICMP mask replies are never sent
IP fast switching is enabled
IP Flow switching is disabled
IP CEF switching is enabled
IP CEF switching turbo vector
IP Null turbo vector
Associated unicast routing topologies:
Topology "base", operation state is UP
Associated multicast routing topologies:
Topology "base", operation state is UP
IP multicast fast switching is enabled
IP multicast distributed fast switching is disabled
IP route-cache flags are Fast, CEF
Router Discovery is disabled
IP output packet accounting is disabled
IP access violation accounting is disabled
TCP/IP header compression is disabled
RTP/IP header compression is disabled
Probe proxy name replies are disabled
Policy routing is disabled
Network address translation is disabled
BGP Policy Mapping is disabled
Input features: WCCP, MCI Check, TCP Adjust MSS
Output features: TCP Adjust MSS
IPv4 WCCP Redirect outbound is disabled
IPv4 WCCP Redirect inbound is enabled
IPv4 WCCP Redirect exclude is disabled
Tunnel20 is up, line protocol is up
Internet address is 172.16.186.198/30
Broadcast address is 255.255.255.255
Address determined by non-volatile memory
MTU is 1420 bytes
Helper address is not set
Directed broadcast forwarding is disabled
Multicast reserved groups joined: 224.0.0.1 224.0.0.2 224.0.0.22 224.0.0.13
Outgoing Common access list is not set
Outgoing access list is not set
Inbound Common access list is not set
Inbound access list is not set
Proxy ARP is enabled
Local Proxy ARP is disabled
Security level is default
Split horizon is enabled
ICMP redirects are always sent
ICMP unreachables are always sent
ICMP mask replies are never sent
IP fast switching is enabled
IP Flow switching is disabled
IP CEF switching is enabled
IP CEF switching turbo vector
IP Null turbo vector
Associated unicast routing topologies:
Topology "base", operation state is UP
Associated multicast routing topologies:
Topology "base", operation state is UP
IP multicast fast switching is enabled
IP multicast distributed fast switching is disabled
IP route-cache flags are Fast, CEF
Router Discovery is disabled
IP output packet accounting is disabled
IP access violation accounting is disabled
TCP/IP header compression is disabled
RTP/IP header compression is disabled
Probe proxy name replies are disabled
Policy routing is disabled
Network address translation is disabled
BGP Policy Mapping is disabled
Input features: MCI Check, TCP Adjust MSS
Output features: TCP Adjust MSS
IPv4 WCCP Redirect outbound is disabled
IPv4 WCCP Redirect inbound is disabled
IPv4 WCCP Redirect exclude is disabled
'''
}
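    # Expected parser result for the mixed output above.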
golden_parsed_output5 = {
'GigabitEthernet0': {'enabled': False, 'oper_status': 'down'},
'GigabitEthernet0/0/0': {'enabled': True, 'oper_status': 'up'},
'GigabitEthernet0/0/1': {'enabled': False, 'oper_status': 'down'},
'GigabitEthernet0/0/2': {'enabled': False, 'oper_status': 'down'},
'GigabitEthernet0/1/0': {'address_determined_by': 'non-volatile memory',
'bgp_policy_mapping': False,
'directed_broadcast_forwarding': False,
'enabled': True,
'icmp': {'mask_replies': 'never sent',
'redirects': 'never sent',
'unreachables': 'never sent'},
'input_features': ['IPSec input classification',
'MCI Check',
'Virtual Fragment Reassembly',
'WCCP'],
'ip_access_violation_accounting': False,
'ip_cef_switching': True,
'ip_cef_switching_turbo_vector': True,
'ip_fast_switching': True,
'ip_flow_switching': False,
'ip_multicast_distributed_fast_switching': False,
'ip_multicast_fast_switching': True,
'ip_null_turbo_vector': True,
'ip_output_packet_accounting': False,
'ip_route_cache_flags': ['CEF', 'Fast'],
'ipv4': {'10.70.8.18/29': {'broadcast_address': '255.255.255.255',
'ip': '10.70.8.18',
'prefix_length': '29',
'secondary': False}},
'local_proxy_arp': False,
'mtu': 1500,
'multicast_groups': ['224.0.0.1',
'224.0.0.13',
'224.0.0.2',
'224.0.0.22'],
'network_address_translation': False,
'oper_status': 'up',
'policy_routing': False,
'probe_proxy_name_replies': False,
'proxy_arp': False,
'router_discovery': False,
'rtp_ip_header_compression': False,
'security_level': 'default',
'split_horizon': True,
'tcp_ip_header_compression': False,
'unicast_routing_topologies': {'topology': {'base': {'status': 'up'}}},
'wccp': {'redirect_exclude': False,
'redirect_inbound': True,
'redirect_outbound': False}},
'GigabitEthernet0/1/1': {'enabled': False, 'oper_status': 'down'},
'Loopback0': {'address_determined_by': 'non-volatile memory',
'bgp_policy_mapping': False,
'directed_broadcast_forwarding': False,
'enabled': True,
'icmp': {'mask_replies': 'never sent',
'redirects': 'always sent',
'unreachables': 'always sent'},
'input_features': ['MCI Check'],
'ip_access_violation_accounting': False,
'ip_cef_switching': True,
'ip_cef_switching_turbo_vector': True,
'ip_fast_switching': True,
'ip_flow_switching': False,
'ip_multicast_distributed_fast_switching': False,
'ip_multicast_fast_switching': True,
'ip_null_turbo_vector': True,
'ip_output_packet_accounting': False,
'ip_route_cache_flags': ['CEF', 'Fast'],
'ipv4': {'10.1.1.188/32': {'broadcast_address': '255.255.255.255',
'ip': '10.1.1.188',
'prefix_length': '32',
'secondary': False}},
'local_proxy_arp': False,
'mtu': 1514,
'network_address_translation': False,
'oper_status': 'up',
'policy_routing': False,
'probe_proxy_name_replies': False,
'proxy_arp': True,
'router_discovery': False,
'rtp_ip_header_compression': False,
'security_level': 'default',
'split_horizon': True,
'tcp_ip_header_compression': False,
'unicast_routing_topologies': {'topology': {'base': {'status': 'up'}}},
'wccp': {'redirect_exclude': False,
'redirect_inbound': False,
'redirect_outbound': False}},
'Loopback1': {'address_determined_by': 'non-volatile memory',
'bgp_policy_mapping': False,
'directed_broadcast_forwarding': False,
'enabled': True,
'icmp': {'mask_replies': 'never sent',
'redirects': 'always sent',
'unreachables': 'always sent'},
'input_features': ['MCI Check'],
'ip_access_violation_accounting': False,
'ip_cef_switching': True,
'ip_cef_switching_turbo_vector': True,
'ip_fast_switching': True,
'ip_flow_switching': False,
'ip_multicast_distributed_fast_switching': False,
'ip_multicast_fast_switching': True,
'ip_null_turbo_vector': True,
'ip_output_packet_accounting': False,
'ip_route_cache_flags': ['CEF', 'Fast'],
'ipv4': {'172.16.186.198/32': {'broadcast_address': '255.255.255.255',
'ip': '172.16.186.198',
'prefix_length': '32',
'secondary': False}},
'local_proxy_arp': False,
'mtu': 1514,
'network_address_translation': False,
'oper_status': 'up',
'policy_routing': False,
'probe_proxy_name_replies': False,
'proxy_arp': True,
'router_discovery': False,
'rtp_ip_header_compression': False,
'security_level': 'default',
'split_horizon': True,
'tcp_ip_header_compression': False,
'unicast_routing_topologies': {'topology': {'base': {'status': 'up'}}},
'wccp': {'redirect_exclude': False,
'redirect_inbound': False,
'redirect_outbound': False}},
'Loopback2': {'address_determined_by': 'non-volatile memory',
'bgp_policy_mapping': False,
'directed_broadcast_forwarding': False,
'enabled': True,
'icmp': {'mask_replies': 'never sent',
'redirects': 'always sent',
'unreachables': 'always sent'},
'input_features': ['MCI Check'],
'ip_access_violation_accounting': False,
'ip_cef_switching': True,
'ip_cef_switching_turbo_vector': True,
'ip_fast_switching': True,
'ip_flow_switching': False,
'ip_multicast_distributed_fast_switching': False,
'ip_multicast_fast_switching': True,
'ip_null_turbo_vector': True,
'ip_output_packet_accounting': False,
'ip_route_cache_flags': ['CEF', 'Fast'],
'ipv4': {'172.16.186.199/32': {'broadcast_address': '255.255.255.255',
'ip': '172.16.186.199',
'prefix_length': '32',
'secondary': False}},
'local_proxy_arp': False,
'mtu': 1514,
'network_address_translation': False,
'oper_status': 'up',
'policy_routing': False,
'probe_proxy_name_replies': False,
'proxy_arp': True,
'router_discovery': False,
'rtp_ip_header_compression': False,
'security_level': 'default',
'split_horizon': True,
'tcp_ip_header_compression': False,
'unicast_routing_topologies': {'topology': {'base': {'status': 'up'}}},
'wccp': {'redirect_exclude': False,
'redirect_inbound': False,
'redirect_outbound': False}},
'Port-channel1': {'enabled': True, 'oper_status': 'up'},
'Port-channel1.100': {'address_determined_by': 'non-volatile memory',
'bgp_policy_mapping': False,
'directed_broadcast_forwarding': False,
'enabled': True,
'helper_address': ['10.1.2.129', '10.1.3.129'],
'icmp': {'mask_replies': 'never sent',
'redirects': 'always sent',
'unreachables': 'always sent'},
'input_features': ['MCI Check', 'WCCP'],
'ip_access_violation_accounting': False,
'ip_cef_switching': True,
'ip_cef_switching_turbo_vector': True,
'ip_fast_switching': True,
'ip_flow_switching': False,
'ip_multicast_distributed_fast_switching': False,
'ip_multicast_fast_switching': True,
'ip_null_turbo_vector': True,
'ip_output_packet_accounting': False,
'ip_route_cache_flags': ['CEF', 'Fast'],
'ipv4': {'10.1.1.10/24': {'broadcast_address': '255.255.255.255',
'ip': '10.1.1.10',
'prefix_length': '24',
'secondary': False}},
'local_proxy_arp': False,
'mtu': 1500,
'multicast_groups': ['224.0.0.1',
'224.0.0.13',
'224.0.0.2',
'224.0.0.22'],
'network_address_translation': False,
'oper_status': 'up',
'policy_routing': False,
'probe_proxy_name_replies': False,
'proxy_arp': True,
'router_discovery': False,
'rtp_ip_header_compression': False,
'security_level': 'default',
'split_horizon': True,
'tcp_ip_header_compression': False,
'unicast_routing_topologies': {'topology': {'base': {'status': 'up'}}},
'wccp': {'redirect_exclude': False,
'redirect_inbound': True,
'redirect_outbound': False}},
'Port-channel1.101': {'address_determined_by': 'non-volatile memory',
'bgp_policy_mapping': False,
'directed_broadcast_forwarding': False,
'enabled': True,
'helper_address': ['10.1.2.129', '10.1.3.129'],
'icmp': {'mask_replies': 'never sent',
'redirects': 'always sent',
'unreachables': 'always sent'},
'input_features': ['MCI Check'],
'ip_access_violation_accounting': False,
'ip_cef_switching': True,
'ip_cef_switching_turbo_vector': True,
'ip_fast_switching': True,
'ip_flow_switching': False,
'ip_multicast_distributed_fast_switching': False,
'ip_multicast_fast_switching': True,
'ip_null_turbo_vector': True,
'ip_output_packet_accounting': False,
'ip_route_cache_flags': ['CEF', 'Fast'],
'ipv4': {'10.35.189.10/24': {'broadcast_address': '255.255.255.255',
'ip': '10.35.189.10',
'prefix_length': '24',
'secondary': False}},
'local_proxy_arp': False,
'mtu': 1500,
'network_address_translation': False,
'oper_status': 'up',
'policy_routing': False,
'probe_proxy_name_replies': False,
'proxy_arp': True,
'router_discovery': False,
'rtp_ip_header_compression': False,
'security_level': 'default',
'split_horizon': True,
'tcp_ip_header_compression': False,
'unicast_routing_topologies': {'topology': {'base': {'status': 'up'}}},
'wccp': {'redirect_exclude': False,
'redirect_inbound': False,
'redirect_outbound': False}},
'Port-channel1.300': {'address_determined_by': 'non-volatile memory',
'bgp_policy_mapping': False,
'directed_broadcast_forwarding': False,
'enabled': True,
'helper_address': ['10.1.2.129', '10.1.3.129'],
'icmp': {'mask_replies': 'never sent',
'redirects': 'always sent',
'unreachables': 'always sent'},
'input_features': ['MCI Check', 'WCCP'],
'ip_access_violation_accounting': False,
'ip_cef_switching': True,
'ip_cef_switching_turbo_vector': True,
'ip_fast_switching': True,
'ip_flow_switching': False,
'ip_multicast_distributed_fast_switching': False,
'ip_multicast_fast_switching': True,
'ip_null_turbo_vector': True,
'ip_output_packet_accounting': False,
'ip_route_cache_flags': ['CEF', 'Fast'],
'ipv4': {'10.1.1.10/24': {'broadcast_address': '255.255.255.255',
'ip': '10.1.1.10',
'prefix_length': '24',
'secondary': False}},
'local_proxy_arp': False,
'mtu': 1500,
'network_address_translation': False,
'oper_status': 'up',
'policy_routing': False,
'probe_proxy_name_replies': False,
'proxy_arp': True,
'router_discovery': False,
'rtp_ip_header_compression': False,
'security_level': 'default',
'split_horizon': True,
'tcp_ip_header_compression': False,
'unicast_routing_topologies': {'topology': {'base': {'status': 'up'}}},
'wccp': {'redirect_exclude': False,
'redirect_inbound': True,
'redirect_outbound': False}},
'Port-channel1.308': {'address_determined_by': 'non-volatile memory',
'bgp_policy_mapping': False,
'directed_broadcast_forwarding': False,
'enabled': True,
'helper_address': ['10.1.2.129', '10.1.3.129'],
'icmp': {'mask_replies': 'never sent',
'redirects': 'always sent',
'unreachables': 'always sent'},
'input_features': ['MCI Check'],
'ip_access_violation_accounting': False,
'ip_cef_switching': True,
'ip_cef_switching_turbo_vector': True,
'ip_fast_switching': True,
'ip_flow_switching': False,
'ip_multicast_distributed_fast_switching': False,
'ip_multicast_fast_switching': True,
'ip_null_turbo_vector': True,
'ip_output_packet_accounting': False,
'ip_route_cache_flags': ['CEF', 'Fast'],
'ipv4': {'10.1.1.10/24': {'broadcast_address': '255.255.255.255',
'ip': '10.1.1.10',
'prefix_length': '24',
'secondary': False}},
'local_proxy_arp': False,
'mtu': 1500,
'network_address_translation': False,
'oper_status': 'up',
'policy_routing': False,
'probe_proxy_name_replies': False,
'proxy_arp': True,
'router_discovery': False,
'rtp_ip_header_compression': False,
'security_level': 'default',
'split_horizon': True,
'tcp_ip_header_compression': False,
'unicast_routing_topologies': {'topology': {'base': {'status': 'up'}}},
'wccp': {'redirect_exclude': False,
'redirect_inbound': False,
'redirect_outbound': False}},
'Port-channel1.324': {'address_determined_by': 'non-volatile memory',
'bgp_policy_mapping': False,
'directed_broadcast_forwarding': False,
'enabled': True,
'helper_address': ['10.1.2.129', '10.1.3.129'],
'icmp': {'mask_replies': 'never sent',
'redirects': 'always sent',
'unreachables': 'always sent'},
'input_features': ['MCI Check'],
'ip_access_violation_accounting': False,
'ip_cef_switching': True,
'ip_cef_switching_turbo_vector': True,
'ip_fast_switching': True,
'ip_flow_switching': False,
'ip_multicast_distributed_fast_switching': False,
'ip_multicast_fast_switching': True,
'ip_null_turbo_vector': True,
'ip_output_packet_accounting': False,
'ip_route_cache_flags': ['CEF', 'Fast'],
'ipv4': {'10.1.1.10/24': {'broadcast_address': '255.255.255.255',
'ip': '10.1.1.10',
'prefix_length': '24',
'secondary': False}},
'local_proxy_arp': False,
'mtu': 1500,
'network_address_translation': False,
'oper_status': 'up',
'policy_routing': False,
'probe_proxy_name_replies': False,
'proxy_arp': True,
'router_discovery': False,
'rtp_ip_header_compression': False,
'security_level': 'default',
'split_horizon': True,
'tcp_ip_header_compression': False,
'unicast_routing_topologies': {'topology': {'base': {'status': 'up'}}},
'wccp': {'redirect_exclude': False,
'redirect_inbound': False,
'redirect_outbound': False}},
'Port-channel1.398': {'address_determined_by': 'non-volatile memory',
'bgp_policy_mapping': False,
'directed_broadcast_forwarding': False,
'enabled': True,
'helper_address': ['10.1.2.129'],
'icmp': {'mask_replies': 'never sent',
'redirects': 'always sent',
'unreachables': 'always sent'},
'input_features': ['MCI Check',
'TCP Adjust MSS',
'WCCP'],
'ip_access_violation_accounting': False,
'ip_cef_switching': True,
'ip_cef_switching_turbo_vector': True,
'ip_fast_switching': True,
'ip_flow_switching': False,
'ip_multicast_distributed_fast_switching': False,
'ip_multicast_fast_switching': True,
'ip_null_turbo_vector': True,
'ip_output_packet_accounting': False,
'ip_route_cache_flags': ['CEF', 'Fast'],
'ipv4': {'10.1.1.10/24': {'broadcast_address': '255.255.255.255',
'ip': '10.1.1.10',
'prefix_length': '24',
'secondary': False}},
'local_proxy_arp': False,
'mtu': 1500,
'multicast_groups': ['224.0.0.1',
'224.0.0.13',
'224.0.0.2',
'224.0.0.22'],
'network_address_translation': False,
'oper_status': 'up',
'policy_routing': False,
'probe_proxy_name_replies': False,
'proxy_arp': False,
'router_discovery': False,
'rtp_ip_header_compression': False,
'security_level': 'default',
'split_horizon': True,
'tcp_ip_header_compression': False,
'unicast_routing_topologies': {'topology': {'base': {'status': 'up'}}},
'wccp': {'redirect_exclude': False,
'redirect_inbound': True,
'redirect_outbound': False}},
'Service-Engine0/2/0': {'enabled': True, 'oper_status': 'up'},
'Service-Engine0/3/0': {'enabled': True, 'oper_status': 'up'},
'Service-Engine0/4/0': {'enabled': True, 'oper_status': 'up'},
'Tunnel10': {'address_determined_by': 'non-volatile memory',
'bgp_policy_mapping': False,
'directed_broadcast_forwarding': False,
'enabled': True,
'icmp': {'mask_replies': 'never sent',
'redirects': 'always sent',
'unreachables': 'always sent'},
'input_features': ['MCI Check', 'TCP Adjust MSS', 'WCCP'],
'ip_access_violation_accounting': False,
'ip_cef_switching': True,
'ip_cef_switching_turbo_vector': True,
'ip_fast_switching': True,
'ip_flow_switching': False,
'ip_multicast_distributed_fast_switching': False,
'ip_multicast_fast_switching': True,
'ip_null_turbo_vector': True,
'ip_output_packet_accounting': False,
'ip_route_cache_flags': ['CEF', 'Fast'],
'ipv4': {'172.16.186.198/30': {'broadcast_address': '255.255.255.255',
'ip': '172.16.186.198',
'prefix_length': '30',
'secondary': False}},
'local_proxy_arp': False,
'mtu': 1420,
'multicast_groups': ['224.0.0.1',
'224.0.0.13',
'224.0.0.2',
'224.0.0.22'],
'network_address_translation': False,
'oper_status': 'up',
'policy_routing': False,
'probe_proxy_name_replies': False,
'proxy_arp': True,
'router_discovery': False,
'rtp_ip_header_compression': False,
'security_level': 'default',
'split_horizon': True,
'tcp_ip_header_compression': False,
'unicast_routing_topologies': {'topology': {'base': {'status': 'up'}}},
'wccp': {'redirect_exclude': False,
'redirect_inbound': True,
'redirect_outbound': False}},
'Tunnel20': {'address_determined_by': 'non-volatile memory',
'bgp_policy_mapping': False,
'directed_broadcast_forwarding': False,
'enabled': True,
'icmp': {'mask_replies': 'never sent',
'redirects': 'always sent',
'unreachables': 'always sent'},
'input_features': ['MCI Check', 'TCP Adjust MSS'],
'ip_access_violation_accounting': False,
'ip_cef_switching': True,
'ip_cef_switching_turbo_vector': True,
'ip_fast_switching': True,
'ip_flow_switching': False,
'ip_multicast_distributed_fast_switching': False,
'ip_multicast_fast_switching': True,
'ip_null_turbo_vector': True,
'ip_output_packet_accounting': False,
'ip_route_cache_flags': ['CEF', 'Fast'],
'ipv4': {'172.16.186.198/30': {'broadcast_address': '255.255.255.255',
'ip': '172.16.186.198',
'prefix_length': '30',
'secondary': False}},
'local_proxy_arp': False,
'mtu': 1420,
'multicast_groups': ['224.0.0.1',
'224.0.0.13',
'224.0.0.2',
'224.0.0.22'],
'network_address_translation': False,
'oper_status': 'up',
'policy_routing': False,
'probe_proxy_name_replies': False,
'proxy_arp': True,
'router_discovery': False,
'rtp_ip_header_compression': False,
'security_level': 'default',
'split_horizon': True,
'tcp_ip_header_compression': False,
'unicast_routing_topologies': {'topology': {'base': {'status': 'up'}}},
'wccp': {'redirect_exclude': False,
'redirect_inbound': False,
'redirect_outbound': False}}}
def test_empty(self):
self.device = Mock(**self.empty_output)
interface_obj = ShowIpInterface(device=self.device)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = interface_obj.parse()
def test_golden(self):
self.device = Mock(**self.golden_output)
interface_obj = ShowIpInterface(device=self.device)
parsed_output = interface_obj.parse()
self.maxDiff = None
self.assertEqual(parsed_output,self.golden_parsed_output)
def test_golden2(self):
self.device = Mock(**self.golden_output2)
interface_obj = ShowIpInterface(device=self.device)
parsed_output = interface_obj.parse()
self.maxDiff = None
self.assertEqual(parsed_output, self.golden_parsed_output2)
def test_interface_golden(self):
self.device = Mock(**self.golden_interface_output)
interface_obj = ShowIpInterface(device=self.device)
parsed_output = interface_obj.parse(interface='GigabitEthernet1')
self.maxDiff = None
self.assertEqual(parsed_output, self.golden_parsed_interface_output)
def test_golden3(self):
self.device = Mock(**self.golden_output_helper)
interface_obj = ShowIpInterface(device=self.device)
parsed_output = interface_obj.parse()
self.maxDiff = None
self.assertEqual(parsed_output, self.golden_parsed_output_helper)
def test_golden4(self):
self.device = Mock(**self.golden_output4)
interface_obj = ShowIpInterface(device=self.device)
parsed_output = interface_obj.parse()
self.maxDiff = None
self.assertEqual(parsed_output, self.golden_parsed_output4)
def test_golden5(self):
self.device = Mock(**self.golden_output5)
interface_obj = ShowIpInterface(device=self.device)
parsed_output = interface_obj.parse()
self.maxDiff = None
self.assertEqual(parsed_output, self.golden_parsed_output5)
#############################################################################
# unit test for show ipv6 interface
#############################################################################
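# Each test class in this module follows the same golden-output pattern:
# golden_output holds the raw CLI text (fed to the parser through a mocked
# device via Mock(**golden_output)), and golden_parsed_output is the
# structured dict the parser is expected to return; an empty CLI response
# must raise SchemaEmptyParserError.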
class TestShowIpv6Interface(unittest.TestCase):
device = Device(name='aDevice')
empty_output = {'execute.return_value': ''}
golden_parsed_output = {
"GigabitEthernet1/0/1": {
"joined_group_addresses": [
"FF02::1"
],
"ipv6": {
"2001:DB8:2:2::2/64": {
"ip": "2001:DB8:2:2::2",
"prefix_length": "64",
"status": "tentative"
},
"2001:db8:400::1/126": {
"ip": "2001:db8:400::1",
"prefix_length": "126",
"status": "tentative"
},
"2001:DB8:1:1::1/64": {
"ip": "2001:DB8:1:1::1",
"prefix_length": "64",
"status": "tentative"
},
"2001:DB8:4:4:257:D2FF:FEFF:428C/64": {
"ip": "2001:DB8:4:4:257:D2FF:FEFF:428C",
"prefix_length": "64",
"status": "tentative",
"eui_64": True
},
"2001:DB8:3:3::3/64": {
"ip": "2001:DB8:3:3::3",
"prefix_length": "64",
"status": "tentative",
"anycast": True
},
"FE80::257:D2FF:FEFF:428C": {
"ip": "FE80::257:D2FF:FEFF:428C",
"status": "tentative",
"origin": "link_layer",
},
"enabled": True,
"nd": {
"suppress": False,
"dad_attempts": 1,
"ns_retransmit_interval": 1000,
"dad_enabled": True,
"reachable_time": 30000,
"using_time": 30000
},
"icmp": {
"error_messages_limited": 100,
"redirects": True,
"unreachables": "sent"
},
},
"oper_status": "down",
"enabled": False,
"mtu": 1500
},
"Vlan211": {
"joined_group_addresses": [
"FF02::1",
"FF02::1:FF14:1",
"FF02::1:FF28:1A71"
],
"ipv6": {
"2001:10::14:1/112": {
"ip": "2001:10::14:1",
"prefix_length": "112",
"status": "valid",
'autoconf': {
'preferred_lifetime': 604711,
'valid_lifetime': 2591911,
},
},
"FE80::257:D2FF:FE28:1A71": {
"ip": "FE80::257:D2FF:FE28:1A71",
"status": "valid",
"origin": "link_layer",
},
"enabled": True,
"nd": {
"suppress": False,
"dad_attempts": 1,
"ns_retransmit_interval": 1000,
"dad_enabled": True,
"reachable_time": 30000,
"using_time": 30000
},
"icmp": {
"error_messages_limited": 100,
"redirects": True,
"unreachables": "sent"
},
},
"oper_status": "up",
"enabled": True,
"autoconf": True,
"mtu": 1500
},
"GigabitEthernet3": {
"enabled": True,
"joined_group_addresses": [
"FF02::1",
"FF02::1:FF1E:4F2",
"FF02::2"
],
"ipv6": {
"enabled": False,
"FE80::5054:FF:FE1E:4F2": {
"ip": "FE80::5054:FF:FE1E:4F2",
"status": "valid",
"origin": "link_layer",
},
"unnumbered": {
"interface_ref": "Loopback0",
},
"icmp": {
"unreachables": "sent",
"redirects": True,
"error_messages_limited": 100
},
"nd": {
"suppress": False,
"dad_attempts": 1,
"dad_enabled": True,
"reachable_time": 30000,
"using_time": 30000,
"advertised_reachable_time": 0,
"advertised_retransmit_interval": 0,
"router_advertisements_interval": 200,
"router_advertisements_live": 1800,
"advertised_default_router_preference": 'Medium',
"advertised_reachable_time_unspecified": True,
"advertised_retransmit_interval_unspecified": True,
},
},
"oper_status": "up",
"mtu": 1500,
"addresses_config_method": 'stateless autoconfig',
}
}
golden_output = {'execute.return_value': '''
Vlan211 is up, line protocol is up
IPv6 is enabled, link-local address is FE80::257:D2FF:FE28:1A71
No Virtual link-local address(es):
Stateless address autoconfig enabled
Global unicast address(es):
2001:10::14:1, subnet is 2001:10::14:0/112
valid lifetime 2591911 preferred lifetime 604711
Joined group address(es):
FF02::1
FF02::1:FF14:1
FF02::1:FF28:1A71
MTU is 1500 bytes
ICMP error messages limited to one every 100 milliseconds
ICMP redirects are enabled
ICMP unreachables are sent
ND DAD is enabled, number of DAD attempts: 1
ND reachable time is 30000 milliseconds (using 30000)
ND NS retransmit interval is 1000 milliseconds
GigabitEthernet1/0/1 is administratively down, line protocol is down
IPv6 is tentative, link-local address is FE80::257:D2FF:FEFF:428C [TEN]
No Virtual link-local address(es):
Description: desc
Global unicast address(es):
2001:db8:400::1, subnet is 2001:db8:400::/126 [TEN]
2001:DB8:1:1::1, subnet is 2001:DB8:1:1::/64 [TEN]
2001:DB8:2:2::2, subnet is 2001:DB8:2:2::/64 [TEN]
2001:DB8:3:3::3, subnet is 2001:DB8:3:3::/64 [ANY/TEN]
2001:DB8:4:4:257:D2FF:FEFF:428C, subnet is 2001:DB8:4:4::/64 [EUI/TEN]
Joined group address(es):
FF02::1
MTU is 1500 bytes
ICMP error messages limited to one every 100 milliseconds
ICMP redirects are enabled
ICMP unreachables are sent
ND DAD is enabled, number of DAD attempts: 1
ND reachable time is 30000 milliseconds (using 30000)
ND NS retransmit interval is 1000 milliseconds
GigabitEthernet3 is up, line protocol is up
IPv6 is enabled, link-local address is FE80::5054:FF:FE1E:4F2
No Virtual link-local address(es):
Interface is unnumbered. Using address of Loopback0
No global unicast address is configured
Joined group address(es):
FF02::1
FF02::2
FF02::1:FF1E:4F2
MTU is 1500 bytes
ICMP error messages limited to one every 100 milliseconds
ICMP redirects are enabled
ICMP unreachables are sent
ND DAD is enabled, number of DAD attempts: 1
ND reachable time is 30000 milliseconds (using 30000)
ND advertised reachable time is 0 (unspecified)
ND advertised retransmit interval is 0 (unspecified)
ND router advertisements are sent every 200 seconds
ND router advertisements live for 1800 seconds
ND advertised default router preference is Medium
Hosts use stateless autoconfig for addresses.
'''}
golden_parsed_output1 = {
'GigabitEthernet2': {
'enabled': True,
'oper_status': 'up',
'ipv6': {
'FE80::F816:3EFF:FE19:ABBA': {
'ip': 'FE80::F816:3EFF:FE19:ABBA',
'origin': 'link_layer',
'status': 'valid'},
'2001:db8:8548:1::1/64': {
'ip': '2001:db8:8548:1::1',
'prefix_length': '64',
'status': 'valid'},
'enabled': True,
'icmp': {
'error_messages_limited': 100,
'redirects': True,
'unreachables': 'sent'},
'nd': {
'suppress': True,
'dad_enabled': True,
'dad_attempts': 1,
'reachable_time': 30000,
'using_time': 30000,
'advertised_reachable_time': 0,
'advertised_reachable_time_unspecified': True,
'advertised_retransmit_interval': 0,
'advertised_retransmit_interval_unspecified': True,
'router_advertisements_live': 1801,
'advertised_default_router_preference': 'Medium'}},
'joined_group_addresses': ['FF02::1',
'FF02::1:FF00:1',
'FF02::1:FF19:ABBA',
'FF02::2',
'FF02::5',
'FF02::6'],
'mtu': 1500,
'addresses_config_method': 'stateless autoconfig'},
'GigabitEthernet3': {
'enabled': True,
'oper_status': 'up',
'ipv6': {
'FE80::F816:3EFF:FE72:8407': {
'ip': 'FE80::F816:3EFF:FE72:8407',
'origin': 'link_layer',
'status': 'valid'},
'2001:db8:888c:1::1/64': {
'ip': '2001:db8:888c:1::1',
'prefix_length': '64',
'status': 'valid'},
'enabled': True,
'icmp': {
'error_messages_limited': 100,
'redirects': True,
'unreachables': 'sent'},
'nd': {
'suppress': False,
'dad_enabled': True,
'dad_attempts': 1,
'reachable_time': 30000,
'using_time': 30000,
'advertised_reachable_time': 0,
'advertised_reachable_time_unspecified': True,
'advertised_retransmit_interval': 0,
'advertised_retransmit_interval_unspecified': True,
'router_advertisements_interval': 200,
'router_advertisements_live': 1800,
'advertised_default_router_preference': 'Medium'}},
'joined_group_addresses': ['FF02::1',
'FF02::1:FF00:1',
'FF02::1:FF72:8407',
'FF02::2',
'FF02::5',
'FF02::6'],
'mtu': 1500,
'vrf': 'vrf1',
'addresses_config_method': 'stateless autoconfig'},
'GigabitEthernet4': {
'enabled': True,
'oper_status': 'up',
'ipv6': {
'FE80::F816:3EFF:FE19:8682': {
'ip': 'FE80::F816:3EFF:FE19:8682',
'origin': 'link_layer',
'status': 'valid'},
'2001:db8:c56d:1::1/64': {
'ip': '2001:db8:c56d:1::1',
'prefix_length': '64',
'status': 'valid'},
'enabled': True,
'icmp': {
'error_messages_limited': 100,
'redirects': True,
'unreachables': 'sent'},
'nd': {
'suppress': False,
'dad_enabled': True,
'dad_attempts': 1,
'reachable_time': 30000,
'using_time': 30000,
'advertised_reachable_time': 0,
'advertised_reachable_time_unspecified': True,
'advertised_retransmit_interval': 0,
'advertised_retransmit_interval_unspecified': True,
'router_advertisements_interval': 200,
'router_advertisements_live': 1800,
'advertised_default_router_preference': 'Medium'}},
'joined_group_addresses': ['FF02::1',
'FF02::1:FF00:1',
'FF02::1:FF19:8682',
'FF02::2',
'FF02::5',
'FF02::6'],
'mtu': 1500,
'addresses_config_method': 'stateless autoconfig'},
'GigabitEthernet5': {
'enabled': True,
'oper_status': 'up',
'ipv6': {
'FE80::F816:3EFF:FEC7:8140': {
'ip': 'FE80::F816:3EFF:FEC7:8140',
'origin': 'link_layer',
'status': 'valid'},
'2001:db8:c8d1:1::1/64': {
'ip': '2001:db8:c8d1:1::1',
'prefix_length': '64',
'status': 'valid'},
'enabled': True,
'icmp': {
'error_messages_limited': 100,
'redirects': True,
'unreachables': 'sent'},
'nd': {
'suppress': False,
'dad_enabled': True,
'dad_attempts': 1,
'reachable_time': 30000,
'using_time': 30000,
'advertised_reachable_time': 0,
'advertised_reachable_time_unspecified': True,
'advertised_retransmit_interval': 0,
'advertised_retransmit_interval_unspecified': True,
'router_advertisements_interval': 200,
'router_advertisements_live': 1800,
'advertised_default_router_preference': 'Medium'}},
'joined_group_addresses': ['FF02::1',
'FF02::1:FF00:1',
'FF02::1:FFC7:8140',
'FF02::2',
'FF02::5',
'FF02::6'],
'mtu': 1500,
'vrf': 'vrf1',
'addresses_config_method': 'stateless autoconfig'},
'Loopback0': {
'enabled': True,
'oper_status': 'up',
'ipv6': {
'FE80::21E:49FF:FE5D:CC00': {
'ip': 'FE80::21E:49FF:FE5D:CC00',
'origin': 'link_layer',
'status': 'valid'},
'2001:1:1::1/128': {
'ip': '2001:1:1::1',
'prefix_length': '128',
'status': 'valid'},
'enabled': True,
'icmp': {
'error_messages_limited': 100,
'redirects': True,
'unreachables': 'sent'},
'nd': {
'suppress': True,
'reachable_time': 30000,
'using_time': 30000,
'advertised_reachable_time': 0,
'advertised_reachable_time_unspecified': True,
'advertised_retransmit_interval': 0,
'advertised_retransmit_interval_unspecified': True,
'router_advertisements_live': 1800,
'advertised_default_router_preference': 'Medium'}},
'joined_group_addresses': ['FF02::1',
'FF02::1:FF00:1',
'FF02::1:FF5D:CC00',
'FF02::2',
'FF02::5'],
'mtu': 1514,
'addresses_config_method': 'stateless autoconfig'},
'Loopback1': {
'enabled': True,
'oper_status': 'up',
'ipv6': {
'FE80::21E:49FF:FE5D:CC00': {
'ip': 'FE80::21E:49FF:FE5D:CC00',
'origin': 'link_layer',
'status': 'valid'},
'2001:11:11::11/128': {
'ip': '2001:11:11::11',
'prefix_length': '128',
'status': 'valid'},
'enabled': True,
'icmp': {
'error_messages_limited': 100,
'redirects': True,
'unreachables': 'sent'},
'nd': {
'suppress': True,
'reachable_time': 30000,
'using_time': 30000,
'advertised_reachable_time': 0,
'advertised_reachable_time_unspecified': True,
'advertised_retransmit_interval': 0,
'advertised_retransmit_interval_unspecified': True,
'router_advertisements_live': 1800,
'advertised_default_router_preference': 'Medium'}},
'joined_group_addresses': ['FF02::1',
'FF02::1:FF00:11',
'FF02::1:FF5D:CC00',
'FF02::2',
'FF02::5'],
'mtu': 1514,
'vrf': 'vrf1',
'addresses_config_method': 'stateless autoconfig'}}
golden_output1 = {'execute.return_value': '''
csr1kv-1#show ipv6 interface
GigabitEthernet2 is up, line protocol is up
IPv6 is enabled, link-local address is FE80::F816:3EFF:FE19:ABBA
No Virtual link-local address(es):
Global unicast address(es):
2001:db8:8548:1::1, subnet is 2001:db8:8548:1::/64
Joined group address(es):
FF02::1
FF02::2
FF02::5
FF02::6
FF02::1:FF00:1
FF02::1:FF19:ABBA
MTU is 1500 bytes
ICMP error messages limited to one every 100 milliseconds
ICMP redirects are enabled
ICMP unreachables are sent
ND DAD is enabled, number of DAD attempts: 1
ND reachable time is 30000 milliseconds (using 30000)
ND advertised reachable time is 0 (unspecified)
ND advertised retransmit interval is 0 (unspecified)
ND router advertisements live for 1801 seconds
ND advertised default router preference is Medium
ND RAs are suppressed (periodic)
Hosts use stateless autoconfig for addresses.
GigabitEthernet3 is up, line protocol is up
IPv6 is enabled, link-local address is FE80::F816:3EFF:FE72:8407
No Virtual link-local address(es):
Global unicast address(es):
2001:db8:888c:1::1, subnet is 2001:db8:888c:1::/64
Joined group address(es):
FF02::1
FF02::2
FF02::5
FF02::6
FF02::1:FF00:1
FF02::1:FF72:8407
MTU is 1500 bytes
VPN Routing/Forwarding "vrf1"
ICMP error messages limited to one every 100 milliseconds
ICMP redirects are enabled
ICMP unreachables are sent
ND DAD is enabled, number of DAD attempts: 1
ND reachable time is 30000 milliseconds (using 30000)
ND advertised reachable time is 0 (unspecified)
ND advertised retransmit interval is 0 (unspecified)
ND router advertisements are sent every 200 seconds
ND router advertisements live for 1800 seconds
ND advertised default router preference is Medium
Hosts use stateless autoconfig for addresses.
GigabitEthernet4 is up, line protocol is up
IPv6 is enabled, link-local address is FE80::F816:3EFF:FE19:8682
No Virtual link-local address(es):
Global unicast address(es):
2001:db8:c56d:1::1, subnet is 2001:db8:c56d:1::/64
Joined group address(es):
FF02::1
FF02::2
FF02::5
FF02::6
FF02::1:FF00:1
FF02::1:FF19:8682
MTU is 1500 bytes
ICMP error messages limited to one every 100 milliseconds
ICMP redirects are enabled
ICMP unreachables are sent
ND DAD is enabled, number of DAD attempts: 1
ND reachable time is 30000 milliseconds (using 30000)
ND advertised reachable time is 0 (unspecified)
ND advertised retransmit interval is 0 (unspecified)
ND router advertisements are sent every 200 seconds
ND router advertisements live for 1800 seconds
ND advertised default router preference is Medium
Hosts use stateless autoconfig for addresses.
GigabitEthernet5 is up, line protocol is up
IPv6 is enabled, link-local address is FE80::F816:3EFF:FEC7:8140
No Virtual link-local address(es):
Global unicast address(es):
2001:db8:c8d1:1::1, subnet is 2001:db8:c8d1:1::/64
Joined group address(es):
FF02::1
FF02::2
FF02::5
FF02::6
FF02::1:FF00:1
FF02::1:FFC7:8140
MTU is 1500 bytes
VPN Routing/Forwarding "vrf1"
ICMP error messages limited to one every 100 milliseconds
ICMP redirects are enabled
ICMP unreachables are sent
ND DAD is enabled, number of DAD attempts: 1
ND reachable time is 30000 milliseconds (using 30000)
ND advertised reachable time is 0 (unspecified)
ND advertised retransmit interval is 0 (unspecified)
ND router advertisements are sent every 200 seconds
ND router advertisements live for 1800 seconds
ND advertised default router preference is Medium
Hosts use stateless autoconfig for addresses.
Loopback0 is up, line protocol is up
IPv6 is enabled, link-local address is FE80::21E:49FF:FE5D:CC00
No Virtual link-local address(es):
Global unicast address(es):
2001:1:1::1, subnet is 2001:1:1::1/128
Joined group address(es):
FF02::1
FF02::2
FF02::5
FF02::1:FF00:1
FF02::1:FF5D:CC00
MTU is 1514 bytes
ICMP error messages limited to one every 100 milliseconds
ICMP redirects are enabled
ICMP unreachables are sent
ND DAD is not supported
ND reachable time is 30000 milliseconds (using 30000)
ND advertised reachable time is 0 (unspecified)
ND advertised retransmit interval is 0 (unspecified)
ND router advertisements live for 1800 seconds
ND advertised default router preference is Medium
ND RAs are suppressed (periodic)
Hosts use stateless autoconfig for addresses.
Loopback1 is up, line protocol is up
IPv6 is enabled, link-local address is FE80::21E:49FF:FE5D:CC00
No Virtual link-local address(es):
Global unicast address(es):
2001:11:11::11, subnet is 2001:11:11::11/128
Joined group address(es):
FF02::1
FF02::2
FF02::5
FF02::1:FF00:11
FF02::1:FF5D:CC00
MTU is 1514 bytes
VPN Routing/Forwarding "vrf1"
ICMP error messages limited to one every 100 milliseconds
ICMP redirects are enabled
ICMP unreachables are sent
ND DAD is not supported
ND reachable time is 30000 milliseconds (using 30000)
ND advertised reachable time is 0 (unspecified)
ND advertised retransmit interval is 0 (unspecified)
ND router advertisements live for 1800 seconds
ND advertised default router preference is Medium
ND RAs are suppressed (periodic)
Hosts use stateless autoconfig for addresses.
'''}
def test_empty(self):
self.device = Mock(**self.empty_output)
interface_obj = ShowIpv6Interface(device=self.device)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = interface_obj.parse()
def test_golden(self):
self.device = Mock(**self.golden_output)
interface_obj = ShowIpv6Interface(device=self.device)
parsed_output = interface_obj.parse()
self.maxDiff = None
self.assertEqual(parsed_output,self.golden_parsed_output)
def test_golden1(self):
self.device = Mock(**self.golden_output1)
interface_obj = ShowIpv6Interface(device=self.device)
parsed_output = interface_obj.parse()
self.maxDiff = None
self.assertEqual(parsed_output, self.golden_parsed_output1)
#############################################################################
# unit test for show interfaces trunk
#############################################################################
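# ShowInterfacesTrunk folds the four tables printed by 'show interfaces
# trunk' (mode/encapsulation/status, VLANs allowed on trunk, VLANs active
# in the management domain, and VLANs in STP forwarding state and not
# pruned) into a single per-interface dict, expanding short names such as
# Gi1/0/4 and Po12 to their full interface names.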
class TestShowInterfacesTrunk(unittest.TestCase):
device = Device(name='aDevice')
empty_output = {'execute.return_value': ''}
golden_parsed_output = {
"interface": {
"GigabitEthernet1/0/4": {
"vlans_allowed_active_in_mgmt_domain": '200-211',
"vlans_allowed_on_trunk": '200-211',
"mode": "on",
"native_vlan": "1",
"status": "trunking",
"vlans_in_stp_forwarding_not_pruned": '200-211',
"name": "GigabitEthernet1/0/4",
"encapsulation": "802.1q"
},
"GigabitEthernet1/0/23": {
"vlans_allowed_active_in_mgmt_domain": '200-211',
"vlans_allowed_on_trunk": '200-211',
"mode": "on",
"native_vlan": "1",
"status": "trunking",
"vlans_in_stp_forwarding_not_pruned": '200-211',
"name": "GigabitEthernet1/0/23",
"encapsulation": "802.1q"
},
"GigabitEthernet1/0/24": {
"vlans_allowed_active_in_mgmt_domain": '200-211',
"vlans_allowed_on_trunk": '200-211',
"mode": "on",
"native_vlan": "1",
"status": "trunking",
"vlans_in_stp_forwarding_not_pruned": 'none',
"name": "GigabitEthernet1/0/24",
"encapsulation": "802.1q"
},
"Port-channel12": {
"vlans_allowed_active_in_mgmt_domain": '100-110',
"vlans_allowed_on_trunk": '100-110',
"mode": "on",
"native_vlan": "1",
"status": "trunking",
"vlans_in_stp_forwarding_not_pruned": '100-110',
"name": "Port-channel12",
"encapsulation": "802.1q"
},
"Port-channel14": {
"vlans_allowed_active_in_mgmt_domain": '200-211, 300-302',
"vlans_allowed_on_trunk": '200-211',
"mode": "on",
"native_vlan": "1",
"status": "trunking",
"vlans_in_stp_forwarding_not_pruned": '200-211',
"name": "Port-channel14",
"encapsulation": "802.1q"
}
}
}
golden_output = {'execute.return_value': '''
Port Mode Encapsulation Status Native vlan
Gi1/0/4 on 802.1q trunking 1
Gi1/0/23 on 802.1q trunking 1
Gi1/0/24 on 802.1q trunking 1
Po12 on 802.1q trunking 1
Po14 on 802.1q trunking 1
Port Vlans allowed on trunk
Gi1/0/4 200-211
Gi1/0/23 200-211
Gi1/0/24 200-211
Po12 100-110
Po14 200-211
Port Vlans allowed and active in management domain
Gi1/0/4 200-211
Gi1/0/23 200-211
Gi1/0/24 200-211
Po12 100-110
Po14 200-211, 300-302
Port Vlans in spanning tree forwarding state and not pruned
Gi1/0/4 200-211
Gi1/0/23 200-211
Gi1/0/24 none
Po12 100-110
Port Vlans in spanning tree forwarding state and not pruned
Po14 200-211
'''}
golden_output_1 = {'execute.return_value': '''
Port Mode Encapsulation Status Native vlan
Gi1/0/4 auto n-802.1q trunking 1
Gi1/0/23 on 802.1q trunking 1
Gi1/0/24 auto n-isl trunking 1
Po12 auto isl trunking 1
Po14 on 802.1q trunking 1
Port Vlans allowed on trunk
Gi1/0/4 200-211
Gi1/0/23 200-211
Gi1/0/24 200-211
Po12 100-110
Po14 200-211
Port Vlans allowed and active in management domain
Gi1/0/4 200-211
Gi1/0/23 200-211
Gi1/0/24 200-211
Po12 100-110
Po14 200-211, 300-302
Port Vlans in spanning tree forwarding state and not pruned
Gi1/0/4 200-211
Gi1/0/23 200-211
Gi1/0/24 none
Po12 100-110
Port Vlans in spanning tree forwarding state and not pruned
Po14 200-211
'''
}
golden_parsed_output_1 = {
"interface": {
"GigabitEthernet1/0/4": {
"vlans_allowed_active_in_mgmt_domain": '200-211',
"vlans_allowed_on_trunk": '200-211',
"mode": "auto",
"native_vlan": "1",
"status": "trunking",
"vlans_in_stp_forwarding_not_pruned": '200-211',
"name": "GigabitEthernet1/0/4",
"encapsulation": "n-802.1q"
},
"GigabitEthernet1/0/23": {
"vlans_allowed_active_in_mgmt_domain": '200-211',
"vlans_allowed_on_trunk": '200-211',
"mode": "on",
"native_vlan": "1",
"status": "trunking",
"vlans_in_stp_forwarding_not_pruned": '200-211',
"name": "GigabitEthernet1/0/23",
"encapsulation": "802.1q"
},
"GigabitEthernet1/0/24": {
"vlans_allowed_active_in_mgmt_domain": '200-211',
"vlans_allowed_on_trunk": '200-211',
"mode": "auto",
"native_vlan": "1",
"status": "trunking",
"vlans_in_stp_forwarding_not_pruned": 'none',
"name": "GigabitEthernet1/0/24",
"encapsulation": "n-isl"
},
"Port-channel12": {
"vlans_allowed_active_in_mgmt_domain": '100-110',
"vlans_allowed_on_trunk": '100-110',
"mode": "auto",
"native_vlan": "1",
"status": "trunking",
"vlans_in_stp_forwarding_not_pruned": '100-110',
"name": "Port-channel12",
"encapsulation": "isl"
},
"Port-channel14": {
"vlans_allowed_active_in_mgmt_domain": '200-211, 300-302',
"vlans_allowed_on_trunk": '200-211',
"mode": "on",
"native_vlan": "1",
"status": "trunking",
"vlans_in_stp_forwarding_not_pruned": '200-211',
"name": "Port-channel14",
"encapsulation": "802.1q"
}
}
}
def test_empty(self):
self.device = Mock(**self.empty_output)
interface_obj = ShowInterfacesTrunk(device=self.device)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = interface_obj.parse()
def test_golden(self):
self.device = Mock(**self.golden_output)
interface_obj = ShowInterfacesTrunk(device=self.device)
parsed_output = interface_obj.parse()
self.assertEqual(parsed_output,self.golden_parsed_output)
def test_golden_1(self):
self.device = Mock(**self.golden_output_1)
interface_obj = ShowInterfacesTrunk(device=self.device)
parsed_output = interface_obj.parse()
self.assertEqual(parsed_output,self.golden_parsed_output_1)
#############################################################################
# unit test for show interfaces <WORD> counters
#############################################################################
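# ShowInterfacesCounters splits the two counter tables of
# 'show interfaces <interface> counters' into 'in' and 'out' sub-dicts,
# each holding octets plus unicast, multicast and broadcast packet counts.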
class TestShowInterfacesCounters(unittest.TestCase):
device = Device(name='aDevice')
empty_output = {'execute.return_value': ''}
golden_parsed_output = {
"interface": {
"GigabitEthernet1/0/1": {
"out": {
"mcast_pkts": 188396,
"bcast_pkts": 0,
"ucast_pkts": 124435064,
"name": "GigabitEthernet1/0/1",
"octets": 24884341205
},
"in": {
"mcast_pkts": 214513,
"bcast_pkts": 0,
"ucast_pkts": 15716712,
"name": "GigabitEthernet1/0/1",
"octets": 3161931167
}
}
}
}
golden_output = {'execute.return_value': '''
Port InOctets InUcastPkts InMcastPkts InBcastPkts
Gi1/0/1 3161931167 15716712 214513 0
Port OutOctets OutUcastPkts OutMcastPkts OutBcastPkts
Gi1/0/1 24884341205 124435064 188396 0
'''}
def test_empty(self):
self.device = Mock(**self.empty_output)
interface_obj = ShowInterfacesCounters(device=self.device)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = interface_obj.parse(interface='Gi1/0/1')
def test_golden(self):
self.device = Mock(**self.golden_output)
interface_obj = ShowInterfacesCounters(device=self.device)
parsed_output = interface_obj.parse(interface='GigabitEthernet1/0/1')
self.assertEqual(parsed_output,self.golden_parsed_output)
#############################################################################
# unit test for show interfaces <interface> accounting
#############################################################################
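# ShowInterfacesAccounting builds one 'accounting' dict per interface,
# keyed by lower-cased protocol name (ip, arp, ipv6, mpls, ...), with
# pkts_in/chars_in/pkts_out/chars_out taken from each table row;
# interfaces reporting "No traffic sent or received" are skipped.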
class TestShowInterfacesAccounting(unittest.TestCase):
device = Device(name='aDevice')
empty_output = {'execute.return_value': ''}
golden_parsed_output = \
{
"GigabitEthernet1": {
"accounting": {
"arp": {
"chars_in": 4590030,
"chars_out": 120,
"pkts_in": 109280,
"pkts_out": 2
},
"ip": {
"chars_in": 2173570,
"chars_out": 2167858,
"pkts_in": 22150,
"pkts_out": 22121
},
"ipv6": {
"chars_in": 1944,
"chars_out": 0,
"pkts_in": 24,
"pkts_out": 0
},
"other": {
"chars_in": 5306164,
"chars_out": 120,
"pkts_in": 112674,
"pkts_out": 2
}
}
},
"GigabitEthernet2": {
"accounting": {
"arp": {
"chars_in": 5460,
"chars_out": 5520,
"pkts_in": 91,
"pkts_out": 92
},
"ip": {
"chars_in": 968690,
"chars_out": 1148402,
"pkts_in": 11745,
"pkts_out": 10821
},
"ipv6": {
"chars_in": 70,
"chars_out": 0,
"pkts_in": 1,
"pkts_out": 0
},
"other": {
"chars_in": 741524,
"chars_out": 5520,
"pkts_in": 3483,
"pkts_out": 92
}
}
},
"GigabitEthernet3": {
"accounting": {
"arp": {
"chars_in": 5460,
"chars_out": 5520,
"pkts_in": 91,
"pkts_out": 92
},
"ip": {
"chars_in": 1190691,
"chars_out": 1376253,
"pkts_in": 15271,
"pkts_out": 14382
},
"ipv6": {
"chars_in": 70,
"chars_out": 0,
"pkts_in": 1,
"pkts_out": 0
},
"other": {
"chars_in": 741524,
"chars_out": 5520,
"pkts_in": 3483,
"pkts_out": 92
}
}
}
}
golden_parsed_output2 = {
'GigabitEthernet11': {
'description': 'OOB Net',
'accounting': {
'arp': {
'chars_in': 42242472,
'chars_out': 83700,
'pkts_in': 748749,
'pkts_out': 1395,
},
'ip': {
'chars_in': 11143657,
'chars_out': 76200963,
'pkts_in': 190404,
'pkts_out': 233969,
},
'ipv6': {
'chars_in': 29412,
'chars_out': 0,
'pkts_in': 374,
'pkts_out': 0,
},
'other': {
'chars_in': 42241938,
'chars_out': 83700,
'pkts_in': 748738,
'pkts_out': 1395,
},
},
},
'GigabitEthernet12': {
'description': 'toP',
'accounting': {
'arp': {
'chars_in': 6748,
'chars_out': 7076,
'pkts_in': 111,
'pkts_out': 115,
},
'dec mop': {
'chars_in': 154,
'chars_out': 154,
'pkts_in': 2,
'pkts_out': 2,
},
'ip': {
'chars_in': 20043363,
'chars_out': 17367856,
'pkts_in': 244424,
'pkts_out': 196065,
},
'mpls': {
'chars_in': 0,
'chars_out': 3379706,
'pkts_in': 0,
'pkts_out': 49529,
},
'other': {
'chars_in': 6926,
'chars_out': 7166,
'pkts_in': 112,
'pkts_out': 116,
},
},
},
}
golden_output = {'execute.return_value': '''
show interface accounting
GigabitEthernet1
Protocol Pkts In Chars In Pkts Out Chars Out
Other 112674 5306164 2 120
IP 22150 2173570 22121 2167858
ARP 109280 4590030 2 120
IPv6 24 1944 0 0
GigabitEthernet2
Protocol Pkts In Chars In Pkts Out Chars Out
Other 3483 741524 92 5520
IP 11745 968690 10821 1148402
ARP 91 5460 92 5520
IPv6 1 70 0 0
GigabitEthernet3
Protocol Pkts In Chars In Pkts Out Chars Out
Other 3483 741524 92 5520
IP 15271 1190691 14382 1376253
ARP 91 5460 92 5520
IPv6 1 70 0 0
Loopback0
Protocol Pkts In Chars In Pkts Out Chars Out
No traffic sent or received on this interface.
Loopback1
Protocol Pkts In Chars In Pkts Out Chars Out
No traffic sent or received on this interface.
'''}
golden_output2 = {'execute.return_value': '''
GigabitEthernet11 OOB Net
Protocol Pkts In Chars In Pkts Out Chars Out
Other 748738 42241938 1395 83700
IP 190404 11143657 233969 76200963
ARP 748749 42242472 1395 83700
IPv6 374 29412 0 0
GigabitEthernet12 toP
Protocol Pkts In Chars In Pkts Out Chars Out
Other 112 6926 116 7166
IP 244424 20043363 196065 17367856
DEC MOP 2 154 2 154
ARP 111 6748 115 7076
MPLS 0 0 49529 3379706
GigabitEthernet13 to Device1
Protocol Pkts In Chars In Pkts Out Chars Out
No traffic sent or received on this interface.
'''
}
def test_empty(self):
self.device = Mock(**self.empty_output)
obj = ShowInterfacesAccounting(device=self.device)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse()
def test_golden(self):
self.device = Mock(**self.golden_output)
obj = ShowInterfacesAccounting(device=self.device)
parsed_output = obj.parse()
self.assertEqual(parsed_output,self.golden_parsed_output)
def test_golden2(self):
self.maxDiff = None
self.device = Mock(**self.golden_output2)
obj = ShowInterfacesAccounting(device=self.device)
parsed_output = obj.parse()
self.assertEqual(parsed_output,self.golden_parsed_output2)
###################################################
# unit test for show interfaces stats
####################################################
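# ShowInterfacesStats parses the per-interface switching-path table
# (Processor, Route cache, Multi-Processor Fwding / Distributed cache,
# Total) into pkts_in/chars_in/pkts_out/chars_out counters.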
class TestShowInterfacesStats(unittest.TestCase):
"""unit test for show interfaces stats """
device = Device(name='aDevice')
empty_output = {'execute.return_value': ''}
golden_output = {'execute.return_value': '''
Router#show interface stats
Load for five secs: 5%/1%; one minute: 8%; five minutes: 9%
Time source is NTP, 07:38:10.599 EST Thu Sep 8 2016
GigabitEthernet0/0
Switching path Pkts In Chars In Pkts Out Chars Out
Processor 0 0 225 77625
Route cache 0 0 0 0
Multi-Processor Fwding 950 221250 500 57000
Total 950 221250 725 134625
GigabitEthernet0/1
Switching path Pkts In Chars In Pkts Out Chars Out
Processor 1 60 226 77685
Route cache 0 0 0 0
Multi-Processor Fwding 500 57000 500 57000
Total 501 57060 726 134685
GigabitEthernet0/2
Switching path Pkts In Chars In Pkts Out Chars Out
Processor 1 60 226 77685
Route cache 0 0 0 0
Multi-Processor Fwding 0 0 0 0
Total 1 60 226 77685
FastEthernet1/0
Switching path Pkts In Chars In Pkts Out Chars Out
Processor 34015 5331012 1579 158190
Route cache 0 0 0 0
Total 34015 5331012 1579 158190
'''}
golden_parsed_output = {
"GigabitEthernet0/0": {
"switching_path": {
"processor": {
"pkts_in": 0,
"chars_in": 0,
"pkts_out": 225,
"chars_out": 77625
},
"route_cache": {
"pkts_in": 0,
"chars_in": 0,
"pkts_out": 0,
"chars_out": 0
},
"multi_processor_fwding": {
"pkts_in": 950,
"chars_in": 221250,
"pkts_out": 500,
"chars_out": 57000
},
"total": {
"pkts_in": 950,
"chars_in": 221250,
"pkts_out": 725,
"chars_out": 134625
}
}
},
"GigabitEthernet0/1": {
"switching_path": {
"processor": {
"pkts_in": 1,
"chars_in": 60,
"pkts_out": 226,
"chars_out": 77685
},
"route_cache": {
"pkts_in": 0,
"chars_in": 0,
"pkts_out": 0,
"chars_out": 0
},
"multi_processor_fwding": {
"pkts_in": 500,
"chars_in": 57000,
"pkts_out": 500,
"chars_out": 57000
},
"total": {
"pkts_in": 501,
"chars_in": 57060,
"pkts_out": 726,
"chars_out": 134685
}
}
},
"GigabitEthernet0/2": {
"switching_path": {
"processor": {
"pkts_in": 1,
"chars_in": 60,
"pkts_out": 226,
"chars_out": 77685
},
"route_cache": {
"pkts_in": 0,
"chars_in": 0,
"pkts_out": 0,
"chars_out": 0
},
"multi_processor_fwding": {
"pkts_in": 0,
"chars_in": 0,
"pkts_out": 0,
"chars_out": 0
},
"total": {
"pkts_in": 1,
"chars_in": 60,
"pkts_out": 226,
"chars_out": 77685
}
}
},
"FastEthernet1/0": {
"switching_path": {
"processor": {
"pkts_in": 34015,
"chars_in": 5331012,
"pkts_out": 1579,
"chars_out": 158190
},
"route_cache": {
"pkts_in": 0,
"chars_in": 0,
"pkts_out": 0,
"chars_out": 0
},
"total": {
"pkts_in": 34015,
"chars_in": 5331012,
"pkts_out": 1579,
"chars_out": 158190
}
}
}
}
golden_output_interface = {'execute.return_value': '''
Router#show interface gigabitEthernet 0/0/0 stats
Load for five secs: 5%/1%; one minute: 8%; five minutes: 9%
Time source is NTP, 07:38:10.599 EST Thu Sep 8 2016
GigabitEthernet0/0/0
Switching path Pkts In Chars In Pkts Out Chars Out
Processor 33 2507 33 2490
Route cache 0 0 0 0
Distributed cache 62581 53049894 125156 29719204
Total 62614 53052401 125189 29721694
'''}
golden_parsed_output_interface = {
"GigabitEthernet0/0/0": {
"switching_path": {
"processor": {
"pkts_in": 33,
"chars_in": 2507,
"pkts_out": 33,
"chars_out": 2490
},
"route_cache": {
"pkts_in": 0,
"chars_in": 0,
"pkts_out": 0,
"chars_out": 0
},
"distributed_cache": {
"pkts_in": 62581,
"chars_in": 53049894,
"pkts_out": 125156,
"chars_out": 29719204
},
"total": {
"pkts_in": 62614,
"chars_in": 53052401,
"pkts_out": 125189,
"chars_out": 29721694
}
}
}
}
def test_empty(self):
self.device = Mock(**self.empty_output)
obj = ShowInterfacesStats(device=self.device)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse()
def test_golden(self):
self.maxDiff = None
self.device = Mock(**self.golden_output)
obj = ShowInterfacesStats(device=self.device)
parsed_output = obj.parse()
self.assertEqual(parsed_output,self.golden_parsed_output)
def test_show_interfaces(self):
self.maxDiff = None
self.device = Mock(**self.golden_output_interface)
obj = ShowInterfacesStats(device=self.device)
parsed_output = obj.parse(interface='GigabitEthernet0/0/0')
self.assertEqual(parsed_output,self.golden_parsed_output_interface)
###################################################
# unit test for show interfaces description
####################################################
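# ShowInterfacesDescription maps each row of 'show interfaces description'
# to status, protocol and description (an empty string when no description
# is configured), expanding abbreviations such as Gi0/0, Po12 and Lo0.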
class TestShowInterfacesDescription(unittest.TestCase):
"""unit test for show interfaces description """
empty_output = {'execute.return_value': ''}
maxDiff = None
golden_output = {'execute.return_value': '''
Interface Status Protocol Description
Gi0/0 up up
Gi0/1 up up
Gi0/2 up up
Gi0/2.90 up up
Gi0/2.110 up up
Gi0/2.115 up up
Gi0/2.120 up up
Gi0/2.390 up up
Gi0/2.410 up up
Gi0/2.415 up up
Gi0/2.420 up up
Gi0/3 up up
Gi0/3.90 up up
Gi0/3.110 up up
Gi0/3.115 up up
Gi0/3.120 up up
Gi0/3.390 up up
Gi0/3.410 up up
Gi0/3.415 up up
Gi0/3.420 up up
Gi1/0 up up
Gi1/1 up up
Gi1/2 up up
Gi1/3 up up
Lo0 up up
Lo300 up up
Po12 up up
Po13 up up
Tu0 up up Pim Register Tunnel (Encap) for RP 2001:2:2:2::2
Tu1 up up Pim Register Tunnel (Encap) for Embedded RP
Tu2 up up Pim Register Tunnel (Encap) for RP 10.16.2.2
Tu3 up up Pim Register Tunnel (Encap) for RP 10.16.2.2 on VRF VRF1
Tu4 up up Pim Register Tunnel (Decap) for RP 10.4.1.1 on VRF VRF1
Tu5 up up Pim Register Tunnel (Decap) for RP 10.4.1.1
Tu6 up up Pim Register Tunnel (Encap) for RP 10.4.1.1 on VRF VRF1
Tu7 up up Pim Register Tunnel (Encap) for RP 10.4.1.1
'''}
golden_parsed_output = {
"interfaces": {
"GigabitEthernet0/0": {
"description": "",
"protocol": "up",
"status": "up"
},
"GigabitEthernet0/1": {
"description": "",
"protocol": "up",
"status": "up"
},
"GigabitEthernet0/2": {
"description": "",
"protocol": "up",
"status": "up"
},
"GigabitEthernet0/2.90": {
"description": "",
"protocol": "up",
"status": "up"
},
"GigabitEthernet0/2.110": {
"description": "",
"protocol": "up",
"status": "up"
},
"GigabitEthernet0/2.115": {
"description": "",
"protocol": "up",
"status": "up"
},
"GigabitEthernet0/2.120": {
"description": "",
"protocol": "up",
"status": "up"
},
"GigabitEthernet0/2.390": {
"description": "",
"protocol": "up",
"status": "up"
},
"GigabitEthernet0/2.410": {
"description": "",
"protocol": "up",
"status": "up"
},
"GigabitEthernet0/2.415": {
"description": "",
"protocol": "up",
"status": "up"
},
"GigabitEthernet0/2.420": {
"description": "",
"protocol": "up",
"status": "up"
},
"GigabitEthernet0/3": {
"description": "",
"protocol": "up",
"status": "up"
},
"GigabitEthernet0/3.90": {
"description": "",
"protocol": "up",
"status": "up"
},
"GigabitEthernet0/3.110": {
"description": "",
"protocol": "up",
"status": "up"
},
"GigabitEthernet0/3.115": {
"description": "",
"protocol": "up",
"status": "up"
},
"GigabitEthernet0/3.120": {
"description": "",
"protocol": "up",
"status": "up"
},
"GigabitEthernet0/3.390": {
"description": "",
"protocol": "up",
"status": "up"
},
"GigabitEthernet0/3.410": {
"description": "",
"protocol": "up",
"status": "up"
},
"GigabitEthernet0/3.415": {
"description": "",
"protocol": "up",
"status": "up"
},
"GigabitEthernet0/3.420": {
"description": "",
"protocol": "up",
"status": "up"
},
"GigabitEthernet1/0": {
"description": "",
"protocol": "up",
"status": "up"
},
"GigabitEthernet1/1": {
"description": "",
"protocol": "up",
"status": "up"
},
"GigabitEthernet1/2": {
"description": "",
"protocol": "up",
"status": "up"
},
"GigabitEthernet1/3": {
"description": "",
"protocol": "up",
"status": "up"
},
"Loopback0": {
"description": "",
"protocol": "up",
"status": "up"
},
"Loopback300": {
"description": "",
"protocol": "up",
"status": "up"
},
"Port-channel12": {
"description": "",
"protocol": "up",
"status": "up"
},
"Port-channel13": {
"description": "",
"protocol": "up",
"status": "up"
},
"Tunnel0": {
"description": "Pim Register Tunnel (Encap) for RP 2001:2:2:2::2",
"protocol": "up",
"status": "up"
},
"Tunnel1": {
"description": "Pim Register Tunnel (Encap) for Embedded RP",
"protocol": "up",
"status": "up"
},
"Tunnel2": {
"description": "Pim Register Tunnel (Encap) for RP 10.16.2.2",
"protocol": "up",
"status": "up"
},
"Tunnel3": {
"description": "Pim Register Tunnel (Encap) for RP 10.16.2.2 on VRF VRF1",
"protocol": "up",
"status": "up"
},
"Tunnel4": {
"description": "Pim Register Tunnel (Decap) for RP 10.4.1.1 on VRF VRF1",
"protocol": "up",
"status": "up"
},
"Tunnel5": {
"description": "Pim Register Tunnel (Decap) for RP 10.4.1.1",
"protocol": "up",
"status": "up"
},
"Tunnel6": {
"description": "Pim Register Tunnel (Encap) for RP 10.4.1.1 on VRF VRF1",
"protocol": "up",
"status": "up"
},
"Tunnel7": {
"description": "Pim Register Tunnel (Encap) for RP 10.4.1.1",
"protocol": "up",
"status": "up"
}
}
}
golden_interface_output = {'execute.return_value': '''
Interface Status Protocol Description
GigabitEthernet0/0 up up
'''}
golden_parsed_interface_output = {
"interfaces": {
"GigabitEthernet0/0": {
"description": "",
"protocol": "up",
"status": "up"
}
}
}
golden_interface_output1 = {'execute.return_value': '''
Interface Status Protocol Description
Gi0/0 up up OOB Management
Gi0/1 admin down down to router2
Lo0 up up Loopback
'''}
golden_parsed_interface_output1 = {
"interfaces": {
"GigabitEthernet0/0": {
"status": "up",
"protocol": "up",
"description": "OOB Management"
},
"GigabitEthernet0/1": {
"status": "admin down",
"protocol": "down",
"description": "to router2"
},
"Loopback0": {
"status": "up",
"protocol": "up",
"description": "Loopback"
}
}
}
def test_empty(self):
self.device = Mock(**self.empty_output)
obj = ShowInterfacesDescription(device=self.device)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse()
def test_golden(self):
self.device = Mock(**self.golden_output)
obj = ShowInterfacesDescription(device=self.device)
parsed_output = obj.parse()
self.assertEqual(parsed_output,self.golden_parsed_output)
def test_golden_interface(self):
self.device = Mock(**self.golden_interface_output)
obj = ShowInterfacesDescription(device=self.device)
parsed_output = obj.parse(interface='Gi0/0')
self.assertEqual(parsed_output,self.golden_parsed_interface_output)
def test_golden_interface1(self):
self.device = Mock(**self.golden_interface_output1)
obj = ShowInterfacesDescription(device=self.device)
parsed_output = obj.parse()
self.assertEqual(parsed_output,self.golden_parsed_interface_output1)
###################################################
# unit test for show interfaces status
####################################################
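# ShowInterfacesStatus parses the port table of 'show interfaces status';
# the 'name' and 'type' columns are optional and are omitted from the
# parsed dict when the CLI leaves them blank.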
class TestShowInterfacesStatus(unittest.TestCase):
"""unit test for show interfaces status """
empty_output = {'execute.return_value': ''}
maxDiff = None
golden_interface_output1 = {'execute.return_value': '''
show interfaces status
Port Name Status Vlan Duplex Speed Type
Gi1/1 To Abcde notconnect 1 auto auto 10/100/1000-TX
Gi1/2 TelenlqPOIU notconnect 125 full 100 10/100/1000-TX
Gi1/3 SE connected 132 a-full a-1000 10/100/1000-TX
Gi1/7 notconnect 99 auto auto 10/100/1000-TX
Gi1/10 To cft123 connected trunk a-full a-1000 10/100/1000-TX
Gi1/44 connected 550 a-full a-1000 10/100/1000-TX
Gi1/45 ASDFGH connected trunk a-full a-1000 10/100/1000-TX
Gi1/46 notconnect 99 auto auto 10/100/1000-TX
Gi2/11 APQWSR connected 136 a-full a-1000 10/100/1000-TX
Gi2/12 notconnect 99 auto auto 10/100/1000-TX
Gi2/23 connected 140 a-full a-100 10/100/1000-TX
Gi2/24 ASDFGH connected trunk a-full a-1000 10/100/1000-TX
Te3/2 inactive 1 full auto No XCVR
Gi3/4 notconnect 99 full 1000 No Gbic
Gi3/5 To loedutjb234 connected trunk full 1000 1000BaseSX
Gi3/6 To loedutjb345 connected trunk full 1000 1000BaseSX
Gi1/1/0/1 FAST-HELLO connected 4094 a-full a-1000 10/100/1000BaseTX
Te1/1/2 VSL connected trunk full a-10G 10GBase-SR
Te2/1/20 disabled 1 full auto No XCVR
Te2/1/21 VSL LINK1 disabled 1 full auto No XCVR
Po10 VSL LINK2 connected trunk a-full a-10G
'''}
golden_parsed_interface_output1 = {
'interfaces': {
'GigabitEthernet1/1': {
'duplex_code': 'auto',
'name': 'To Abcde',
'port_speed': 'auto',
'status': 'notconnect',
'type': '10/100/1000-TX',
'vlan': '1'
},
'GigabitEthernet1/1/0/1': {
'duplex_code': 'a-full',
'name': 'FAST-HELLO',
'port_speed': 'a-1000',
'status': 'connected',
'type': '10/100/1000BaseTX',
'vlan': '4094'
},
'GigabitEthernet1/10': {
'duplex_code': 'a-full',
'name': 'To cft123',
'port_speed': 'a-1000',
'status': 'connected',
'type': '10/100/1000-TX',
'vlan': 'trunk'
},
'GigabitEthernet1/2': {
'duplex_code': 'full',
'name': 'TelenlqPOIU',
'port_speed': '100',
'status': 'notconnect',
'type': '10/100/1000-TX',
'vlan': '125'
},
'GigabitEthernet1/3': {
'duplex_code': 'a-full',
'name': 'SE',
'port_speed': 'a-1000',
'status': 'connected',
'type': '10/100/1000-TX',
'vlan': '132'
},
'GigabitEthernet1/44': {
'duplex_code': 'a-full',
'port_speed': 'a-1000',
'status': 'connected',
'type': '10/100/1000-TX',
'vlan': '550'
},
'GigabitEthernet1/45': {
'duplex_code': 'a-full',
'name': 'ASDFGH',
'port_speed': 'a-1000',
'status': 'connected',
'type': '10/100/1000-TX',
'vlan': 'trunk'
},
'GigabitEthernet1/46': {
'duplex_code': 'auto',
'port_speed': 'auto',
'status': 'notconnect',
'type': '10/100/1000-TX',
'vlan': '99'
},
'GigabitEthernet1/7': {
'duplex_code': 'auto',
'port_speed': 'auto',
'status': 'notconnect',
'type': '10/100/1000-TX',
'vlan': '99'
},
'GigabitEthernet2/11': {
'duplex_code': 'a-full',
'name': 'APQWSR',
'port_speed': 'a-1000',
'status': 'connected',
'type': '10/100/1000-TX',
'vlan': '136'
},
'GigabitEthernet2/12': {
'duplex_code': 'auto',
'port_speed': 'auto',
'status': 'notconnect',
'type': '10/100/1000-TX',
'vlan': '99'
},
'GigabitEthernet2/23': {
'duplex_code': 'a-full',
'port_speed': 'a-100',
'status': 'connected',
'type': '10/100/1000-TX',
'vlan': '140'
},
'GigabitEthernet2/24': {
'duplex_code': 'a-full',
'name': 'ASDFGH',
'port_speed': 'a-1000',
'status': 'connected',
'type': '10/100/1000-TX',
'vlan': 'trunk'
},
'GigabitEthernet3/4': {
'duplex_code': 'full',
'port_speed': '1000',
'status': 'notconnect',
'type': 'No Gbic',
'vlan': '99',
},
'GigabitEthernet3/5': {
'duplex_code': 'full',
'name': 'To loedutjb234',
'port_speed': '1000',
'status': 'connected',
'type': '1000BaseSX',
'vlan': 'trunk'
},
'GigabitEthernet3/6': {
'duplex_code': 'full',
'name': 'To loedutjb345',
'port_speed': '1000',
'status': 'connected',
'type': '1000BaseSX',
'vlan': 'trunk'
},
'TenGigabitEthernet3/2': {
'duplex_code': 'full',
'port_speed': 'auto',
'status': 'inactive',
'type': 'No XCVR',
'vlan': '1',
},
'TenGigabitEthernet1/1/2': {
'duplex_code': 'full',
'name': 'VSL',
'port_speed': 'a-10G',
'status': 'connected',
'type': '10GBase-SR',
'vlan': 'trunk'
},
'TenGigabitEthernet2/1/20': {
'duplex_code': 'full',
'port_speed': 'auto',
'status': 'disabled',
'type': 'No XCVR',
'vlan': '1'
},
'TenGigabitEthernet2/1/21': {
'duplex_code': 'full',
'name': 'VSL LINK1',
'port_speed': 'auto',
'status': 'disabled',
'type': 'No XCVR',
'vlan': '1'
},
'Port-channel10': {
'duplex_code': 'a-full',
'name': 'VSL LINK2',
'port_speed': 'a-10G',
'status': 'connected',
'vlan': 'trunk'
},
}
}
def test_empty(self):
self.device = Mock(**self.empty_output)
obj = ShowInterfacesStatus(device=self.device)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse()
def test_golden_interface1(self):
self.device = Mock(**self.golden_interface_output1)
obj = ShowInterfacesStatus(device=self.device)
parsed_output = obj.parse()
self.assertEqual(parsed_output,self.golden_parsed_interface_output1)
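# ShowInterfaceTransceiverDetail turns each 'key is value' line of the
# transceiver output into a snake_cased dict entry for the interface.
# Example (hypothetical) usage against a live pyATS testbed instead of a
# mocked device -- the testbed file and device name are assumptions:
#   from genie.testbed import load
#   device = load('testbed.yaml').devices['router1']
#   device.connect()
#   parsed = ShowInterfaceTransceiverDetail(device=device).parse(
#       interface='GigabitEthernet1/0/0')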
class TestShowInterfaceTransceiverDetail(unittest.TestCase):
"""unit test for show interface {interface} transceiver detail """
maxDiff = None
empty_output = {'execute.return_value': ''}
golden_output = {'execute.return_value': '''
# show interface GigabitEthernet1/0/0 transceiver detail
transceiver is present
type is 10Gbase-LR
name is CISCO-FINISAR
part number is FTLX1474D3BCL-CS
revision is A
serial number is FNS17221JJZ
nominal bitrate is 10300 MBit/sec
Link length supported for 9/125um fiber is 10 km
cisco id is --
cisco extended id number is 4
cisco part number is 10-2457-02
cisco product id is SFP-10G-LR
cisco vendor id is V02
number of lanes 1
'''}
golden_parsed_output = {
'interfaces': {
'GigabitEthernet1/0/0': {
'cisco_extended_id_number': '4',
'cisco_id': '--',
'cisco_part_number': '10-2457-02',
'cisco_product_id': 'SFP-10G-LR',
'cisco_vendor_id': 'V02',
'link_length_supported_for_9/125um_fiber': '10 km',
'name': 'CISCO-FINISAR',
'nominal_bitrate': '10300 MBit/sec',
'number_of_lanes': '1',
'part_number': 'FTLX1474D3BCL-CS',
'revision': 'A',
'serial_number': 'FNS17221JJZ',
'transceiver': 'present',
'type': '10Gbase-LR',
},
}
}
def test_empty(self):
self.device = Mock(**self.empty_output)
obj = ShowInterfaceTransceiverDetail(device=self.device)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse(interface='GigabitEthernet1/0/0')
def test_golden_interface1(self):
self.device = Mock(**self.golden_output)
obj = ShowInterfaceTransceiverDetail(device=self.device)
parsed_output = obj.parse(interface='GigabitEthernet1/0/0')
self.assertEqual(parsed_output, self.golden_parsed_output)
if __name__ == '__main__':
unittest.main()
| 40.292039
| 161
| 0.47224
|
39b9894f6f24bdfd9733b641701f49d9da88c563
| 3,244
|
py
|
Python
|
tensorflow_model_optimization/python/core/sparsity/keras/prune_distributed_test.py
|
coffeeshaychildren/master-computing-upload
|
e9352d0d52f40ef022c74ae01ca9e03395bdf860
|
[
"Apache-2.0"
] | 1
|
2019-08-22T19:11:05.000Z
|
2019-08-22T19:11:05.000Z
|
tensorflow_model_optimization/python/core/sparsity/keras/prune_distributed_test.py
|
coffeeshaychildren/master-computing-upload
|
e9352d0d52f40ef022c74ae01ca9e03395bdf860
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_model_optimization/python/core/sparsity/keras/prune_distributed_test.py
|
coffeeshaychildren/master-computing-upload
|
e9352d0d52f40ef022c74ae01ca9e03395bdf860
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Distributed pruning test."""
import tempfile
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.distribute import collective_all_reduce_strategy
from tensorflow.python.distribute import mirrored_strategy
from tensorflow.python.distribute import one_device_strategy
from tensorflow.python.platform import test
from tensorflow_model_optimization.python.core.keras import test_utils as keras_test_utils
from tensorflow_model_optimization.python.core.sparsity.keras import prune
from tensorflow_model_optimization.python.core.sparsity.keras import pruning_callbacks
from tensorflow_model_optimization.python.core.sparsity.keras import pruning_schedule
from tensorflow_model_optimization.python.core.sparsity.keras import test_utils
def _distribution_strategies():
return [
collective_all_reduce_strategy.CollectiveAllReduceStrategy(),
mirrored_strategy.MirroredStrategy(),
# TODO(pulkitb): Add parameter_server
# parameter_server_strategy.ParameterServerStrategy(),
one_device_strategy.OneDeviceStrategy('/cpu:0'),
]
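# The strategy instances above are built once, when the module is imported,
# and reused by @parameterized.parameters for every test method run.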
class PruneDistributedTest(test.TestCase, parameterized.TestCase):
def setUp(self):
super(PruneDistributedTest, self).setUp()
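    # ConstantSparsity(0.5, 0, -1, 1): hold 50% sparsity starting at step 0,
    # never stop pruning (end_step=-1), and update the masks every step.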
self.params = {
'pruning_schedule': pruning_schedule.ConstantSparsity(0.5, 0, -1, 1),
'block_size': (1, 1),
'block_pooling_type': 'AVG'
}
@parameterized.parameters(_distribution_strategies())
def testPrunesSimpleDenseModel(self, distribution):
with distribution.scope():
model = prune.prune_low_magnitude(
keras_test_utils.build_simple_dense_model(), **self.params)
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
# Model hasn't been trained yet. Sparsity 0.0
test_utils.assert_model_sparsity(self, 0.0, model)
# Train with the pruning callback so the target sparsity is applied.
model.fit(
np.random.rand(20, 10),
keras.utils.to_categorical(np.random.randint(5, size=(20, 1)), 5),
epochs=2,
callbacks=[pruning_callbacks.UpdatePruningStep()],
batch_size=20)
model.predict(np.random.rand(20, 10))
test_utils.assert_model_sparsity(self, 0.5, model)
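    # Round-trip through HDF5: loading inside prune_scope() registers the
    # pruning wrappers, and the 50% sparsity must survive save/load.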
_, keras_file = tempfile.mkstemp('.h5')
keras.models.save_model(model, keras_file)
with prune.prune_scope():
loaded_model = keras.models.load_model(keras_file)
test_utils.assert_model_sparsity(self, 0.5, loaded_model)
if __name__ == '__main__':
test.main()
| 37.287356
| 90
| 0.734587
|
a201ebcdb6b368a42cc3ee1217855756341120a6
| 29,203
|
py
|
Python
|
heat/engine/resources/openstack/cinder/volume.py
|
larsks/heat
|
11064586e90166a037f8868835e6ce36f7306276
|
[
"Apache-2.0"
] | null | null | null |
heat/engine/resources/openstack/cinder/volume.py
|
larsks/heat
|
11064586e90166a037f8868835e6ce36f7306276
|
[
"Apache-2.0"
] | null | null | null |
heat/engine/resources/openstack/cinder/volume.py
|
larsks/heat
|
11064586e90166a037f8868835e6ce36f7306276
|
[
"Apache-2.0"
] | null | null | null |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_serialization import jsonutils
import six
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.engine import attributes
from heat.engine.clients import progress
from heat.engine import constraints
from heat.engine import properties
from heat.engine.resources import scheduler_hints as sh
from heat.engine.resources import volume_base as vb
from heat.engine import support
from heat.engine import translation
LOG = logging.getLogger(__name__)
class CinderVolume(vb.BaseVolume, sh.SchedulerHintsMixin):
"""A resource that implements Cinder volumes.
    Cinder volume is storage in the form of block devices. It can be used,
    for example, to provide storage to an instance. A volume can be created
    from a snapshot, backup or image, or simply by specifying a size.
"""
PROPERTIES = (
AVAILABILITY_ZONE, SIZE, SNAPSHOT_ID, BACKUP_ID, NAME,
DESCRIPTION, VOLUME_TYPE, METADATA, IMAGE_REF, IMAGE,
SOURCE_VOLID, CINDER_SCHEDULER_HINTS, READ_ONLY, MULTI_ATTACH,
) = (
'availability_zone', 'size', 'snapshot_id', 'backup_id', 'name',
'description', 'volume_type', 'metadata', 'imageRef', 'image',
'source_volid', 'scheduler_hints', 'read_only', 'multiattach',
)
ATTRIBUTES = (
AVAILABILITY_ZONE_ATTR, SIZE_ATTR, SNAPSHOT_ID_ATTR, DISPLAY_NAME_ATTR,
DISPLAY_DESCRIPTION_ATTR, VOLUME_TYPE_ATTR, METADATA_ATTR,
SOURCE_VOLID_ATTR, STATUS, CREATED_AT, BOOTABLE, METADATA_VALUES_ATTR,
ENCRYPTED_ATTR, ATTACHMENTS, MULTI_ATTACH_ATTR,
) = (
'availability_zone', 'size', 'snapshot_id', 'display_name',
'display_description', 'volume_type', 'metadata',
'source_volid', 'status', 'created_at', 'bootable', 'metadata_values',
'encrypted', 'attachments', 'multiattach',
)
properties_schema = {
AVAILABILITY_ZONE: properties.Schema(
properties.Schema.STRING,
_('The availability zone in which the volume will be created.')
),
SIZE: properties.Schema(
properties.Schema.INTEGER,
_('The size of the volume in GB. '
'On update only increase in size is supported. This property '
'is required unless property %(backup)s or %(vol)s or '
'%(snapshot)s is specified.')
% dict(backup=BACKUP_ID,
vol=SOURCE_VOLID,
snapshot=SNAPSHOT_ID),
update_allowed=True,
constraints=[
constraints.Range(min=1),
]
),
SNAPSHOT_ID: properties.Schema(
properties.Schema.STRING,
_('If specified, the snapshot to create the volume from.'),
constraints=[
constraints.CustomConstraint('cinder.snapshot')
]
),
BACKUP_ID: properties.Schema(
properties.Schema.STRING,
_('If specified, the backup to create the volume from.'),
update_allowed=True,
constraints=[
constraints.CustomConstraint('cinder.backup')
]
),
NAME: properties.Schema(
properties.Schema.STRING,
_('A name used to distinguish the volume.'),
update_allowed=True,
),
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('A description of the volume.'),
update_allowed=True,
),
VOLUME_TYPE: properties.Schema(
properties.Schema.STRING,
_('If specified, the type of volume to use, mapping to a '
'specific backend.'),
constraints=[
constraints.CustomConstraint('cinder.vtype')
],
update_allowed=True
),
METADATA: properties.Schema(
properties.Schema.MAP,
_('Key/value pairs to associate with the volume.'),
update_allowed=True,
),
IMAGE_REF: properties.Schema(
properties.Schema.STRING,
_('The ID of the image to create the volume from.'),
support_status=support.SupportStatus(
status=support.HIDDEN,
message=_('Use property %s.') % IMAGE,
version='5.0.0',
previous_status=support.SupportStatus(
status=support.DEPRECATED,
version='2014.1'
)
)
),
IMAGE: properties.Schema(
properties.Schema.STRING,
_('If specified, the name or ID of the image to create the '
'volume from.'),
constraints=[
constraints.CustomConstraint('glance.image')
]
),
SOURCE_VOLID: properties.Schema(
properties.Schema.STRING,
_('If specified, the volume to use as source.'),
constraints=[
constraints.CustomConstraint('cinder.volume')
]
),
CINDER_SCHEDULER_HINTS: properties.Schema(
properties.Schema.MAP,
_('Arbitrary key-value pairs specified by the client to help '
'the Cinder scheduler creating a volume.'),
support_status=support.SupportStatus(version='2015.1')
),
READ_ONLY: properties.Schema(
properties.Schema.BOOLEAN,
_('Enables or disables read-only access mode of volume.'),
support_status=support.SupportStatus(version='5.0.0'),
update_allowed=True,
),
MULTI_ATTACH: properties.Schema(
properties.Schema.BOOLEAN,
_('Whether allow the volume to be attached more than once.'),
support_status=support.SupportStatus(version='6.0.0'),
),
}
attributes_schema = {
AVAILABILITY_ZONE_ATTR: attributes.Schema(
_('The availability zone in which the volume is located.'),
type=attributes.Schema.STRING
),
SIZE_ATTR: attributes.Schema(
_('The size of the volume in GB.'),
type=attributes.Schema.STRING
),
SNAPSHOT_ID_ATTR: attributes.Schema(
_('The snapshot the volume was created from, if any.'),
type=attributes.Schema.STRING
),
DISPLAY_NAME_ATTR: attributes.Schema(
_('Name of the volume.'),
type=attributes.Schema.STRING
),
DISPLAY_DESCRIPTION_ATTR: attributes.Schema(
_('Description of the volume.'),
type=attributes.Schema.STRING
),
VOLUME_TYPE_ATTR: attributes.Schema(
_('The type of the volume mapping to a backend, if any.'),
type=attributes.Schema.STRING
),
METADATA_ATTR: attributes.Schema(
_('Key/value pairs associated with the volume.'),
type=attributes.Schema.STRING
),
SOURCE_VOLID_ATTR: attributes.Schema(
_('The volume used as source, if any.'),
type=attributes.Schema.STRING
),
STATUS: attributes.Schema(
_('The current status of the volume.'),
type=attributes.Schema.STRING
),
CREATED_AT: attributes.Schema(
_('The timestamp indicating volume creation.'),
type=attributes.Schema.STRING
),
BOOTABLE: attributes.Schema(
_('Boolean indicating if the volume can be booted or not.'),
type=attributes.Schema.STRING
),
METADATA_VALUES_ATTR: attributes.Schema(
_('Key/value pairs associated with the volume in raw dict form.'),
type=attributes.Schema.MAP
),
ENCRYPTED_ATTR: attributes.Schema(
_('Boolean indicating if the volume is encrypted or not.'),
type=attributes.Schema.STRING
),
ATTACHMENTS: attributes.Schema(
_('The list of attachments of the volume.'),
type=attributes.Schema.STRING
),
MULTI_ATTACH_ATTR: attributes.Schema(
_('Boolean indicating whether allow the volume to be attached '
'more than once.'),
type=attributes.Schema.BOOLEAN,
support_status=support.SupportStatus(version='6.0.0'),
),
}
_volume_creating_status = ['creating', 'restoring-backup', 'downloading']
entity = 'volumes'
def translation_rules(self, props):
return [
translation.TranslationRule(
props,
translation.TranslationRule.REPLACE,
[self.IMAGE],
value_path=[self.IMAGE_REF]
)
]
def _name(self):
name = self.properties[self.NAME]
if name:
return name
return super(CinderVolume, self)._name()
def _description(self):
return self.properties[self.DESCRIPTION]
def _create_arguments(self):
arguments = {
'size': self.properties[self.SIZE],
'availability_zone': self.properties[self.AVAILABILITY_ZONE],
}
scheduler_hints = self._scheduler_hints(
self.properties[self.CINDER_SCHEDULER_HINTS])
if scheduler_hints:
arguments[self.CINDER_SCHEDULER_HINTS] = scheduler_hints
if self.properties[self.IMAGE]:
arguments['imageRef'] = self.client_plugin(
'glance').find_image_by_name_or_id(
self.properties[self.IMAGE])
elif self.properties[self.IMAGE_REF]:
arguments['imageRef'] = self.properties[self.IMAGE_REF]
optionals = (self.SNAPSHOT_ID, self.VOLUME_TYPE, self.SOURCE_VOLID,
self.METADATA, self.MULTI_ATTACH)
arguments.update((prop, self.properties[prop]) for prop in optionals
if self.properties[prop] is not None)
return arguments
def _resolve_attribute(self, name):
cinder = self.client()
vol = cinder.volumes.get(self.resource_id)
if name == self.METADATA_ATTR:
return six.text_type(jsonutils.dumps(vol.metadata))
elif name == self.METADATA_VALUES_ATTR:
return vol.metadata
if name == self.DISPLAY_NAME_ATTR:
return vol.name
elif name == self.DISPLAY_DESCRIPTION_ATTR:
return vol.description
return six.text_type(getattr(vol, name))
def check_create_complete(self, vol_id):
complete = super(CinderVolume, self).check_create_complete(vol_id)
        # Cinder only supports updating the read-only flag for volumes in
        # the 'available' state; if we updated it in handle_create(), the
        # volume might still be 'creating' and Cinder would raise an error.
if complete:
self._update_read_only(self.properties[self.READ_ONLY])
return complete
def _extend_volume(self, new_size):
try:
self.client().volumes.extend(self.resource_id, new_size)
except Exception as ex:
if self.client_plugin().is_client_exception(ex):
raise exception.Error(_(
"Failed to extend volume %(vol)s - %(err)s") % {
'vol': self.resource_id, 'err': six.text_type(ex)})
else:
raise
return True
def _update_read_only(self, read_only_flag):
if read_only_flag is not None:
self.client().volumes.update_readonly_flag(self.resource_id,
read_only_flag)
return True
def _check_extend_volume_complete(self):
vol = self.client().volumes.get(self.resource_id)
if vol.status == 'extending':
LOG.debug("Volume %s is being extended" % vol.id)
return False
if vol.status != 'available':
LOG.info(_LI("Resize failed: Volume %(vol)s "
"is in %(status)s state."),
{'vol': vol.id, 'status': vol.status})
raise exception.ResourceUnknownStatus(
resource_status=vol.status,
result=_('Volume resize failed'))
LOG.info(_LI('Volume %(id)s resize complete'), {'id': vol.id})
return True
def _backup_restore(self, vol_id, backup_id):
try:
self.client().restores.restore(backup_id, vol_id)
except Exception as ex:
if self.client_plugin().is_client_exception(ex):
raise exception.Error(_(
"Failed to restore volume %(vol)s from backup %(backup)s "
"- %(err)s") % {'vol': vol_id,
'backup': backup_id,
'err': ex})
else:
raise
return True
def _check_backup_restore_complete(self):
vol = self.client().volumes.get(self.resource_id)
if vol.status == 'restoring-backup':
            LOG.debug("Volume %s is being restored from backup" % vol.id)
return False
if vol.status != 'available':
LOG.info(_LI("Restore failed: Volume %(vol)s is in %(status)s "
"state."), {'vol': vol.id, 'status': vol.status})
raise exception.ResourceUnknownStatus(
resource_status=vol.status,
result=_('Volume backup restore failed'))
LOG.info(_LI('Volume %(id)s backup restore complete'), {'id': vol.id})
return True
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
vol = None
cinder = self.client()
prg_resize = None
prg_attach = None
prg_detach = None
prg_restore = None
prg_access = None
# update the name and description for cinder volume
if self.NAME in prop_diff or self.DESCRIPTION in prop_diff:
vol = cinder.volumes.get(self.resource_id)
update_name = (prop_diff.get(self.NAME) or
self.properties[self.NAME])
update_description = (prop_diff.get(self.DESCRIPTION) or
self.properties[self.DESCRIPTION])
kwargs = self._fetch_name_and_description(update_name,
update_description)
cinder.volumes.update(vol, **kwargs)
# update the metadata for cinder volume
if self.METADATA in prop_diff:
if not vol:
vol = cinder.volumes.get(self.resource_id)
metadata = prop_diff.get(self.METADATA)
cinder.volumes.update_all_metadata(vol, metadata)
# retype
if self.VOLUME_TYPE in prop_diff:
if not vol:
vol = cinder.volumes.get(self.resource_id)
new_vol_type = prop_diff.get(self.VOLUME_TYPE)
cinder.volumes.retype(vol, new_vol_type, 'never')
# update read_only access mode
if self.READ_ONLY in prop_diff:
if not vol:
vol = cinder.volumes.get(self.resource_id)
flag = prop_diff.get(self.READ_ONLY)
prg_access = progress.VolumeUpdateAccessModeProgress(
read_only=flag)
prg_detach, prg_attach = self._detach_attach_progress(vol)
# restore the volume from backup
if self.BACKUP_ID in prop_diff:
prg_restore = progress.VolumeBackupRestoreProgress(
vol_id=self.resource_id,
backup_id=prop_diff.get(self.BACKUP_ID))
# extend volume size
if self.SIZE in prop_diff:
if not vol:
vol = cinder.volumes.get(self.resource_id)
new_size = prop_diff[self.SIZE]
if new_size < vol.size:
raise exception.NotSupported(feature=_("Shrinking volume"))
elif new_size > vol.size:
prg_resize = progress.VolumeResizeProgress(size=new_size)
prg_detach, prg_attach = self._detach_attach_progress(vol)
return prg_restore, prg_detach, prg_resize, prg_access, prg_attach
def _detach_attach_progress(self, vol):
prg_attach = None
prg_detach = None
if vol.attachments:
# NOTE(pshchelo):
# this relies on current behavior of cinder attachments,
# i.e. volume attachments is a list with len<=1,
# so the volume can be attached only to single instance,
# and id of attachment is the same as id of the volume
# it describes, so detach/attach the same volume
# will not change volume attachment id.
server_id = vol.attachments[0]['server_id']
device = vol.attachments[0]['device']
attachment_id = vol.attachments[0]['id']
prg_detach = progress.VolumeDetachProgress(
server_id, vol.id, attachment_id)
prg_attach = progress.VolumeAttachProgress(
server_id, vol.id, device)
return prg_detach, prg_attach
def _detach_volume_to_complete(self, prg_detach):
if not prg_detach.called:
self.client_plugin('nova').detach_volume(prg_detach.srv_id,
prg_detach.attach_id)
prg_detach.called = True
return False
if not prg_detach.cinder_complete:
cinder_complete_res = self.client_plugin(
).check_detach_volume_complete(prg_detach.vol_id)
prg_detach.cinder_complete = cinder_complete_res
return False
if not prg_detach.nova_complete:
prg_detach.nova_complete = self.client_plugin(
'nova').check_detach_volume_complete(prg_detach.srv_id,
prg_detach.attach_id)
return False
def _attach_volume_to_complete(self, prg_attach):
if not prg_attach.called:
prg_attach.called = self.client_plugin('nova').attach_volume(
prg_attach.srv_id, prg_attach.vol_id, prg_attach.device)
return False
if not prg_attach.complete:
prg_attach.complete = self.client_plugin(
).check_attach_volume_complete(prg_attach.vol_id)
return prg_attach.complete
def check_update_complete(self, checkers):
prg_restore, prg_detach, prg_resize, prg_access, prg_attach = checkers
if prg_restore:
if not prg_restore.called:
prg_restore.called = self._backup_restore(
prg_restore.vol_id,
prg_restore.backup_id)
return False
if not prg_restore.complete:
prg_restore.complete = self._check_backup_restore_complete()
return prg_restore.complete and not prg_resize
if not prg_resize and not prg_access:
return True
# detach volume
if prg_detach:
if not prg_detach.nova_complete:
self._detach_volume_to_complete(prg_detach)
return False
# resize volume
if prg_resize:
if not prg_resize.called:
prg_resize.called = self._extend_volume(prg_resize.size)
return False
if not prg_resize.complete:
prg_resize.complete = self._check_extend_volume_complete()
return prg_resize.complete and not prg_attach
# update read_only access mode
if prg_access:
if not prg_access.called:
prg_access.called = self._update_read_only(
prg_access.read_only)
return False
        # reattach the volume
if prg_attach:
return self._attach_volume_to_complete(prg_attach)
return True
def handle_snapshot(self):
backup = self.client().backups.create(self.resource_id)
self.data_set('backup_id', backup.id)
return backup.id
def check_snapshot_complete(self, backup_id):
backup = self.client().backups.get(backup_id)
if backup.status == 'creating':
return False
if backup.status == 'available':
return True
raise exception.Error(backup.fail_reason)
def handle_delete_snapshot(self, snapshot):
backup_id = snapshot['resource_data'].get('backup_id')
if not backup_id:
return
try:
self.client().backups.delete(backup_id)
except Exception as ex:
self.client_plugin().ignore_not_found(ex)
return
else:
return backup_id
def check_delete_snapshot_complete(self, backup_id):
if not backup_id:
return True
try:
self.client().backups.get(backup_id)
except Exception as ex:
self.client_plugin().ignore_not_found(ex)
return True
else:
return False
def _build_exclusive_options(self):
exclusive_options = []
allow_no_size_options = []
if self.properties.get(self.SNAPSHOT_ID):
exclusive_options.append(self.SNAPSHOT_ID)
allow_no_size_options.append(self.SNAPSHOT_ID)
if self.properties.get(self.SOURCE_VOLID):
exclusive_options.append(self.SOURCE_VOLID)
allow_no_size_options.append(self.SOURCE_VOLID)
if self.properties.get(self.IMAGE):
exclusive_options.append(self.IMAGE)
if self.properties.get(self.IMAGE_REF):
exclusive_options.append(self.IMAGE_REF)
return exclusive_options, allow_no_size_options
def _validate_create_sources(self):
exclusive_options, allow_no_size_ops = self._build_exclusive_options()
size = self.properties.get(self.SIZE)
if (size is None and
(len(allow_no_size_ops) != 1 or len(exclusive_options) != 1)):
msg = (_('If neither "%(backup_id)s" nor "%(size)s" is '
'provided, one and only one of "%(source_vol)s", '
'"%(snapshot_id)s" must be specified, but currently '
'specified options: %(exclusive_options)s.')
% {'backup_id': self.BACKUP_ID,
'size': self.SIZE,
'source_vol': self.SOURCE_VOLID,
'snapshot_id': self.SNAPSHOT_ID,
'exclusive_options': exclusive_options})
raise exception.StackValidationFailed(message=msg)
elif size and len(exclusive_options) > 1:
msg = (_('If "%(size)s" is provided, only one of '
'"%(image)s", "%(image_ref)s", "%(source_vol)s", '
'"%(snapshot_id)s" can be specified, but currently '
'specified options: %(exclusive_options)s.')
% {'size': self.SIZE,
'image': self.IMAGE,
'image_ref': self.IMAGE_REF,
'source_vol': self.SOURCE_VOLID,
'snapshot_id': self.SNAPSHOT_ID,
'exclusive_options': exclusive_options})
raise exception.StackValidationFailed(message=msg)
def validate(self):
"""Validate provided params."""
res = super(CinderVolume, self).validate()
if res is not None:
return res
        # cannot specify both image and imageRef
image = self.properties.get(self.IMAGE)
imageRef = self.properties.get(self.IMAGE_REF)
if image and imageRef:
raise exception.ResourcePropertyConflict(self.IMAGE,
self.IMAGE_REF)
# if not create from backup, need to check other create sources
if not self.properties.get(self.BACKUP_ID):
self._validate_create_sources()
def handle_restore(self, defn, restore_data):
backup_id = restore_data['resource_data']['backup_id']
        # We can't ignore the 'size' property: if the user updated the size
        # of the volume after the snapshot was taken, we need to change it
        # back to the old size when restoring the volume.
ignore_props = (
self.IMAGE_REF, self.IMAGE, self.SOURCE_VOLID)
props = dict(
(key, value) for (key, value) in
six.iteritems(defn.properties(self.properties_schema))
if key not in ignore_props and value is not None)
props[self.BACKUP_ID] = backup_id
return defn.freeze(properties=props)
class CinderVolumeAttachment(vb.BaseVolumeAttachment):
"""Resource for associating volume to instance.
Resource for associating existing volume to instance. Also, the location
where the volume is exposed on the instance can be specified.
"""
PROPERTIES = (
INSTANCE_ID, VOLUME_ID, DEVICE,
) = (
'instance_uuid', 'volume_id', 'mountpoint',
)
properties_schema = {
INSTANCE_ID: properties.Schema(
properties.Schema.STRING,
_('The ID of the server to which the volume attaches.'),
required=True,
update_allowed=True
),
VOLUME_ID: properties.Schema(
properties.Schema.STRING,
_('The ID of the volume to be attached.'),
required=True,
update_allowed=True,
constraints=[
constraints.CustomConstraint('cinder.volume')
]
),
DEVICE: properties.Schema(
properties.Schema.STRING,
_('The location where the volume is exposed on the instance. This '
'assignment may not be honored and it is advised that the path '
'/dev/disk/by-id/virtio-<VolumeId> be used instead.'),
update_allowed=True
),
}
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
prg_attach = None
prg_detach = None
if prop_diff:
            # Even though some combinations of changed properties could be
            # handled via UpdateReplace, we still detach the old resource
            # first so that self.resource_id is not replaced prematurely
volume_id = self.properties[self.VOLUME_ID]
server_id = self.properties[self.INSTANCE_ID]
self.client_plugin('nova').detach_volume(server_id,
self.resource_id)
prg_detach = progress.VolumeDetachProgress(
server_id, volume_id, self.resource_id)
prg_detach.called = True
if self.VOLUME_ID in prop_diff:
volume_id = prop_diff.get(self.VOLUME_ID)
device = (self.properties[self.DEVICE]
if self.properties[self.DEVICE] else None)
if self.DEVICE in prop_diff:
device = (prop_diff[self.DEVICE]
if prop_diff[self.DEVICE] else None)
if self.INSTANCE_ID in prop_diff:
server_id = prop_diff.get(self.INSTANCE_ID)
prg_attach = progress.VolumeAttachProgress(
server_id, volume_id, device)
return prg_detach, prg_attach
def check_update_complete(self, checkers):
prg_detach, prg_attach = checkers
if not (prg_detach and prg_attach):
return True
if not prg_detach.cinder_complete:
prg_detach.cinder_complete = self.client_plugin(
).check_detach_volume_complete(prg_detach.vol_id)
return False
if not prg_detach.nova_complete:
prg_detach.nova_complete = self.client_plugin(
'nova').check_detach_volume_complete(prg_detach.srv_id,
self.resource_id)
return False
if not prg_attach.called:
prg_attach.called = self.client_plugin('nova').attach_volume(
prg_attach.srv_id, prg_attach.vol_id, prg_attach.device)
return False
if not prg_attach.complete:
prg_attach.complete = self.client_plugin(
).check_attach_volume_complete(prg_attach.vol_id)
if prg_attach.complete:
self.resource_id_set(prg_attach.called)
return prg_attach.complete
return True
def resource_mapping():
return {
'OS::Cinder::Volume': CinderVolume,
'OS::Cinder::VolumeAttachment': CinderVolumeAttachment,
}
| 40.224518
| 79
| 0.59422
|
0587691c067d934bcf67aa6b9a88c77ce426d82d
| 16,041
|
py
|
Python
|
releng/generate_tests.py
|
kngoutham/test-infra
|
9720cce6f1f62be07cb63bfcb1d119b10c819353
|
[
"Apache-2.0"
] | null | null | null |
releng/generate_tests.py
|
kngoutham/test-infra
|
9720cce6f1f62be07cb63bfcb1d119b10c819353
|
[
"Apache-2.0"
] | null | null | null |
releng/generate_tests.py
|
kngoutham/test-infra
|
9720cce6f1f62be07cb63bfcb1d119b10c819353
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create e2e test definitions.
Usage example:
In $GOPATH/src/k8s.io/test-infra,
$ bazel run //releng:generate_tests -- \
--yaml-config-path=releng/test_config.yaml \
"""
import argparse
import hashlib
import os
import ruamel.yaml as yaml
# TODO(yguo0905): Generate Prow and testgrid configurations.
PROW_CONFIG_TEMPLATE = """
tags:
- generated # AUTO-GENERATED by releng/generate_tests.py - DO NOT EDIT!
interval:
cron:
labels:
preset-service-account: "true"
preset-k8s-ssh: "true"
name:
spec:
containers:
- args:
env:
image: gcr.io/k8s-testimages/kubekins-e2e:v20210418-e5f251e-master
resources:
requests:
cpu: 1000m
memory: 3Gi
limits:
cpu: 1000m
memory: 3Gi
"""
E2E_TESTGRID_CONFIG_TEMPLATE = """
name:
gcs_prefix:
column_header:
- configuration_value: node_os_image
- configuration_value: master_os_image
- configuration_value: Commit
- configuration_value: infra-commit
"""
GCS_LOG_PREFIX = "kubernetes-jenkins/logs/"
COMMENT = 'AUTO-GENERATED by releng/generate_tests.py - DO NOT EDIT.'
def get_sha1_hash(data):
"""Returns the SHA1 hash of the specified data."""
sha1_hash = hashlib.sha1()
sha1_hash.update(data.encode('utf-8'))
return sha1_hash.hexdigest()
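# For example, get_sha1_hash('abc') returns
# 'a9993e364706816aba3e25717850c26c9cd0d89d' (the standard SHA-1 test vector).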
def substitute(job_name, lines):
"""Replace '${job_name_hash}' in lines with the SHA1 hash of job_name."""
return [line.replace('${job_name_hash}', get_sha1_hash(job_name)[:10]) \
for line in lines]
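# Illustrative only: substitute('my-job', ['--gcs=${job_name_hash}']) replaces
# the placeholder with the first 10 hex digits of sha1('my-job').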
def get_args(job_name, field):
"""Returns a list of args for the given field."""
if not field:
return []
return substitute(job_name, field.get('args', []))
def write_prow_configs_file(output_file, job_defs):
"""Writes the Prow configurations into output_file."""
with open(output_file, 'w') as fp:
yaml.dump(
job_defs, fp, Dumper=yaml.RoundTripDumper, width=float("inf"))
def write_testgrid_config_file(output_file, testgrid_config):
"""Writes the TestGrid test group configurations into output_file."""
with open(output_file, 'w') as fp:
fp.write('# ' + COMMENT + '\n\n')
yaml.dump(
testgrid_config, fp, Dumper=yaml.RoundTripDumper, width=float("inf"))
def apply_job_overrides(envs_or_args, job_envs_or_args):
    '''Applies the envs or args overrides defined at the job level'''
original_envs_or_args = envs_or_args[:]
for job_env_or_arg in job_envs_or_args:
name = job_env_or_arg.split('=', 1)[0]
env_or_arg = next(
(x for x in original_envs_or_args if (x.strip().startswith('%s=' % name) or
x.strip() == name)), None)
if env_or_arg:
envs_or_args.remove(env_or_arg)
envs_or_args.append(job_env_or_arg)
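# Worked example (illustrative values): with envs_or_args == ['--timeout=60',
# '--bare'] and job_envs_or_args == ['--timeout=90'], the stale '--timeout=60'
# entry is removed and '--timeout=90' appended, yielding ['--bare', '--timeout=90'].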
class E2ENodeTest:
def __init__(self, job_name, job, config):
self.job_name = job_name
self.job = job
self.common = config['nodeCommon']
self.images = config['nodeImages']
self.k8s_versions = config['nodeK8sVersions']
self.test_suites = config['nodeTestSuites']
def __get_job_def(self, args):
"""Returns the job definition from the given args."""
return {
'scenario': 'kubernetes_e2e',
'args': args,
            'sigOwners': self.job.get('sigOwners') or ['UNKNOWN'],
# Indicates that this job definition is auto-generated.
'tags': ['generated'],
'_comment': COMMENT,
}
def __get_prow_config(self, test_suite, k8s_version):
"""Returns the Prow config for the job from the given fields."""
prow_config = yaml.round_trip_load(PROW_CONFIG_TEMPLATE)
prow_config['name'] = self.job_name
# use cluster from test_suite, or job, or not at all
if 'cluster' in test_suite:
prow_config['cluster'] = test_suite['cluster']
elif 'cluster' in self.job:
prow_config['cluster'] = self.job['cluster']
# use resources from test_suite, or job, or default
if 'resources' in test_suite:
prow_config['resources'] = test_suite['resources']
elif 'resources' in self.job:
prow_config['resources'] = self.job['resources']
# pull interval or cron from job
if 'interval' in self.job:
del prow_config['cron']
prow_config['interval'] = self.job['interval']
elif 'cron' in self.job:
            del prow_config['interval']
prow_config['cron'] = self.job['cron']
else:
raise Exception("no interval or cron definition found")
        # Assumes that the value of --timeout is in minutes.
timeout = int(next(
x[10:-1] for x in test_suite['args'] if (
x.startswith('--timeout='))))
container = prow_config['spec']['containers'][0]
if not container['args']:
container['args'] = []
if not container['env']:
container['env'] = []
# Prow timeout = job timeout + 20min
container['args'].append('--timeout=%d' % (timeout + 20))
container['args'].extend(k8s_version.get('args', []))
container['args'].append('--root=/go/src')
container['env'].extend([{'name':'GOPATH', 'value': '/go'}])
# Specify the appropriate kubekins-e2e image. This allows us to use a
# specific image (containing a particular Go version) to build and
# trigger the node e2e test to avoid issues like
# https://github.com/kubernetes/kubernetes/issues/43534.
if k8s_version.get('prowImage', None):
container['image'] = k8s_version['prowImage']
return prow_config
def generate(self):
'''Returns the job and the Prow configurations for this test.'''
fields = self.job_name.split('-')
if len(fields) != 6:
raise ValueError('Expected 6 fields in job name', self.job_name)
image = self.images[fields[3]]
k8s_version = self.k8s_versions[fields[4][3:]]
test_suite = self.test_suites[fields[5]]
# envs are disallowed in node e2e tests.
if 'envs' in self.common or 'envs' in image or 'envs' in test_suite:
raise ValueError(
'envs are disallowed in node e2e test', self.job_name)
# Generates args.
args = []
args.extend(get_args(self.job_name, self.common))
args.extend(get_args(self.job_name, image))
args.extend(get_args(self.job_name, test_suite))
# Generates job config.
job_config = self.__get_job_def(args)
# Generates prow config.
prow_config = self.__get_prow_config(test_suite, k8s_version)
# Combine --node-args
node_args = []
job_args = []
for arg in job_config['args']:
if '--node-args=' in arg:
node_args.append(arg.split('=', 1)[1])
else:
job_args.append(arg)
if node_args:
flag = '--node-args='
for node_arg in node_args:
flag += '%s ' % node_arg
job_args.append(flag.strip())
job_config['args'] = job_args
if image.get('testgrid_prefix') is not None:
dashboard = '%s-%s-%s' % (image['testgrid_prefix'], fields[3],
fields[4])
annotations = prow_config.setdefault('annotations', {})
annotations['testgrid-dashboards'] = dashboard
tab_name = '%s-%s-%s' % (fields[3], fields[4], fields[5])
annotations['testgrid-tab-name'] = tab_name
return job_config, prow_config, None
class E2ETest:
def __init__(self, output_dir, job_name, job, config):
self.env_filename = os.path.join(output_dir, '%s.env' % job_name)
self.job_name = job_name
self.job = job
self.common = config['common']
self.cloud_providers = config['cloudProviders']
self.images = config['images']
self.k8s_versions = config['k8sVersions']
self.test_suites = config['testSuites']
def __get_job_def(self, args):
"""Returns the job definition from the given args."""
return {
'scenario': 'kubernetes_e2e',
'args': args,
            'sigOwners': self.job.get('sigOwners') or ['UNKNOWN'],
# Indicates that this job definition is auto-generated.
'tags': ['generated'],
'_comment': COMMENT,
}
def __get_prow_config(self, test_suite):
"""Returns the Prow config for the e2e job from the given fields."""
prow_config = yaml.round_trip_load(PROW_CONFIG_TEMPLATE)
prow_config['name'] = self.job_name
# use cluster from test_suite, or job, or not at all
if 'cluster' in test_suite:
prow_config['cluster'] = test_suite['cluster']
elif 'cluster' in self.job:
prow_config['cluster'] = self.job['cluster']
# use resources from test_suite, or job, or default
if 'resources' in test_suite:
prow_config['resources'] = test_suite['resources']
elif 'resources' in self.job:
prow_config['resources'] = self.job['resources']
if 'interval' in self.job:
del prow_config['cron']
prow_config['interval'] = self.job['interval']
elif 'cron' in self.job:
del prow_config['interval']
prow_config['cron'] = self.job['cron']
else:
raise Exception("no interval or cron definition found")
        # Assumes that the value of --timeout is in minutes.
timeout = int(next(
x[10:-1] for x in test_suite['args'] if (
x.startswith('--timeout='))))
container = prow_config['spec']['containers'][0]
if not container['args']:
container['args'] = []
container['args'].append('--bare')
# Prow timeout = job timeout + 20min
container['args'].append('--timeout=%d' % (timeout + 20))
return prow_config
def __get_testgrid_config(self):
tg_config = yaml.round_trip_load(E2E_TESTGRID_CONFIG_TEMPLATE)
tg_config['name'] = self.job_name
tg_config['gcs_prefix'] = GCS_LOG_PREFIX + self.job_name
return tg_config
def initialize_dashboards_with_release_blocking_info(self, version):
dashboards = []
if self.job.get('releaseBlocking'):
dashboards.append('sig-release-%s-blocking' % version)
elif self.job.get('releaseInforming'):
dashboards.append('sig-release-%s-informing' % version)
else:
dashboards.append('sig-release-generated')
return dashboards
def generate(self):
'''Returns the job and the Prow configurations for this test.'''
fields = self.job_name.split('-')
if len(fields) != 7:
raise ValueError('Expected 7 fields in job name', self.job_name)
cloud_provider = self.cloud_providers[fields[3]]
image = self.images[fields[4]]
k8s_version = self.k8s_versions[fields[5][3:]]
test_suite = self.test_suites[fields[6]]
# Generates args.
args = []
args.extend(get_args(self.job_name, self.common))
args.extend(get_args(self.job_name, cloud_provider))
args.extend(get_args(self.job_name, image))
args.extend(get_args(self.job_name, k8s_version))
args.extend(get_args(self.job_name, test_suite))
# Generates job config.
job_config = self.__get_job_def(args)
# Generates Prow config.
prow_config = self.__get_prow_config(test_suite)
tg_config = self.__get_testgrid_config()
annotations = prow_config.setdefault('annotations', {})
tab_name = '%s-%s-%s-%s' % (fields[3], fields[4], fields[5], fields[6])
annotations['testgrid-tab-name'] = tab_name
dashboards = self.initialize_dashboards_with_release_blocking_info(k8s_version['version'])
if image.get('testgrid_prefix') is not None:
dashboard = '%s-%s-%s' % (image['testgrid_prefix'], fields[4],
fields[5])
dashboards.append(dashboard)
annotations['testgrid-dashboards'] = ', '.join(dashboards)
if 'testgridNumFailuresToAlert' in self.job:
annotations['testgrid-num-failures-to-alert'] = ('%s' %
self.job['testgridNumFailuresToAlert'])
return job_config, prow_config, tg_config
def for_each_job(output_dir, job_name, job, yaml_config):
"""Returns the job config and the Prow config for one test job."""
fields = job_name.split('-')
if len(fields) < 3:
raise ValueError('Expected at least 3 fields in job name', job_name)
job_type = fields[2]
# Generates configurations.
if job_type == 'e2e':
generator = E2ETest(output_dir, job_name, job, yaml_config)
elif job_type == 'e2enode':
generator = E2ENodeTest(job_name, job, yaml_config)
else:
raise ValueError('Unexpected job type ', job_type)
job_config, prow_config, testgrid_config = generator.generate()
# Applies job-level overrides.
apply_job_overrides(job_config['args'], get_args(job_name, job))
# merge job_config into prow_config
args = prow_config['spec']['containers'][0]['args']
args.append('--scenario=' + job_config['scenario'])
args.append('--')
args.extend(job_config['args'])
return prow_config, testgrid_config
def main(yaml_config_path, output_dir, testgrid_output_path):
"""Creates test job definitions.
Converts the test configurations in yaml_config_path to the job definitions
in output_dir/generated.yaml.
"""
# TODO(yguo0905): Validate the configurations from yaml_config_path.
with open(yaml_config_path) as fp:
yaml_config = yaml.safe_load(fp)
output_config = {}
output_config['periodics'] = []
testgrid_config = {'test_groups': []}
for job_name, _ in yaml_config['jobs'].items():
# Get the envs and args for each job defined under "jobs".
prow, testgrid = for_each_job(
output_dir, job_name, yaml_config['jobs'][job_name], yaml_config)
output_config['periodics'].append(prow)
if testgrid is not None:
testgrid_config['test_groups'].append(testgrid)
# Write the job definitions to --output-dir/generated.yaml
write_prow_configs_file(output_dir + 'generated.yaml', output_config)
write_testgrid_config_file(testgrid_output_path, testgrid_config)
if __name__ == '__main__':
PARSER = argparse.ArgumentParser(
description='Create test definitions from the given yaml config')
PARSER.add_argument('--yaml-config-path', help='Path to config.yaml')
PARSER.add_argument(
'--output-dir',
help='Prowjob config output dir',
default='config/jobs/kubernetes/generated/')
PARSER.add_argument(
'--testgrid-output-path',
help='Path to testgrid output file',
default='config/testgrids/generated-test-config.yaml')
ARGS = PARSER.parse_args()
main(
ARGS.yaml_config_path,
ARGS.output_dir,
ARGS.testgrid_output_path)
| 37.921986
| 100
| 0.623216
|
01e5155bb233e19097650d73951392ccef998bca
| 961
|
py
|
Python
|
linux-distro/package/nuxleus/Source/Vendor/Microsoft/IronPython-2.0.1/Lib/Kamaelia/XML/__init__.py
|
mdavid/nuxleus
|
653f1310d8bf08eaa5a7e3326c2349e56a6abdc2
|
[
"BSD-3-Clause"
] | 1
|
2017-03-28T06:41:51.000Z
|
2017-03-28T06:41:51.000Z
|
linux-distro/package/nuxleus/Source/Vendor/Microsoft/IronPython-2.0.1/Lib/Kamaelia/XML/__init__.py
|
mdavid/nuxleus
|
653f1310d8bf08eaa5a7e3326c2349e56a6abdc2
|
[
"BSD-3-Clause"
] | null | null | null |
linux-distro/package/nuxleus/Source/Vendor/Microsoft/IronPython-2.0.1/Lib/Kamaelia/XML/__init__.py
|
mdavid/nuxleus
|
653f1310d8bf08eaa5a7e3326c2349e56a6abdc2
|
[
"BSD-3-Clause"
] | 1
|
2016-12-13T21:08:58.000Z
|
2016-12-13T21:08:58.000Z
|
#!/usr/bin/env python
#
# Copyright (C) 2007 British Broadcasting Corporation and Kamaelia Contributors(1)
# All Rights Reserved.
#
# You may only modify and redistribute this under the terms of any of the
# following licenses(2): Mozilla Public License, V1.1, GNU General
# Public License, V2.0, GNU Lesser General Public License, V2.1
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://kamaelia.sourceforge.net/AUTHORS - please extend this file,
# not this notice.
# (2) Reproduced in the COPYING file, and at:
# http://kamaelia.sourceforge.net/COPYING
# Under section 3.5 of the MPL, we are using this text since we deem the MPL
# notice inappropriate for this file. As per MPL/GPL/LGPL removal of this
# notice is prohibited.
#
# Please contact us via: kamaelia-list-owner@lists.sourceforge.net
# to discuss alternative licensing.
# -------------------------------------------------------------------------
"""\
"""
| 41.782609
| 82
| 0.681582
|
0a7239c3ec59e322b2fec679af6b9e6c523c79cd
| 6,964
|
py
|
Python
|
projects/combinatory-chemistry/pool.py
|
germank/CommAI-env
|
9dc8007dac3c0e075c2b06aa012ac8fb683be2e6
|
[
"BSD-3-Clause"
] | null | null | null |
projects/combinatory-chemistry/pool.py
|
germank/CommAI-env
|
9dc8007dac3c0e075c2b06aa012ac8fb683be2e6
|
[
"BSD-3-Clause"
] | null | null | null |
projects/combinatory-chemistry/pool.py
|
germank/CommAI-env
|
9dc8007dac3c0e075c2b06aa012ac8fb683be2e6
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from base_pool import BasePool
import random
import numpy as np
from collections import Counter
from expression import Expression, atomic_factory
from contextlib import contextmanager
from reaction import Reaction
import pickle
import signal
from math import isclose
import tqdm
primitives = 'IKS'
#primitives = 'IKSBCW'
class Pool(BasePool):
def __init__(self, N, p_reduce, p_combine, p_break,
                 max_sample_reductions=250,
                 break_position='top',
                 reduce_regime='random',
                 food_size=None,
                 combination_method='consense',
                 proportions={c: 1./len(primitives) for c in primitives}):
super(Pool, self).__init__(N, food_size)
assert isclose(p_reduce + p_combine + p_break, 1)
self.p_reduce = p_reduce
self.p_combine = p_combine
self.p_break = p_break
if reduce_regime == 'priority':
self.set_p_reduce(0)
self.ticks = 0
self.proportions = [proportions[t] for t in primitives]
self.max_sample_reductions = max_sample_reductions
self.break_position = break_position
self.reduce_regime = reduce_regime
self.combination_partner = None
self.combination_method = combination_method
self._add_random_atoms(N)
def grow(self, n):
self._grow_capacity(n)
self._add_random_atoms(n)
def _grow_capacity(self, n):
self.expressions.grow_capacity(n)
def _add_random_atoms(self, n):
coin_outcomes = np.random.choice(len(primitives), n, p=self.proportions)
coin_counts = Counter(coin_outcomes)
for i, count in coin_counts.items():
term = atomic_factory(primitives[i])
self.expressions.add(term, count)
def _random_term(self):
coin = np.random.choice(len(primitives), p=self.proportions)
return atomic_factory(primitives[coin])
def load(self, fn):
state = pickle.load(open(fn, 'rb'))
if isinstance(state, Pool):
self.expressions = state.expressions
else:
self.expressions = state
def save(self, fn):
pickle.dump(self.expressions, open(fn, 'wb'))
def step(self):
if self.reduce_regime == 'random':
self.step_random_reduce()
elif self.reduce_regime == 'priority':
self.step_priority_reduce()
def step_random_reduce(self):
t = self.pop_reactive()
action = self.pick_action()
if action == 'reduce':
self.tape_reduce_or_rollback(t)
elif action == 'combine':
self.tape_combine_or_rollback(t)
elif action == 'break':
self.tape_break_or_rollback(t)
else:
assert action == 'none'
def step_priority_reduce(self):
t = self.pop_reactive()
if not self.tape_reduce(t):
action = self.pick_action()
if action == 'combine':
self.tape_combine_or_rollback(t)
elif action == 'break':
self.tape_break_or_rollback(t)
else:
raise RuntimeError('Invalid action ' + action)
def tape_combine_or_rollback(self, t):
if len(self) >= 2:
if self.combination_method == 'consense':
if self.has_combination_partner():
t2 = self.pop_combination_partner()
self.tape_combine(t, t2)
else:
self.set_combination_partner(t)
elif self.combination_method == 'unilateral':
t2 = self.pop_reactive()
self.tape_combine(t, t2)
else:
self.rollback(t)
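    # Note: under the 'consense' method a combination only happens once two
    # separate combine draws occur -- the first expression is parked in
    # self.combination_partner and consumed by the second draw, whereas
    # 'unilateral' combines immediately with another randomly popped expression.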
def tape_reduce_or_rollback(self, t):
if not self.tape_reduce(t):
self.rollback(t)
def tape_break_or_rollback(self, t):
if not self.tape_break(t):
self.rollback(t)
def has_combination_partner(self):
return self.combination_partner is not None
def pop_combination_partner(self):
x = self.combination_partner
self.combination_partner = None
return x
def set_combination_partner(self, x):
self.combination_partner = x
def pick_action(self):
r = random.random()
if r < self.p_reduce:
return 'reduce'
r -= self.p_reduce
if r < self.p_combine:
return 'combine'
r -= self.p_combine
if r < self.p_break:
return 'break'
r -= self.p_break
        assert False, f'Invalid remainder {r:.2f} (>0)'
return 'none'
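    # Note: pick_action() samples via the inverse-CDF trick. For example,
    # with p_reduce=0.5, p_combine=0.3, p_break=0.2, a draw of r=0.6 returns
    # 'combine' because 0.6 - 0.5 = 0.1 < 0.3.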
def tape_reduce(self, t):
if t.is_reducible(self):
            reduced, reactives, byproducts = t.sreduce(self, self.max_sample_reductions)
            reaction = Reaction((reduced, *byproducts), 'reduce',
                                (t, *reactives))
return self.apply_reaction(reaction)
else:
return False
def tape_combine(self, term_left, term_right):
combined = term_left.apply(term_right)
reaction = Reaction((combined,), 'combine', (term_left, term_right))
self.apply_reaction(reaction)
return True
def tape_break(self, t):
if not t.is_leaf():
if self.break_position == 'top':
term_left, term_right = t.top_break()
elif self.break_position == 'random':
term_left, term_right = t.random_break()
else:
raise RuntimeError(f'Invalid break position {self.break_position}')
reaction = Reaction((term_left, term_right), 'break', (t,))
self.apply_reaction(reaction)
return True
else:
return False
def set_p_reduce(self, p_reduce):
old_values = self.p_reduce, self.p_break, self.p_combine
self.p_reduce = p_reduce
if self.p_reduce > 1:
self.p_reduce = 1
rem = 1 - self.p_reduce
ratio = self.p_combine / (self.p_break + self.p_combine)
self.p_combine = ratio * rem
self.p_break = 1 - self.p_reduce - self.p_combine
        assert isclose(self.p_break + self.p_reduce + self.p_combine, 1)
assert self.p_break >= 0 and self.p_reduce >= 0 and self.p_combine >= 0
return old_values
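    # Worked example (illustrative values): starting from p_reduce=0.5,
    # p_combine=0.3, p_break=0.2, calling set_p_reduce(0.8) gives
    # rem = 0.2 and ratio = 0.3 / (0.2 + 0.3) = 0.6, so p_combine becomes
    # 0.12 and p_break becomes 0.08 (the probabilities still sum to 1).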
def freeze(self):
self.old_p_values = self.set_p_reduce(1)
def unfreeze(self):
self.p_reduce, self.p_break, self.p_combine = self.old_p_values
def timeout_handler(signum, frame):
raise TimeoutError()
@contextmanager
def timeout(seconds):
old_handler = signal.signal(signal.SIGALRM, timeout_handler)
signal.alarm(seconds)
try:
yield
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_handler)
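# Illustrative usage (assumes a Unix platform where SIGALRM is available):
#
#   with timeout(5):
#       pool.step()  # aborted with TimeoutError if it runs for more than 5s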
| 33.004739
| 88
| 0.610281
|
e717b01c2e809a5f11dea892bd8d7cf73464bad0
| 12,697
|
py
|
Python
|
tethysext/atcore/controllers/resource_workflows/resource_workflow_router.py
|
Aquaveo/tethysext-atcore
|
7a83ccea24fdbbe806f12154f938554dd6c8015f
|
[
"BSD-3-Clause"
] | 3
|
2020-11-05T23:50:47.000Z
|
2021-02-26T21:43:29.000Z
|
tethysext/atcore/controllers/resource_workflows/resource_workflow_router.py
|
Aquaveo/tethysext-atcore
|
7a83ccea24fdbbe806f12154f938554dd6c8015f
|
[
"BSD-3-Clause"
] | 7
|
2020-10-29T16:53:49.000Z
|
2021-05-07T19:46:47.000Z
|
tethysext/atcore/controllers/resource_workflows/resource_workflow_router.py
|
Aquaveo/tethysext-atcore
|
7a83ccea24fdbbe806f12154f938554dd6c8015f
|
[
"BSD-3-Clause"
] | null | null | null |
"""
********************************************************************************
* Name: resource_workflow_router.py
* Author: nswain
* Created On: November 19, 2018
* Copyright: (c) Aquaveo 2018
********************************************************************************
"""
import logging
from sqlalchemy.exc import StatementError
from sqlalchemy.orm.exc import NoResultFound
from django.shortcuts import redirect, reverse
from django.contrib import messages
from tethys_apps.utilities import get_active_app
from tethysext.atcore.exceptions import ATCoreException
from tethysext.atcore.controllers.resource_workflows.mixins import WorkflowViewMixin
from tethysext.atcore.models.resource_workflow_steps import ResultsResourceWorkflowStep
log = logging.getLogger(f'tethys.{__name__}')
class ResourceWorkflowRouter(WorkflowViewMixin):
"""
Router for resource workflow views. Routes to appropriate step controller.
"""
base_template = 'atcore/app_users/base.html'
http_method_names = ['get', 'post', 'delete']
def get(self, request, resource_id, workflow_id, step_id=None, result_id=None, *args, **kwargs):
"""
Route GET requests.
Controller for the following url patterns:
/resource/<resource_id>/my-custom-workflow/<workflow_id>/
/resource/<resource_id>/my-custom-workflow/<workflow_id>/step/<step_id>/
/resource/<resource_id>/my-custom-workflow/<workflow_id>/step/<step_id>/result/<result_id>/
Args:
request(HttpRequest): The request.
resource_id(str): ID of the resource this workflow applies to.
workflow_id(str): ID of the workflow.
step_id(str): ID of the step to render. Optional. Required if result_id given.
result_id(str): ID of the result to render. Optional.
args, kwargs: Additional arguments passed to the controller.
Returns:
HttpResponse: A Django response.
"""
step_id_given = step_id is not None
result_id_given = result_id is not None
_ResourceWorkflow = self.get_resource_workflow_model()
session = None
try:
make_session = self.get_sessionmaker()
session = make_session()
workflow = self.get_workflow(request, workflow_id, session=session)
if not step_id_given:
_, step = workflow.get_next_step()
# Get step id
step_id = step.id if step else None
if not step_id:
messages.warning(request, 'Could not identify next step.')
return redirect(self.back_url)
else:
step = self.get_step(request, step_id, session)
# Determine if step is result step
is_result_step = isinstance(step, ResultsResourceWorkflowStep)
# Handle result steps
if is_result_step and not result_id_given:
result = step.get_last_result()
result_id = str(result.id) if result else None
if not result_id:
messages.warning(request, 'Could not identify a result.')
return redirect(self.back_url)
# If any of the required ids were not given originally, redirect to the appropriate url with derived ids
active_app = get_active_app(request)
app_namespace = active_app.namespace
url_kwargs = {'resource_id': resource_id, 'workflow_id': workflow_id, 'step_id': step_id}
if is_result_step and not result_id_given:
# Redirect to the result page
url_name = '{}:{}_workflow_step_result'.format(app_namespace, _ResourceWorkflow.TYPE)
url_kwargs.update({'result_id': result_id})
return redirect(reverse(url_name, kwargs=url_kwargs))
elif not is_result_step and not step_id_given:
# Redirect to next step page
url_name = '{}:{}_workflow_step'.format(app_namespace, _ResourceWorkflow.TYPE)
return redirect(reverse(url_name, kwargs=url_kwargs))
except (StatementError, NoResultFound):
messages.warning(request, 'The {} could not be found.'.format(
_ResourceWorkflow.DISPLAY_TYPE_SINGULAR.lower()
))
return redirect(self.back_url)
except ATCoreException as e:
error_message = str(e)
messages.warning(request, error_message)
return redirect(self.back_url)
finally:
session and session.close()
response = self._get_response(request, resource_id, workflow_id, step_id, result_id, args, kwargs)
return response
def post(self, request, resource_id, workflow_id, step_id, result_id=None, *args, **kwargs):
"""
Route POST requests.
Args:
request(HttpRequest): The request.
resource_id(str): ID of the resource this workflow applies to.
workflow_id(str): ID of the workflow.
step_id(str): ID of the step to render.
result_id(str): ID of the result to render.
args, kwargs: Additional arguments passed to the controller.
Returns:
HttpResponse: A Django response.
"""
response = self._get_response(request, resource_id, workflow_id, step_id, result_id, args, kwargs)
return response
def delete(self, request, resource_id, workflow_id, step_id, result_id=None, *args, **kwargs):
"""
Route DELETE requests.
Args:
request(HttpRequest): The request.
resource_id(str): ID of the resource this workflow applies to.
workflow_id(str): ID of the workflow.
step_id(str): ID of the step to render.
result_id(str): ID of the result to render.
args, kwargs: Additional arguments passed to the controller.
Returns:
HttpResponse: A Django response.
"""
response = self._get_response(request, resource_id, workflow_id, step_id, result_id, args, kwargs)
return response
def _get_response(self, request, resource_id, workflow_id, step_id, result_id, args, kwargs):
"""
Get controller from step or result that will handle the request.
Args:
request(HttpRequest): The request.
resource_id(str): ID of the resource this workflow applies to.
workflow_id(str): ID of the workflow.
step_id(str): ID of the step to render.
result_id(str): ID of the result to render.
args, kwargs: Additional arguments passed to the controller.
Returns:
HttpResponse: A Django response.
"""
if result_id:
response = self._route_to_result_controller(
request=request,
resource_id=resource_id,
workflow_id=workflow_id,
step_id=step_id,
result_id=result_id,
*args, **kwargs
)
else:
response = self._route_to_step_controller(
request=request,
resource_id=resource_id,
workflow_id=workflow_id,
step_id=step_id,
*args, **kwargs
)
return response
def _route_to_step_controller(self, request, resource_id, workflow_id, step_id, *args, **kwargs):
"""
Get controller from step that will handle the request.
Args:
request(HttpRequest): The request.
resource_id(str): ID of the resource this workflow applies to.
workflow_id(str): ID of the workflow.
step_id(str): ID of the step to render.
args, kwargs: Additional arguments passed to the controller.
Returns:
HttpResponse: A Django response.
"""
_ResourceWorkflow = self.get_resource_workflow_model()
session = None
try:
make_session = self.get_sessionmaker()
session = make_session()
step = self.get_step(request, step_id, session=session)
# Validate HTTP method
if request.method.lower() not in step.controller.http_methods:
raise RuntimeError('An unexpected error has occurred: Method not allowed ({}).'.format(request.method))
controller = step.controller.instantiate(
_app=self._app,
_AppUser=self._AppUser,
_Organization=self._Organization,
_Resource=self._Resource,
_PermissionsManager=self._PermissionsManager,
_persistent_store_name=self._persistent_store_name,
_ResourceWorkflow=self._ResourceWorkflow,
_ResourceWorkflowStep=self._ResourceWorkflowStep,
base_template=self.base_template
)
response = controller(
request=request,
resource_id=resource_id,
workflow_id=workflow_id,
step_id=step_id,
back_url=self.back_url,
*args, **kwargs
)
return response
except (StatementError, NoResultFound):
messages.warning(request, 'Invalid step for workflow: {}.'.format(
_ResourceWorkflow.DISPLAY_TYPE_SINGULAR.lower()
))
return redirect(self.back_url)
except ATCoreException as e:
error_message = str(e)
messages.warning(request, error_message)
return redirect(self.back_url)
finally:
session and session.close()
def _route_to_result_controller(self, request, resource_id, workflow_id, step_id, result_id, *args, **kwargs):
"""
Get controller from result that will handle the request.
Args:
request(HttpRequest): The request.
resource_id(str): ID of the resource this workflow applies to.
workflow_id(str): ID of the workflow.
step_id(str): ID of the step to render.
result_id(str): ID of the result to render.
args, kwargs: Additional arguments passed to the controller.
Returns:
HttpResponse: A Django response.
"""
_ResourceWorkflow = self.get_resource_workflow_model()
session = None
try:
make_session = self.get_sessionmaker()
session = make_session()
step = self.get_step(request, step_id, session=session)
# Check if step is ResultsResourceWorkflowStep
if not isinstance(step, ResultsResourceWorkflowStep):
raise RuntimeError('Step must be a ResultsResourceWorkflowStep.')
# Get the result from the step
result = step.get_result(result_id=result_id)
            if not result:
                messages.error(request, 'Result not found.')
                return redirect(self.back_url)
            # Validate HTTP method
            if request.method.lower() not in result.controller.http_methods:
raise RuntimeError('An unexpected error has occurred: Method not allowed ({}).'.format(request.method))
controller = result.controller.instantiate(
_app=self._app,
_AppUser=self._AppUser,
_Organization=self._Organization,
_Resource=self._Resource,
_PermissionsManager=self._PermissionsManager,
_persistent_store_name=self._persistent_store_name,
_ResourceWorkflow=self._ResourceWorkflow,
_ResourceWorkflowStep=self._ResourceWorkflowStep,
base_template=self.base_template
)
response = controller(
request=request,
resource_id=resource_id,
workflow_id=workflow_id,
step_id=step_id,
result_id=result_id,
back_url=self.back_url,
*args, **kwargs
)
return response
except (StatementError, NoResultFound):
messages.warning(request, 'Invalid step for workflow: {}.'.format(
_ResourceWorkflow.DISPLAY_TYPE_SINGULAR.lower()
))
return redirect(self.back_url)
except ATCoreException as e:
error_message = str(e)
messages.warning(request, error_message)
return redirect(self.back_url)
finally:
session and session.close()
| 39.554517
| 119
| 0.60345
|
050e3bc55d4eaa91a7dd77d376af5f586133c3f5
| 25,962
|
py
|
Python
|
tests/test_observable/test_replay.py
|
christiansandberg/RxPY
|
036027d2858ea6c9d45839c863bd791e5bb50c36
|
[
"MIT"
] | null | null | null |
tests/test_observable/test_replay.py
|
christiansandberg/RxPY
|
036027d2858ea6c9d45839c863bd791e5bb50c36
|
[
"MIT"
] | null | null | null |
tests/test_observable/test_replay.py
|
christiansandberg/RxPY
|
036027d2858ea6c9d45839c863bd791e5bb50c36
|
[
"MIT"
] | null | null | null |
import unittest
import reactivex
from reactivex import operators as ops
from reactivex.testing import ReactiveTest, TestScheduler
on_next = ReactiveTest.on_next
on_completed = ReactiveTest.on_completed
on_error = ReactiveTest.on_error
subscribe = ReactiveTest.subscribe
subscribed = ReactiveTest.subscribed
disposed = ReactiveTest.disposed
created = ReactiveTest.created
class TestReplay(unittest.TestCase):
def test_replay_count_basic(self):
connection = [None]
subscription = [None]
ys = [None]
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(110, 7),
on_next(220, 3),
on_next(280, 4),
on_next(290, 1),
on_next(340, 8),
on_next(360, 5),
on_next(370, 6),
on_next(390, 7),
on_next(410, 13),
on_next(430, 2),
on_next(450, 9),
on_next(520, 11),
on_next(560, 20),
on_completed(600),
)
results = scheduler.create_observer()
def action0(scheduler, state):
ys[0] = xs.pipe(ops.replay(buffer_size=3, scheduler=scheduler))
scheduler.schedule_absolute(created, action0)
def action1(scheduler, state):
subscription[0] = ys[0].subscribe(results, scheduler)
scheduler.schedule_absolute(450, action1)
def action2(scheduler, state):
subscription[0].dispose()
scheduler.schedule_absolute(disposed, action2)
def action3(scheduler, state):
connection[0] = ys[0].connect(scheduler)
scheduler.schedule_absolute(300, action3)
def action4(scheduler, state):
connection[0].dispose()
scheduler.schedule_absolute(400, action4)
def action5(scheduler, state):
connection[0] = ys[0].connect(scheduler)
scheduler.schedule_absolute(500, action5)
def action6(scheduler, state):
connection[0].dispose()
scheduler.schedule_absolute(550, action6)
def action7(scheduler, state):
connection[0] = ys[0].connect(scheduler)
scheduler.schedule_absolute(650, action7)
def action8(scheduler, state):
connection[0].dispose()
scheduler.schedule_absolute(800, action8)
scheduler.start()
assert results.messages == [
on_next(450, 5),
on_next(450, 6),
on_next(450, 7),
on_next(520, 11),
]
assert xs.subscriptions == [
subscribe(300, 400),
subscribe(500, 550),
subscribe(650, 800),
]
def test_replay_count_error(self):
connection = [None]
subscription = [None]
ys = [None]
ex = "ex"
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(110, 7),
on_next(220, 3),
on_next(280, 4),
on_next(290, 1),
on_next(340, 8),
on_next(360, 5),
on_next(370, 6),
on_next(390, 7),
on_next(410, 13),
on_next(430, 2),
on_next(450, 9),
on_next(520, 11),
on_next(560, 20),
on_error(600, ex),
)
results = scheduler.create_observer()
def action0(scheduler, state):
ys[0] = xs.pipe(ops.replay(buffer_size=3, scheduler=scheduler))
scheduler.schedule_absolute(created, action0)
def action1(scheduler, state):
subscription[0] = ys[0].subscribe(results)
scheduler.schedule_absolute(450, action1)
def action2(scheduler, state):
subscription[0].dispose()
scheduler.schedule_absolute(disposed, action2)
def action3(scheduler, state):
connection[0] = ys[0].connect(scheduler)
scheduler.schedule_absolute(300, action3)
def action4(scheduler, state):
connection[0].dispose()
scheduler.schedule_absolute(400, action4)
def action5(scheduler, state):
connection[0] = ys[0].connect(scheduler)
scheduler.schedule_absolute(500, action5)
def action6(scheduler, state):
connection[0].dispose()
scheduler.schedule_absolute(800, action6)
scheduler.start()
assert results.messages == [
on_next(450, 5),
on_next(450, 6),
on_next(450, 7),
on_next(520, 11),
on_next(560, 20),
on_error(600, ex),
]
assert xs.subscriptions == [subscribe(300, 400), subscribe(500, 600)]
def test_replay_count_complete(self):
connection = [None]
subscription = [None]
ys = [None]
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(110, 7),
on_next(220, 3),
on_next(280, 4),
on_next(290, 1),
on_next(340, 8),
on_next(360, 5),
on_next(370, 6),
on_next(390, 7),
on_next(410, 13),
on_next(430, 2),
on_next(450, 9),
on_next(520, 11),
on_next(560, 20),
on_completed(600),
)
results = scheduler.create_observer()
def action0(scheduler, state):
ys[0] = xs.pipe(ops.replay(buffer_size=3, scheduler=scheduler))
scheduler.schedule_absolute(created, action0)
        def action1(scheduler, state):
subscription[0] = ys[0].subscribe(results)
scheduler.schedule_absolute(450, action1)
def action2(scheduler, state):
subscription[0].dispose()
scheduler.schedule_absolute(disposed, action2)
def action3(scheduler, state):
connection[0] = ys[0].connect(scheduler)
scheduler.schedule_absolute(300, action3)
def action4(scheduler, state):
connection[0].dispose()
scheduler.schedule_absolute(400, action4)
def action5(scheduler, state):
connection[0] = ys[0].connect(scheduler)
scheduler.schedule_absolute(500, action5)
def action(scheduler, state):
connection[0].dispose()
scheduler.schedule_absolute(800, action)
scheduler.start()
assert results.messages == [
on_next(450, 5),
on_next(450, 6),
on_next(450, 7),
on_next(520, 11),
on_next(560, 20),
on_completed(600),
]
assert xs.subscriptions == [subscribe(300, 400), subscribe(500, 600)]
def test_replay_count_dispose(self):
connection = [None]
subscription = [None]
ys = [None]
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(110, 7),
on_next(220, 3),
on_next(280, 4),
on_next(290, 1),
on_next(340, 8),
on_next(360, 5),
on_next(370, 6),
on_next(390, 7),
on_next(410, 13),
on_next(430, 2),
on_next(450, 9),
on_next(520, 11),
on_next(560, 20),
on_completed(600),
)
results = scheduler.create_observer()
def action0(scheduler, state):
ys[0] = xs.pipe(ops.replay(buffer_size=3, scheduler=scheduler))
scheduler.schedule_absolute(created, action0)
def action1(scheduler, state):
subscription[0] = ys[0].subscribe(results)
scheduler.schedule_absolute(450, action1)
def action2(scheduler, state):
subscription[0].dispose()
scheduler.schedule_absolute(475, action2)
def action3(scheduler, state):
connection[0] = ys[0].connect(scheduler)
scheduler.schedule_absolute(300, action3)
def action4(scheduler, state):
connection[0].dispose()
scheduler.schedule_absolute(400, action4)
def action5(scheduler, state):
connection[0] = ys[0].connect(scheduler)
scheduler.schedule_absolute(500, action5)
def action6(scheduler, state):
connection[0].dispose()
scheduler.schedule_absolute(550, action6)
def action7(scheduler, state):
connection[0] = ys[0].connect(scheduler)
scheduler.schedule_absolute(650, action7)
def action8(scheduler, state):
connection[0].dispose()
scheduler.schedule_absolute(800, action8)
scheduler.start()
assert results.messages == [on_next(450, 5), on_next(450, 6), on_next(450, 7)]
assert xs.subscriptions == [
subscribe(300, 400),
subscribe(500, 550),
subscribe(650, 800),
]
def test_replay_count_multiple_connections(self):
xs = reactivex.never()
ys = xs.pipe(ops.replay(None, 3))
connection1 = ys.connect()
connection2 = ys.connect()
assert connection1 == connection2
connection1.dispose()
connection2.dispose()
connection3 = ys.connect()
assert connection1 != connection3
# def test_replay_count_lambda_zip_complete(self):
# scheduler = TestScheduler()
    # xs = scheduler.create_hot_observable(
    #     on_next(110, 7),
    #     on_next(220, 3),
    #     on_next(280, 4),
    #     on_next(290, 1),
    #     on_next(340, 8),
    #     on_next(360, 5),
    #     on_next(370, 6),
    #     on_next(390, 7),
    #     on_next(410, 13),
    #     on_next(430, 2),
    #     on_next(450, 9),
    #     on_next(520, 11),
    #     on_next(560, 20),
    #     on_completed(600),
    # )
# def action():
# def mapper(_xs):
# return _xs.take(6).repeat()
# return xs.replay(mapper, 3, scheduler=scheduler)
# results = scheduler.start(action, disposed=610)
    # assert results.messages == [
    #     on_next(220, 3),
    #     on_next(280, 4),
    #     on_next(290, 1),
    #     on_next(340, 8),
    #     on_next(360, 5),
    #     on_next(370, 6),
    #     on_next(370, 8),
    #     on_next(370, 5),
    #     on_next(370, 6),
    #     on_next(390, 7),
    #     on_next(410, 13),
    #     on_next(430, 2),
    #     on_next(430, 7),
    #     on_next(430, 13),
    #     on_next(430, 2),
    #     on_next(450, 9),
    #     on_next(520, 11),
    #     on_next(560, 20),
    #     on_next(560, 9),
    #     on_next(560, 11),
    #     on_next(560, 20),
    #     on_next(600, 9),
    #     on_next(600, 11),
    #     on_next(600, 20),
    #     on_next(600, 9),
    #     on_next(600, 11),
    #     on_next(600, 20),
    # ]
# assert xs.subscriptions == [subscribe(200, 600)]
# def test_replay_count_lambda_zip_error(self):
# ex = 'ex'
# scheduler = TestScheduler()
    # xs = scheduler.create_hot_observable(
    #     on_next(110, 7),
    #     on_next(220, 3),
    #     on_next(280, 4),
    #     on_next(290, 1),
    #     on_next(340, 8),
    #     on_next(360, 5),
    #     on_next(370, 6),
    #     on_next(390, 7),
    #     on_next(410, 13),
    #     on_next(430, 2),
    #     on_next(450, 9),
    #     on_next(520, 11),
    #     on_next(560, 20),
    #     on_error(600, ex),
    # )
# def create():
# def mapper(_xs):
# return _xs.take(6).repeat()
# return xs.replay(mapper, 3, None)
# results = scheduler.start(create)
    # assert results.messages == [
    #     on_next(221, 3),
    #     on_next(281, 4),
    #     on_next(291, 1),
    #     on_next(341, 8),
    #     on_next(361, 5),
    #     on_next(371, 6),
    #     on_next(372, 8),
    #     on_next(373, 5),
    #     on_next(374, 6),
    #     on_next(391, 7),
    #     on_next(411, 13),
    #     on_next(431, 2),
    #     on_next(432, 7),
    #     on_next(433, 13),
    #     on_next(434, 2),
    #     on_next(450, 9),
    #     on_next(520, 11),
    #     on_next(560, 20),
    #     on_next(562, 9),
    #     on_next(563, 11),
    #     on_next(564, 20),
    #     on_error(600, ex),
    # ]
# assert xs.subscriptions == [subscribe(200, 600)]
# def test_replay_count_lambda_zip_dispose(self):
# scheduler = TestScheduler()
    # xs = scheduler.create_hot_observable(
    #     on_next(110, 7),
    #     on_next(220, 3),
    #     on_next(280, 4),
    #     on_next(290, 1),
    #     on_next(340, 8),
    #     on_next(360, 5),
    #     on_next(370, 6),
    #     on_next(390, 7),
    #     on_next(410, 13),
    #     on_next(430, 2),
    #     on_next(450, 9),
    #     on_next(520, 11),
    #     on_next(560, 20),
    #     on_completed(600),
    # )
# def create():
# def mapper(_xs):
# return _xs.take(6).repeat()
# return xs.replay(mapper, 3, None)
# results = scheduler.start(create, disposed=470)
    # assert results.messages == [
    #     on_next(221, 3),
    #     on_next(281, 4),
    #     on_next(291, 1),
    #     on_next(341, 8),
    #     on_next(361, 5),
    #     on_next(371, 6),
    #     on_next(372, 8),
    #     on_next(373, 5),
    #     on_next(374, 6),
    #     on_next(391, 7),
    #     on_next(411, 13),
    #     on_next(431, 2),
    #     on_next(432, 7),
    #     on_next(433, 13),
    #     on_next(434, 2),
    #     on_next(450, 9),
    # ]
# assert xs.subscriptions == [subscribe(200, 470)]
def test_replay_time_basic(self):
subscription = [None]
connection = [None]
ys = [None]
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(110, 7),
on_next(220, 3),
on_next(280, 4),
on_next(290, 1),
on_next(340, 8),
on_next(360, 5),
on_next(370, 6),
on_next(390, 7),
on_next(410, 13),
on_next(430, 2),
on_next(450, 9),
on_next(520, 11),
on_next(560, 20),
on_completed(600),
)
results = scheduler.create_observer()
def action0(scheduler, state):
ys[0] = xs.pipe(ops.replay(window=150, scheduler=scheduler))
scheduler.schedule_absolute(created, action0)
def action1(scheduler, state):
subscription[0] = ys[0].subscribe(results)
scheduler.schedule_absolute(450, action1)
def action2(scheduler, state):
subscription[0].dispose()
scheduler.schedule_absolute(disposed, action2)
def action3(scheduler, state):
connection[0] = ys[0].connect(scheduler)
scheduler.schedule_absolute(300, action3)
def action4(scheduler, state):
connection[0].dispose()
scheduler.schedule_absolute(400, action4)
def action5(scheduler, state):
connection[0] = ys[0].connect(scheduler)
scheduler.schedule_absolute(500, action5)
def action6(scheduler, state):
connection[0].dispose()
scheduler.schedule_absolute(550, action6)
def action7(scheduler, state):
connection[0] = ys[0].connect(scheduler)
scheduler.schedule_absolute(650, action7)
def action8(scheduler, state):
connection[0].dispose()
scheduler.schedule_absolute(800, action8)
scheduler.start()
assert results.messages == [
on_next(450, 8),
on_next(450, 5),
on_next(450, 6),
on_next(450, 7),
on_next(520, 11),
]
assert xs.subscriptions == [
subscribe(300, 400),
subscribe(500, 550),
subscribe(650, 800),
]
def test_replay_time_error(self):
subscription = [None]
connection = [None]
ys = [None]
ex = "ex"
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(110, 7),
on_next(220, 3),
on_next(280, 4),
on_next(290, 1),
on_next(340, 8),
on_next(360, 5),
on_next(370, 6),
on_next(390, 7),
on_next(410, 13),
on_next(430, 2),
on_next(450, 9),
on_next(520, 11),
on_next(560, 20),
on_error(600, ex),
)
results = scheduler.create_observer()
def action0(scheduler, state):
ys[0] = xs.pipe(ops.replay(window=75, scheduler=scheduler))
scheduler.schedule_absolute(created, action0)
def action1(scheduler, state):
subscription[0] = ys[0].subscribe(results, scheduler)
scheduler.schedule_absolute(450, action1)
def action2(scheduler, state):
subscription[0].dispose()
scheduler.schedule_absolute(disposed, action2)
def action3(scheduler, state):
connection[0] = ys[0].connect(scheduler)
scheduler.schedule_absolute(300, action3)
def action4(scheduler, state):
connection[0].dispose()
scheduler.schedule_absolute(400, action4)
def action5(scheduler, state):
connection[0] = ys[0].connect(scheduler)
scheduler.schedule_absolute(500, action5)
def action6(scheduler, state):
connection[0].dispose()
scheduler.schedule_absolute(800, action6)
scheduler.start()
assert results.messages == [
on_next(450, 7),
on_next(520, 11),
on_next(560, 20),
on_error(600, ex),
]
assert xs.subscriptions == [subscribe(300, 400), subscribe(500, 600)]
def test_replay_time_complete(self):
subscription = [None]
connection = [None]
ys = [None]
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(110, 7),
on_next(220, 3),
on_next(280, 4),
on_next(290, 1),
on_next(340, 8),
on_next(360, 5),
on_next(370, 6),
on_next(390, 7),
on_next(410, 13),
on_next(430, 2),
on_next(450, 9),
on_next(520, 11),
on_next(560, 20),
on_completed(600),
)
results = scheduler.create_observer()
def action0(scheduler, state):
ys[0] = xs.pipe(ops.replay(window=85, scheduler=scheduler))
scheduler.schedule_absolute(created, action0)
def action1(scheduler, state):
subscription[0] = ys[0].subscribe(results, scheduler)
scheduler.schedule_absolute(450, action1)
def action2(scheduler, state):
subscription[0].dispose()
scheduler.schedule_absolute(disposed, action2)
def action3(scheduler, state):
connection[0] = ys[0].connect(scheduler)
scheduler.schedule_absolute(300, action3)
def action4(scheduler, state):
connection[0].dispose()
scheduler.schedule_absolute(400, action4)
def action5(scheduler, state):
connection[0] = ys[0].connect(scheduler)
scheduler.schedule_absolute(500, action5)
def action6(scheduler, state):
connection[0].dispose()
scheduler.schedule_absolute(800, action6)
scheduler.start()
assert results.messages == [
on_next(450, 6),
on_next(450, 7),
on_next(520, 11),
on_next(560, 20),
on_completed(600),
]
assert xs.subscriptions == [subscribe(300, 400), subscribe(500, 600)]
def test_replay_time_dispose(self):
subscription = [None]
connection = [None]
ys = [None]
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(110, 7),
on_next(220, 3),
on_next(280, 4),
on_next(290, 1),
on_next(340, 8),
on_next(360, 5),
on_next(370, 6),
on_next(390, 7),
on_next(410, 13),
on_next(430, 2),
on_next(450, 9),
on_next(520, 11),
on_next(560, 20),
on_completed(600),
)
results = scheduler.create_observer()
def action0(scheduler, state):
ys[0] = xs.pipe(ops.replay(window=100, scheduler=scheduler))
scheduler.schedule_absolute(created, action0)
def action1(scheduler, state):
subscription[0] = ys[0].subscribe(results, scheduler)
scheduler.schedule_absolute(450, action1)
def action2(scheduler, state):
subscription[0].dispose()
scheduler.schedule_absolute(475, action2)
def action3(scheduler, state):
connection[0] = ys[0].connect(scheduler)
scheduler.schedule_absolute(300, action3)
def action4(scheduler, state):
connection[0].dispose()
scheduler.schedule_absolute(400, action4)
def action5(scheduler, state):
connection[0] = ys[0].connect(scheduler)
scheduler.schedule_absolute(500, action5)
def action6(scheduler, state):
connection[0].dispose()
scheduler.schedule_absolute(550, action6)
def action7(scheduler, state):
connection[0] = ys[0].connect(scheduler)
scheduler.schedule_absolute(650, action7)
def action8(scheduler, state):
connection[0].dispose()
scheduler.schedule_absolute(800, action8)
scheduler.start()
assert results.messages == [on_next(450, 5), on_next(450, 6), on_next(450, 7)]
assert xs.subscriptions == [
subscribe(300, 400),
subscribe(500, 550),
subscribe(650, 800),
]
def test_replay_time_multiple_connections(self):
xs = reactivex.never()
ys = xs.pipe(ops.replay(window=100))
connection1 = ys.connect()
connection2 = ys.connect()
assert connection1 == connection2
connection1.dispose()
connection2.dispose()
connection3 = ys.connect()
assert connection1 != connection3
# def test_replay_time_lambda_zip_complete(self):
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable(
# on_next(110, 7),
# on_next(220, 3),
# on_next(280, 4),
# on_next(290, 1),
# on_next(340, 8),
# on_next(360, 5),
# on_next(370, 6),
# on_next(390, 7),
# on_next(410, 13),
# on_next(430, 2),
# on_next(450, 9),
# on_next(520, 11),
# on_next(560, 20),
# on_completed(600),
# )
# def create():
# def mapper(_xs):
# return _xs.pipe(ops.take(6), ops.repeat())
# return xs.pipe(ops.replay(mapper, None, 50))
# results = scheduler.start(create, disposed=610)
# assert results.messages == [
# on_next(220, 3),
# on_next(280, 4),
# on_next(290, 1),
# on_next(340, 8),
# on_next(360, 5),
# on_next(370, 6),
# on_next(370, 8),
# on_next(370, 5),
# on_next(370, 6),
# on_next(390, 7),
# on_next(410, 13),
# on_next(430, 2),
# on_next(430, 7),
# on_next(430, 13),
# on_next(430, 2),
# on_next(450, 9),
# on_next(520, 11),
# on_next(560, 20),
# on_next(560, 11),
# on_next(560, 20),
# on_next(600, 20),
# on_next(600, 20),
# on_next(600, 20),
# on_next(600, 20),
# ]
# assert xs.subscriptions == [subscribe(200, 600)]
# def test_replay_time_lambda_zip_error(self):
# ex = "ex"
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable(
# on_next(110, 7),
# on_next(220, 3),
# on_next(280, 4),
# on_next(290, 1),
# on_next(340, 8),
# on_next(360, 5),
# on_next(370, 6),
# on_next(390, 7),
# on_next(410, 13),
# on_next(430, 2),
# on_next(450, 9),
# on_next(520, 11),
# on_next(560, 20),
# on_error(600, ex),
# )
# def create():
# def mapper(_xs):
# return _xs.take(6).repeat()
# return xs.pipe(ops.replay(mapper, None, 50))
# results = scheduler.start(create)
# assert results.messages == [
# on_next(221, 3),
# on_next(281, 4),
# on_next(291, 1),
# on_next(341, 8),
# on_next(361, 5),
# on_next(371, 6),
# on_next(372, 8),
# on_next(373, 5),
# on_next(374, 6),
# on_next(391, 7),
# on_next(411, 13),
# on_next(431, 2),
# on_next(432, 7),
# on_next(433, 13),
# on_next(434, 2),
# on_next(450, 9),
# on_next(520, 11),
# on_next(560, 20),
# on_next(562, 11),
# on_next(563, 20),
# on_error(600, ex),
# ]
# assert xs.subscriptions == [subscribe(200, 600)]
# def test_replay_time_lambda_zip_dispose(self):
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable(
# on_next(110, 7),
# on_next(220, 3),
# on_next(280, 4),
# on_next(290, 1),
# on_next(340, 8),
# on_next(360, 5),
# on_next(370, 6),
# on_next(390, 7),
# on_next(410, 13),
# on_next(430, 2),
# on_next(450, 9),
# on_next(520, 11),
# on_next(560, 20),
# on_completed(600),
# )
# def create():
# def mapper(_xs):
# return _xs.take(6).repeat()
# return xs.pipe(ops.replay(mapper, None, 50))
# results = scheduler.start(create, disposed=470)
# assert results.messages == [
# on_next(221, 3),
# on_next(281, 4),
# on_next(291, 1),
# on_next(341, 8),
# on_next(361, 5),
# on_next(371, 6),
# on_next(372, 8),
# on_next(373, 5),
# on_next(374, 6),
# on_next(391, 7),
# on_next(411, 13),
# on_next(431, 2),
# on_next(432, 7),
# on_next(433, 13),
# on_next(434, 2),
# on_next(450, 9),
# ]
# assert xs.subscriptions == [subscribe(200, 470)]
| 31.430993
| 506
| 0.544719
|
d79c0ba8795ccc18c6e2efb754255d8bb1dbc735
| 187
|
py
|
Python
|
tests/context.py
|
rwilson4/bootstrap-sta
|
da9c373bbe8556e21e76167e80ac0cb2dbf07505
|
[
"Apache-2.0"
] | null | null | null |
tests/context.py
|
rwilson4/bootstrap-sta
|
da9c373bbe8556e21e76167e80ac0cb2dbf07505
|
[
"Apache-2.0"
] | null | null | null |
tests/context.py
|
rwilson4/bootstrap-sta
|
da9c373bbe8556e21e76167e80ac0cb2dbf07505
|
[
"Apache-2.0"
] | null | null | null |
import sys
import os
sys.path.insert(
0,
os.path.abspath(
os.path.join(os.path.dirname(__file__), "..", "bootstrap_stat")
),
)
import bootstrap_stat
import datasets
| 14.384615
| 71
| 0.657754
|
f6d922bc93cf0528dc10bcb6b57ffa5c552e6bdc
| 7,423
|
py
|
Python
|
server/tests/test_iptools.py
|
natowi/pepi
|
22df696209ac2545d1e8e1cf0c8822725acadb29
|
[
"Apache-2.0"
] | 4
|
2017-08-30T03:17:34.000Z
|
2019-09-24T08:57:41.000Z
|
server/tests/test_iptools.py
|
natowi/pepi
|
22df696209ac2545d1e8e1cf0c8822725acadb29
|
[
"Apache-2.0"
] | 14
|
2017-09-02T03:53:14.000Z
|
2022-03-11T23:19:04.000Z
|
server/tests/test_iptools.py
|
natowi/pepi
|
22df696209ac2545d1e8e1cf0c8822725acadb29
|
[
"Apache-2.0"
] | 3
|
2019-03-27T18:33:25.000Z
|
2021-07-17T02:18:19.000Z
|
import netifaces
from server import IPTools
# noinspection PyAttributeOutsideInit,PyPep8
class TestIPTools(object):
"""
Unit tests for IPTools module, mainly checking that it
extracts the correct IPs from the netifaces package
and that they are correctly formatted.
"""
def test_current_ip(self, monkeypatch):
def mock_netifaces_ifaddrs(_):
if self.first_mock_netifaces:
self.first_mock_netifaces = False
return {netifaces.AF_LINK: [{'addr': u'00:e0:4c:68:01:cc'}],
netifaces.AF_INET: [{'broadcast': u'10.0.0.255', 'netmask': u'255.255.255.0', 'addr': u'10.0.0.25'}],
netifaces.AF_INET6: [{'netmask': u'ffff:ffff:ffff:ffff::/64', 'flags': 1024, 'addr': u'fe80::81a:bbbb:5899:f449%en4'}]}
else:
return {netifaces.AF_LINK: [{'addr': u'00:e0:4c:68:01:cc'}]}
self.first_mock_netifaces = True
monkeypatch.setattr('netifaces.ifaddresses', mock_netifaces_ifaddrs)
ip_list = IPTools.current_ips()
assert len(ip_list) == 1
assert ip_list[0] == '10.0.0.25'
def test_current_ip_for_multiple(self, monkeypatch):
def mock_netifaces_ifaddrs(_):
self.mock_netifaces_counter += 1
return {netifaces.AF_LINK: [{'addr': u'00:e0:4c:68:01:cc'}],
netifaces.AF_INET: [{'broadcast': u'10.0.0.255', 'netmask': u'255.255.255.0', 'addr': u'10.0.0.{}'.format(self.mock_netifaces_counter)}],
netifaces.AF_INET6: [{'netmask': u'ffff:ffff:ffff:ffff::/64', 'flags': 1024, 'addr': u'fe80::81a:bbbb:5899:f449%en4'}]}
self.mock_netifaces_counter = 1
monkeypatch.setattr('netifaces.ifaddresses', mock_netifaces_ifaddrs)
ip_list = IPTools.current_ips()
interface_ip_count = [x+2 for x in range(len(netifaces.interfaces()))]
zipped = zip(ip_list, interface_ip_count)
for ip, count in zipped:
assert ip == '10.0.0.{}'.format(count)
def test_no_best_candidate_ip_no_gateway(self, monkeypatch):
def mock_netifaces_ifaddrs(_):
self.mock_netifaces_counter += 1
return {netifaces.AF_LINK: [{'addr': u'00:e0:4c:68:01:cc'}],
netifaces.AF_INET: [{'broadcast': u'10.0.0.255', 'netmask': u'255.255.255.0', 'addr': u'65.23.23.{}'.format(self.mock_netifaces_counter)}],
netifaces.AF_INET6: [{'netmask': u'ffff:ffff:ffff:ffff::/64', 'flags': 1024, 'addr': u'fe80::81a:bbbb:5899:f449%en4'}]}
self.mock_netifaces_counter = 1
monkeypatch.setattr('netifaces.ifaddresses', mock_netifaces_ifaddrs)
ip_list = IPTools.current_ips()
interface_ip_count = [x+2 for x in range(len(netifaces.interfaces()))]
zipped = zip(ip_list, interface_ip_count)
for ip, count in zipped:
assert ip == '65.23.23.{}'.format(count)
def test_no_best_candidate_no_gateway(self, monkeypatch):
def mock_netifaces_gateways():
return {}
def mock_netifaces_ifaddrs(_):
self.mock_netifaces_counter += 1
return {netifaces.AF_LINK: [{'addr': u'00:e0:4c:68:01:cc'}],
netifaces.AF_INET: [{'broadcast': u'10.0.0.255', 'netmask': u'255.255.255.0', 'addr': u'127.0.0.{}'.format(self.mock_netifaces_counter)}],
netifaces.AF_INET6: [{'netmask': u'ffff:ffff:ffff:ffff::/64', 'flags': 1024, 'addr': u'fe80::81a:bbbb:5899:f449%en4'}]}
self.mock_netifaces_counter = 1
monkeypatch.setattr('netifaces.ifaddresses', mock_netifaces_ifaddrs)
monkeypatch.setattr('netifaces.gateways', mock_netifaces_gateways)
ip_list = IPTools.current_ips()
interface_ip_count = [x + 2 for x in range(len(netifaces.interfaces()))]
zipped = zip(ip_list, interface_ip_count)
for ip, count in zipped:
assert ip == '127.0.0.{}'.format(count)
def test_gateway_ip(self, monkeypatch):
def mock_netifaces_gateways():
return {'default': {2: (u'10.0.0.1', u'en4')}, 2: [(u'10.0.0.1', u'en4', True)],
30: [(u'fe80::%utun0', u'utun0', False)]}
monkeypatch.setattr('netifaces.gateways', mock_netifaces_gateways)
gateway_ip = IPTools.gateway_ip()
assert gateway_ip == '10.0.0.1'
def test_no_gateway_ip(self, monkeypatch):
def mock_netifaces_gateways():
return {}
monkeypatch.setattr('netifaces.gateways', mock_netifaces_gateways)
gateway_ip = IPTools.gateway_ip()
assert not gateway_ip
def test_current_ips_without_gateway(self, monkeypatch):
def mock_netifaces_gateways():
return {}
def mock_netifaces_ifaddrs(_):
if self.first_mock_netifaces:
self.first_mock_netifaces = False
return {netifaces.AF_LINK: [{'addr': u'00:e0:4c:68:01:cc'}],
netifaces.AF_INET: [{'broadcast': u'10.0.0.255', 'netmask': u'255.255.255.0', 'addr': u'127.0.0.1'}],
netifaces.AF_INET6: [{'netmask': u'ffff:ffff:ffff:ffff::/64', 'flags': 1024, 'addr': u'fe80::81a:bbbb:5899:f449%en4'}]}
else:
self.mock_netifaces_counter += 1
return {netifaces.AF_LINK: [{'addr': u'00:e0:4c:68:01:cc'}],
netifaces.AF_INET: [{'broadcast': u'10.0.0.255', 'netmask': u'255.255.255.0',
'addr': u'10.0.0.{}'.format(self.mock_netifaces_counter)}],
netifaces.AF_INET6: [{'netmask': u'ffff:ffff:ffff:ffff::/64', 'flags': 1024,
'addr': u'fe80::81a:bbbb:5899:f449%en4'}]}
self.first_mock_netifaces = True
self.mock_netifaces_counter = 1
monkeypatch.setattr('netifaces.gateways', mock_netifaces_gateways)
monkeypatch.setattr('netifaces.ifaddresses', mock_netifaces_ifaddrs)
ip_list = IPTools.current_ips()
interface_ip_count = [x+2 for x in range(len(netifaces.interfaces()))]
assert '127.0.0.1' not in ip_list
zipped = zip(ip_list, interface_ip_count)
for ip, count in zipped:
assert ip == '10.0.0.{}'.format(count)
def test_get_first_digits_from(self):
test_ip = '10.0.0.0'
expected = '10.0.0.'
assert expected == IPTools.get_first_digits_from(test_ip, 3)
expected = '10.0.0'
assert expected == IPTools.get_first_digits_from(test_ip, 3, with_dot=False)
expected = '10'
assert expected == IPTools.get_first_digits_from(test_ip, 1, with_dot=False)
expected = '10.0.0.0'
assert expected == IPTools.get_first_digits_from(test_ip, 10, with_dot=False)
def test_get_subnet_from(self):
test_ip = '127.0.0.1'
expected = '127.0.0.'
assert expected == IPTools.get_subnet_from(test_ip)
expected = '127.0.0.'
assert expected == IPTools.get_subnet_from(test_ip, with_dot=True)
expected = '127.0.0'
assert expected == IPTools.get_subnet_from(test_ip, with_dot=False)
test_ip = '127.0.0.0.0.0.0.0'
assert expected == IPTools.get_subnet_from(test_ip, with_dot=False)
test_ip = '127.0.'
expected = '127.0'
assert expected == IPTools.get_subnet_from(test_ip, with_dot=False)
expected = '127.0.'
assert expected == IPTools.get_subnet_from(test_ip, with_dot=True)
| 49.818792
| 152
| 0.617136
|
8d1f5f2493f90f7b13e21f5f7737b770e53f3aee
| 1,737
|
py
|
Python
|
alfred/utils/mana.py
|
ckmessi/alfred
|
48f85f43ee89d4370e1ef5a5ce1158dffc0596d4
|
[
"Apache-2.0"
] | 643
|
2018-02-04T14:15:28.000Z
|
2022-03-30T14:25:15.000Z
|
alfred/utils/mana.py
|
ckmessi/alfred
|
48f85f43ee89d4370e1ef5a5ce1158dffc0596d4
|
[
"Apache-2.0"
] | 22
|
2019-07-26T15:51:19.000Z
|
2022-02-28T16:59:58.000Z
|
alfred/utils/mana.py
|
ckmessi/alfred
|
48f85f43ee89d4370e1ef5a5ce1158dffc0596d4
|
[
"Apache-2.0"
] | 121
|
2018-02-21T05:33:31.000Z
|
2022-03-04T09:30:44.000Z
|
#
# Copyright (c) 2020 JinTian.
#
# This file is part of alfred
# (see http://jinfagang.github.io).
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
Utils using in MANA universe
such as print welcome message
"""
from colorama import Fore, Back, Style
welcome_msg = '''
__ ______ _ _____ ___ ____
/ |/ / | / | / / | / | / _/
/ /|_/ / /| | / |/ / /| | / /| | / /
/ / / / ___ |/ /| / ___ |/ ___ |_/ /
/_/ /_/_/ |_/_/ |_/_/ |_/_/ |_/___/ http://manaai.cn
'''
def welcome(ori_git_url):
    print(Fore.YELLOW + Style.BRIGHT + 'Welcome to the MANA AI platform!' + Style.RESET_ALL)
    print(Fore.BLUE + Style.BRIGHT + welcome_msg + Style.RESET_ALL)
    print(Style.BRIGHT + "Seeing this message means you are backed by our team!" + Style.RESET_ALL)
    print('The latest updates of our code are always at: {} or {}'.format(ori_git_url, 'http://manaai.cn'))
    print('NOTE: Our code distributed from anywhere else is not supported!')
| 37.76087
| 115
| 0.674151
|
8bd674e6a7fc70ab5122af454e3cb98a86acf01c
| 20,602
|
py
|
Python
|
test/test_grid_file.py
|
tony/mongo-python-driver
|
d43ca118f91dda373356802ee8ec976d96c366b9
|
[
"Apache-2.0"
] | 2
|
2019-02-28T08:55:19.000Z
|
2019-02-28T08:55:31.000Z
|
test/test_grid_file.py
|
tony/mongo-python-driver
|
d43ca118f91dda373356802ee8ec976d96c366b9
|
[
"Apache-2.0"
] | null | null | null |
test/test_grid_file.py
|
tony/mongo-python-driver
|
d43ca118f91dda373356802ee8ec976d96c366b9
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright 2009-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the grid_file module.
"""
import datetime
import sys
sys.path[0:0] = [""]
from bson.objectid import ObjectId
from bson.py3compat import StringIO
from gridfs import GridFS
from gridfs.grid_file import (DEFAULT_CHUNK_SIZE,
_SEEK_CUR,
_SEEK_END,
GridIn,
GridOut,
GridOutCursor)
from gridfs.errors import NoFile
from pymongo import MongoClient
from pymongo.errors import ConfigurationError, ServerSelectionTimeoutError
from test import (IntegrationTest,
unittest,
qcheck)
from test.utils import rs_or_single_client
class TestGridFileNoConnect(unittest.TestCase):
"""Test GridFile features on a client that does not connect.
"""
@classmethod
def setUpClass(cls):
cls.db = MongoClient(connect=False).pymongo_test
def test_grid_in_custom_opts(self):
self.assertRaises(TypeError, GridIn, "foo")
a = GridIn(self.db.fs, _id=5, filename="my_file",
contentType="text/html", chunkSize=1000, aliases=["foo"],
metadata={"foo": 1, "bar": 2}, bar=3, baz="hello")
self.assertEqual(5, a._id)
self.assertEqual("my_file", a.filename)
self.assertEqual("my_file", a.name)
self.assertEqual("text/html", a.content_type)
self.assertEqual(1000, a.chunk_size)
self.assertEqual(["foo"], a.aliases)
self.assertEqual({"foo": 1, "bar": 2}, a.metadata)
self.assertEqual(3, a.bar)
self.assertEqual("hello", a.baz)
self.assertRaises(AttributeError, getattr, a, "mike")
b = GridIn(self.db.fs,
content_type="text/html", chunk_size=1000, baz=100)
self.assertEqual("text/html", b.content_type)
self.assertEqual(1000, b.chunk_size)
self.assertEqual(100, b.baz)
class TestGridFile(IntegrationTest):
def setUp(self):
self.db.drop_collection('fs.files')
self.db.drop_collection('fs.chunks')
def test_basic(self):
f = GridIn(self.db.fs, filename="test")
f.write(b"hello world")
f.close()
self.assertEqual(1, self.db.fs.files.count_documents({}))
self.assertEqual(1, self.db.fs.chunks.count_documents({}))
g = GridOut(self.db.fs, f._id)
self.assertEqual(b"hello world", g.read())
# make sure it's still there...
g = GridOut(self.db.fs, f._id)
self.assertEqual(b"hello world", g.read())
f = GridIn(self.db.fs, filename="test")
f.close()
self.assertEqual(2, self.db.fs.files.count_documents({}))
self.assertEqual(1, self.db.fs.chunks.count_documents({}))
g = GridOut(self.db.fs, f._id)
self.assertEqual(b"", g.read())
# test that reading 0 returns proper type
self.assertEqual(b"", g.read(0))
def test_md5(self):
f = GridIn(self.db.fs)
f.write(b"hello world\n")
f.close()
self.assertEqual("6f5902ac237024bdd0c176cb93063dc4", f.md5)
def test_alternate_collection(self):
self.db.alt.files.delete_many({})
self.db.alt.chunks.delete_many({})
f = GridIn(self.db.alt)
f.write(b"hello world")
f.close()
self.assertEqual(1, self.db.alt.files.count_documents({}))
self.assertEqual(1, self.db.alt.chunks.count_documents({}))
g = GridOut(self.db.alt, f._id)
self.assertEqual(b"hello world", g.read())
# test that md5 still works...
self.assertEqual("5eb63bbbe01eeed093cb22bb8f5acdc3", g.md5)
def test_grid_in_default_opts(self):
self.assertRaises(TypeError, GridIn, "foo")
a = GridIn(self.db.fs)
self.assertTrue(isinstance(a._id, ObjectId))
self.assertRaises(AttributeError, setattr, a, "_id", 5)
self.assertEqual(None, a.filename)
self.assertEqual(None, a.name)
a.filename = "my_file"
self.assertEqual("my_file", a.filename)
self.assertEqual("my_file", a.name)
self.assertEqual(None, a.content_type)
a.content_type = "text/html"
self.assertEqual("text/html", a.content_type)
self.assertRaises(AttributeError, getattr, a, "length")
self.assertRaises(AttributeError, setattr, a, "length", 5)
self.assertEqual(255 * 1024, a.chunk_size)
self.assertRaises(AttributeError, setattr, a, "chunk_size", 5)
self.assertRaises(AttributeError, getattr, a, "upload_date")
self.assertRaises(AttributeError, setattr, a, "upload_date", 5)
self.assertRaises(AttributeError, getattr, a, "aliases")
a.aliases = ["foo"]
self.assertEqual(["foo"], a.aliases)
self.assertRaises(AttributeError, getattr, a, "metadata")
a.metadata = {"foo": 1}
self.assertEqual({"foo": 1}, a.metadata)
self.assertRaises(AttributeError, setattr, a, "md5", 5)
a.close()
a.forty_two = 42
self.assertEqual(42, a.forty_two)
self.assertTrue(isinstance(a._id, ObjectId))
self.assertRaises(AttributeError, setattr, a, "_id", 5)
self.assertEqual("my_file", a.filename)
self.assertEqual("my_file", a.name)
self.assertEqual("text/html", a.content_type)
self.assertEqual(0, a.length)
self.assertRaises(AttributeError, setattr, a, "length", 5)
self.assertEqual(255 * 1024, a.chunk_size)
self.assertRaises(AttributeError, setattr, a, "chunk_size", 5)
self.assertTrue(isinstance(a.upload_date, datetime.datetime))
self.assertRaises(AttributeError, setattr, a, "upload_date", 5)
self.assertEqual(["foo"], a.aliases)
self.assertEqual({"foo": 1}, a.metadata)
self.assertEqual("d41d8cd98f00b204e9800998ecf8427e", a.md5)
self.assertRaises(AttributeError, setattr, a, "md5", 5)
# Make sure custom attributes that were set both before and after
# a.close() are reflected in b. PYTHON-411.
b = GridFS(self.db).get_last_version(filename=a.filename)
self.assertEqual(a.metadata, b.metadata)
self.assertEqual(a.aliases, b.aliases)
self.assertEqual(a.forty_two, b.forty_two)
def test_grid_out_default_opts(self):
self.assertRaises(TypeError, GridOut, "foo")
gout = GridOut(self.db.fs, 5)
with self.assertRaises(NoFile):
gout.name
a = GridIn(self.db.fs)
a.close()
b = GridOut(self.db.fs, a._id)
self.assertEqual(a._id, b._id)
self.assertEqual(0, b.length)
self.assertEqual(None, b.content_type)
self.assertEqual(None, b.name)
self.assertEqual(None, b.filename)
self.assertEqual(255 * 1024, b.chunk_size)
self.assertTrue(isinstance(b.upload_date, datetime.datetime))
self.assertEqual(None, b.aliases)
self.assertEqual(None, b.metadata)
self.assertEqual("d41d8cd98f00b204e9800998ecf8427e", b.md5)
for attr in ["_id", "name", "content_type", "length", "chunk_size",
"upload_date", "aliases", "metadata", "md5"]:
self.assertRaises(AttributeError, setattr, b, attr, 5)
def test_grid_out_cursor_options(self):
self.assertRaises(TypeError, GridOutCursor.__init__, self.db.fs, {},
projection={"filename": 1})
cursor = GridOutCursor(self.db.fs, {})
cursor_clone = cursor.clone()
cursor_dict = cursor.__dict__.copy()
cursor_dict.pop('_Cursor__session')
cursor_clone_dict = cursor_clone.__dict__.copy()
cursor_clone_dict.pop('_Cursor__session')
self.assertEqual(cursor_dict, cursor_clone_dict)
self.assertRaises(NotImplementedError, cursor.add_option, 0)
self.assertRaises(NotImplementedError, cursor.remove_option, 0)
def test_grid_out_custom_opts(self):
one = GridIn(self.db.fs, _id=5, filename="my_file",
contentType="text/html", chunkSize=1000, aliases=["foo"],
metadata={"foo": 1, "bar": 2}, bar=3, baz="hello")
one.write(b"hello world")
one.close()
two = GridOut(self.db.fs, 5)
self.assertEqual("my_file", two.name)
self.assertEqual("my_file", two.filename)
self.assertEqual(5, two._id)
self.assertEqual(11, two.length)
self.assertEqual("text/html", two.content_type)
self.assertEqual(1000, two.chunk_size)
self.assertTrue(isinstance(two.upload_date, datetime.datetime))
self.assertEqual(["foo"], two.aliases)
self.assertEqual({"foo": 1, "bar": 2}, two.metadata)
self.assertEqual(3, two.bar)
self.assertEqual("5eb63bbbe01eeed093cb22bb8f5acdc3", two.md5)
for attr in ["_id", "name", "content_type", "length", "chunk_size",
"upload_date", "aliases", "metadata", "md5"]:
self.assertRaises(AttributeError, setattr, two, attr, 5)
def test_grid_out_file_document(self):
one = GridIn(self.db.fs)
one.write(b"foo bar")
one.close()
two = GridOut(self.db.fs, file_document=self.db.fs.files.find_one())
self.assertEqual(b"foo bar", two.read())
three = GridOut(self.db.fs, 5,
file_document=self.db.fs.files.find_one())
self.assertEqual(b"foo bar", three.read())
four = GridOut(self.db.fs, file_document={})
with self.assertRaises(NoFile):
four.name
def test_write_file_like(self):
one = GridIn(self.db.fs)
one.write(b"hello world")
one.close()
two = GridOut(self.db.fs, one._id)
three = GridIn(self.db.fs)
three.write(two)
three.close()
four = GridOut(self.db.fs, three._id)
self.assertEqual(b"hello world", four.read())
five = GridIn(self.db.fs, chunk_size=2)
five.write(b"hello")
buffer = StringIO(b" world")
five.write(buffer)
five.write(b" and mongodb")
five.close()
self.assertEqual(b"hello world and mongodb",
GridOut(self.db.fs, five._id).read())
def test_write_lines(self):
a = GridIn(self.db.fs)
a.writelines([b"hello ", b"world"])
a.close()
self.assertEqual(b"hello world", GridOut(self.db.fs, a._id).read())
def test_close(self):
f = GridIn(self.db.fs)
f.close()
self.assertRaises(ValueError, f.write, "test")
f.close()
def test_multi_chunk_file(self):
random_string = b'a' * (DEFAULT_CHUNK_SIZE + 1000)
f = GridIn(self.db.fs)
f.write(random_string)
f.close()
self.assertEqual(1, self.db.fs.files.count_documents({}))
self.assertEqual(2, self.db.fs.chunks.count_documents({}))
g = GridOut(self.db.fs, f._id)
self.assertEqual(random_string, g.read())
def test_small_chunks(self):
self.files = 0
self.chunks = 0
def helper(data):
f = GridIn(self.db.fs, chunkSize=1)
f.write(data)
f.close()
self.files += 1
self.chunks += len(data)
self.assertEqual(self.files, self.db.fs.files.count_documents({}))
self.assertEqual(self.chunks, self.db.fs.chunks.count_documents({}))
g = GridOut(self.db.fs, f._id)
self.assertEqual(data, g.read())
g = GridOut(self.db.fs, f._id)
self.assertEqual(data, g.read(10) + g.read(10))
return True
qcheck.check_unittest(self, helper,
qcheck.gen_string(qcheck.gen_range(0, 20)))
def test_seek(self):
f = GridIn(self.db.fs, chunkSize=3)
f.write(b"hello world")
f.close()
g = GridOut(self.db.fs, f._id)
self.assertEqual(b"hello world", g.read())
g.seek(0)
self.assertEqual(b"hello world", g.read())
g.seek(1)
self.assertEqual(b"ello world", g.read())
self.assertRaises(IOError, g.seek, -1)
g.seek(-3, _SEEK_END)
self.assertEqual(b"rld", g.read())
g.seek(0, _SEEK_END)
self.assertEqual(b"", g.read())
self.assertRaises(IOError, g.seek, -100, _SEEK_END)
g.seek(3)
g.seek(3, _SEEK_CUR)
self.assertEqual(b"world", g.read())
self.assertRaises(IOError, g.seek, -100, _SEEK_CUR)
def test_tell(self):
f = GridIn(self.db.fs, chunkSize=3)
f.write(b"hello world")
f.close()
g = GridOut(self.db.fs, f._id)
self.assertEqual(0, g.tell())
g.read(0)
self.assertEqual(0, g.tell())
g.read(1)
self.assertEqual(1, g.tell())
g.read(2)
self.assertEqual(3, g.tell())
g.read()
self.assertEqual(g.length, g.tell())
def test_multiple_reads(self):
f = GridIn(self.db.fs, chunkSize=3)
f.write(b"hello world")
f.close()
g = GridOut(self.db.fs, f._id)
self.assertEqual(b"he", g.read(2))
self.assertEqual(b"ll", g.read(2))
self.assertEqual(b"o ", g.read(2))
self.assertEqual(b"wo", g.read(2))
self.assertEqual(b"rl", g.read(2))
self.assertEqual(b"d", g.read(2))
self.assertEqual(b"", g.read(2))
def test_readline(self):
f = GridIn(self.db.fs, chunkSize=5)
f.write((b"""Hello world,
How are you?
Hope all is well.
Bye"""))
f.close()
# Try read(), then readline().
g = GridOut(self.db.fs, f._id)
self.assertEqual(b"H", g.read(1))
self.assertEqual(b"ello world,\n", g.readline())
self.assertEqual(b"How a", g.readline(5))
self.assertEqual(b"", g.readline(0))
self.assertEqual(b"re you?\n", g.readline())
self.assertEqual(b"Hope all is well.\n", g.readline(1000))
self.assertEqual(b"Bye", g.readline())
self.assertEqual(b"", g.readline())
# Try readline() first, then read().
g = GridOut(self.db.fs, f._id)
self.assertEqual(b"He", g.readline(2))
self.assertEqual(b"l", g.read(1))
self.assertEqual(b"lo", g.readline(2))
self.assertEqual(b" world,\n", g.readline())
# Only readline().
g = GridOut(self.db.fs, f._id)
self.assertEqual(b"H", g.readline(1))
self.assertEqual(b"e", g.readline(1))
self.assertEqual(b"llo world,\n", g.readline())
def test_iterator(self):
f = GridIn(self.db.fs)
f.close()
g = GridOut(self.db.fs, f._id)
self.assertEqual([], list(g))
f = GridIn(self.db.fs)
f.write(b"hello world")
f.close()
g = GridOut(self.db.fs, f._id)
self.assertEqual([b"hello world"], list(g))
self.assertEqual(b"hello", g.read(5))
self.assertEqual([b"hello world"], list(g))
self.assertEqual(b" worl", g.read(5))
f = GridIn(self.db.fs, chunk_size=2)
f.write(b"hello world")
f.close()
g = GridOut(self.db.fs, f._id)
self.assertEqual([b"he", b"ll", b"o ",
b"wo", b"rl", b"d"], list(g))
def test_read_unaligned_buffer_size(self):
in_data = (b"This is a text that doesn't "
b"quite fit in a single 16-byte chunk.")
f = GridIn(self.db.fs, chunkSize=16)
f.write(in_data)
f.close()
g = GridOut(self.db.fs, f._id)
out_data = b''
        while True:
s = g.read(13)
if not s:
break
out_data += s
self.assertEqual(in_data, out_data)
def test_readchunk(self):
in_data = b'a' * 10
f = GridIn(self.db.fs, chunkSize=3)
f.write(in_data)
f.close()
g = GridOut(self.db.fs, f._id)
self.assertEqual(3, len(g.readchunk()))
self.assertEqual(2, len(g.read(2)))
self.assertEqual(1, len(g.readchunk()))
self.assertEqual(3, len(g.read(3)))
self.assertEqual(1, len(g.readchunk()))
self.assertEqual(0, len(g.readchunk()))
def test_write_unicode(self):
f = GridIn(self.db.fs)
self.assertRaises(TypeError, f.write, u"foo")
f = GridIn(self.db.fs, encoding="utf-8")
f.write(u"foo")
f.close()
g = GridOut(self.db.fs, f._id)
self.assertEqual(b"foo", g.read())
f = GridIn(self.db.fs, encoding="iso-8859-1")
f.write(u"aé")
f.close()
g = GridOut(self.db.fs, f._id)
self.assertEqual(u"aé".encode("iso-8859-1"), g.read())
def test_set_after_close(self):
f = GridIn(self.db.fs, _id="foo", bar="baz")
self.assertEqual("foo", f._id)
self.assertEqual("baz", f.bar)
self.assertRaises(AttributeError, getattr, f, "baz")
self.assertRaises(AttributeError, getattr, f, "uploadDate")
self.assertRaises(AttributeError, setattr, f, "_id", 5)
f.bar = "foo"
f.baz = 5
self.assertEqual("foo", f._id)
self.assertEqual("foo", f.bar)
self.assertEqual(5, f.baz)
self.assertRaises(AttributeError, getattr, f, "uploadDate")
f.close()
self.assertEqual("foo", f._id)
self.assertEqual("foo", f.bar)
self.assertEqual(5, f.baz)
self.assertTrue(f.uploadDate)
self.assertRaises(AttributeError, setattr, f, "_id", 5)
f.bar = "a"
f.baz = "b"
self.assertRaises(AttributeError, setattr, f, "upload_date", 5)
g = GridOut(self.db.fs, f._id)
self.assertEqual("a", g.bar)
self.assertEqual("b", g.baz)
# Versions 2.0.1 and older saved a _closed field for some reason.
self.assertRaises(AttributeError, getattr, g, "_closed")
def test_context_manager(self):
contents = b"Imagine this is some important data..."
with GridIn(self.db.fs, filename="important") as infile:
infile.write(contents)
with GridOut(self.db.fs, infile._id) as outfile:
self.assertEqual(contents, outfile.read())
def test_prechunked_string(self):
def write_me(s, chunk_size):
buf = StringIO(s)
infile = GridIn(self.db.fs)
while True:
to_write = buf.read(chunk_size)
if to_write == b'':
break
infile.write(to_write)
infile.close()
buf.close()
outfile = GridOut(self.db.fs, infile._id)
data = outfile.read()
self.assertEqual(s, data)
s = b'x' * DEFAULT_CHUNK_SIZE * 4
# Test with default chunk size
write_me(s, DEFAULT_CHUNK_SIZE)
# Multiple
write_me(s, DEFAULT_CHUNK_SIZE * 3)
# Custom
write_me(s, 262300)
def test_grid_out_lazy_connect(self):
fs = self.db.fs
outfile = GridOut(fs, file_id=-1)
self.assertRaises(NoFile, outfile.read)
self.assertRaises(NoFile, getattr, outfile, 'filename')
infile = GridIn(fs, filename=1)
infile.close()
outfile = GridOut(fs, infile._id)
outfile.read()
outfile.filename
outfile = GridOut(fs, infile._id)
outfile.readchunk()
def test_grid_in_lazy_connect(self):
client = MongoClient('badhost', connect=False,
serverSelectionTimeoutMS=10)
fs = client.db.fs
infile = GridIn(fs, file_id=-1, chunk_size=1)
self.assertRaises(ServerSelectionTimeoutError, infile.write, b'data')
self.assertRaises(ServerSelectionTimeoutError, infile.close)
def test_unacknowledged(self):
# w=0 is prohibited.
with self.assertRaises(ConfigurationError):
GridIn(rs_or_single_client(w=0).pymongo_test.fs)
if __name__ == "__main__":
unittest.main()
| 33.122186
| 80
| 0.593438
|
18cfc7bf22d2b3c47278ae817ea521a71e77e200
| 5,614
|
py
|
Python
|
app/lms/tests/apis/test_edit_loan.py
|
SaurabhPanja/redcarpet-lms
|
6f3104a8b94fb27f0af57bc88f38c4929cfd03e8
|
[
"MIT"
] | null | null | null |
app/lms/tests/apis/test_edit_loan.py
|
SaurabhPanja/redcarpet-lms
|
6f3104a8b94fb27f0af57bc88f38c4929cfd03e8
|
[
"MIT"
] | null | null | null |
app/lms/tests/apis/test_edit_loan.py
|
SaurabhPanja/redcarpet-lms
|
6f3104a8b94fb27f0af57bc88f38c4929cfd03e8
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from lms.models import User, Loan, EditLoanHistory
from django.urls import reverse
from rest_framework.test import APIClient
from rest_framework import status
import re
from lms.utils import dprint
import time
from pprint import pprint
from .helper_setUp_func import setUp_users
class EditLoanTest(TestCase):
def setUp(self):
self.client = APIClient()
self.customer_login_token, self.agent_login_token, self.admin_login_token = setUp_users()
all_admins = User.objects.filter(role='admin')
self.admin_1 = all_admins.first()
self.admin_2 = all_admins.last()
all_agents = User.objects.filter(role='agent')
self.agent_1 = all_agents.first()
self.agent_2 = all_agents.last()
all_customers = User.objects.filter(role='customer')
self.customer_1 = all_customers.first()
self.customer_2 = all_customers.last()
self.customer_loan_request = {
'customer-id' : self.customer_1.id,
'principal-amount' : "10000",
'interest-rate' : "1",
'tenure-months' : "12"
}
self.edit_loan_request = {
'principal-amount' : 50000,
'interest-rate' : 3,
'tenure-months' : 11
}
self.client.credentials(HTTP_AUTHORIZATION=self.agent_login_token)
self.client.post(reverse('lms:create_loan'), self.customer_loan_request)
self.loan_edit_url = reverse('lms:edit_loan', kwargs={'id' : Loan.objects.last().id})
def test_edit_loan_by_customer(self):
self.client.credentials(HTTP_AUTHORIZATION=self.customer_login_token)
res = self.client.post(self.loan_edit_url, self.edit_loan_request)
self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)
def test_edit_loan_by_admin(self):
self.client.credentials(HTTP_AUTHORIZATION=self.admin_login_token)
res = self.client.post(self.loan_edit_url, self.edit_loan_request)
self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)
def test_edit_loan_by_agent(self):
self.client.credentials(HTTP_AUTHORIZATION=self.agent_login_token)
res = self.client.post(self.loan_edit_url, self.edit_loan_request)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
loan_obj = Loan.objects.last()
self.assertEqual(loan_obj.principal_amount, self.edit_loan_request['principal-amount'])
self.assertEqual(loan_obj.tenure_months, self.edit_loan_request['tenure-months'])
self.assertEqual(loan_obj.interest_rate, self.edit_loan_request['interest-rate'])
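        # The expected EMI below is consistent with the standard formula
        # P * r * (1 + r)**n / ((1 + r)**n - 1) with the rate applied per
        # month: 50000 * 0.03 * 1.03**11 / (1.03**11 - 1) ~= 5403.87
        # (an inference from the numbers; the app's own computation is the
        # source of truth).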
self.assertEqual(loan_obj.emi, 5403.87)
def test_edit_non_existent_loan(self):
non_existent_loan_edit_url = reverse('lms:edit_loan', kwargs={'id' : 878})
self.client.credentials(HTTP_AUTHORIZATION=self.agent_login_token)
res = self.client.post(non_existent_loan_edit_url, self.edit_loan_request)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_edit_approved_loan(self):
loan_obj = Loan.objects.last()
loan_obj.status = 'approved'
loan_obj.save()
self.client.credentials(HTTP_AUTHORIZATION=self.agent_login_token)
res = self.client.post(self.loan_edit_url, self.edit_loan_request)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_reject_a_loan_by_agent(self):
self.edit_loan_request['status'] = 'rejected'
self.client.credentials(HTTP_AUTHORIZATION=self.agent_login_token)
res = self.client.post(self.loan_edit_url, self.edit_loan_request)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
self.assertEqual(Loan.objects.last().status, 'rejected')
def test_approve_a_loan_by_agent(self):
self.edit_loan_request['status'] = 'approved'
self.client.credentials(HTTP_AUTHORIZATION=self.agent_login_token)
res = self.client.post(self.loan_edit_url, self.edit_loan_request)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_invalid_loan_request(self):
invalid_loan_request = {
'principal-amount' : "one lakh",
'tenure-months' : "10 months",
'interest_rate' : 4000,
}
self.client.credentials(HTTP_AUTHORIZATION=self.agent_login_token)
res = self.client.post(self.loan_edit_url, invalid_loan_request)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_save_edited_loan(self):
last_loan_obj = Loan.objects.last()
self.client.credentials(HTTP_AUTHORIZATION=self.agent_login_token)
res = self.client.post(self.loan_edit_url, self.edit_loan_request)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
edit_loan = EditLoanHistory.objects.last()
self.assertEqual(edit_loan.principal_amount, last_loan_obj.principal_amount)
self.assertEqual(edit_loan.interest_rate, last_loan_obj.interest_rate)
self.assertEqual(edit_loan.tenure_months, last_loan_obj.tenure_months)
self.assertEqual(edit_loan.emi, last_loan_obj.emi)
self.assertEqual(edit_loan.status, last_loan_obj.status)
self.assertEqual(edit_loan.created_at, last_loan_obj.created_at)
self.assertEqual(edit_loan.created_for, last_loan_obj.created_for)
self.assertEqual(edit_loan.created_by, last_loan_obj.created_by)
self.assertEqual(edit_loan.approved_by, last_loan_obj.approved_by)
| 43.184615
| 100
| 0.711079
|
3708c686d72cc27d9222e8555ddac13fd7181427
| 9,740
|
py
|
Python
|
src/doblib/action.py
|
initOS/dob-lib
|
a07ea11fb40d7bdc1714b96da52fbc89ed37fb7b
|
[
"Apache-2.0"
] | null | null | null |
src/doblib/action.py
|
initOS/dob-lib
|
a07ea11fb40d7bdc1714b96da52fbc89ed37fb7b
|
[
"Apache-2.0"
] | null | null | null |
src/doblib/action.py
|
initOS/dob-lib
|
a07ea11fb40d7bdc1714b96da52fbc89ed37fb7b
|
[
"Apache-2.0"
] | null | null | null |
# © 2021 Florian Kantelberg (initOS GmbH)
# License Apache-2.0 (http://www.apache.org/licenses/).
import random
import string
import uuid
from datetime import date, datetime, timedelta
from . import base, env, utils
ALNUM = string.ascii_letters + string.digits
def load_action_arguments(args, actions=None):
    """ Parse the command line arguments for the ``action`` command """
    parser = utils.default_parser("action")
parser.add_argument(
"action",
metavar="action",
choices=actions or (),
help=f"Action to run. Possible choices: {','.join(actions)}",
)
parser.add_argument(
"--dry-run",
action="store_true",
default=False,
help="Run the action as a dry-run and don't commit changes",
)
return parser.parse_known_args(args)
class ActionEnvironment(env.Environment):
""" Class to apply actions in the environment """
def _apply(self, rec, name, **kw):
""" Apply an action on a field of a record """
field_type = rec._fields[name].type
if field_type == "boolean":
return self._boolean(rec, name=name, **kw)
if field_type == "integer":
return self._integer(rec, name=name, **kw)
if field_type in ("float", "monetary"):
return self._float(rec, name=name, **kw)
if field_type == "date":
return self._date(rec, name=name, **kw)
if field_type == "datetime":
return self._datetime(rec, name=name, **kw)
if field_type in ("char", "html", "text"):
return self._text(rec, name=name, **kw)
raise TypeError("Field type is not supported by action handler")
def _boolean(self, rec, **kw):
"""Return a value for boolean fields depending on the arguments
* Take the value from a field of the record and interpret as boolean
* Randomly True or False
"""
field = kw.get("field")
# Use the value of a different field
if field:
return bool(rec[field])
return random.choice((False, True))
def _integer(self, rec, **kw):
"""Return a value for integer fields depending on the arguments
* Take the value from a `field` of the record
* Random value between `lower` and `upper`
"""
lower = kw.get("lower", None)
upper = kw.get("upper", None)
field = kw.get("field", None)
# Use the value of a different field
if field:
return rec[field]
# Randomize the value
if isinstance(lower, int) and isinstance(upper, int):
return random.randint(lower, upper)
raise TypeError("Lower and upper bounds must be integer")
def _float(self, rec, **kw):
"""Return a value for float fields depending on the arguments
* Take the value from a `field` of the record
* Random value between `lower` and `upper`
"""
lower = kw.get("lower", 0.0)
upper = kw.get("upper", 1.0)
field = kw.get("field", None)
# Use the value of a different field
if field:
return rec[field]
# Randomize the value
return random.random() * (upper - lower) + lower
def _text(self, rec, **kw):
"""Return a value for text fields depending on the arguments
* Generate a UUID if `uuid` is set. Support UUID1 and UUID4
* Take the value from a `field` of the record. Add `prefix` and `suffix`
* Random alphanumeric string with specific `length`. Add `prefix` and `suffix`
* Current value of the field with `prefix` and `suffix` added
"""
prefix = kw.get("prefix", "")
suffix = kw.get("suffix", "")
length = kw.get("length", None)
field = kw.get("field", None)
vuuid = kw.get("uuid", None)
# Support for uuid1 and uuid4
if vuuid == 1:
return str(uuid.uuid1())
if vuuid == 4:
return str(uuid.uuid4())
# Use the value of a different field
if isinstance(field, str):
return f"{prefix}{rec[field]}{suffix}"
# Randomize the value
if isinstance(length, int) and length > 0:
return prefix + "".join(random.choices(ALNUM, k=length)) + suffix
return prefix + rec[kw["name"]] + suffix
def _datetime(self, rec, **kw):
"""Return a value for datetime fields depending on the arguments
* Take the value from a `field` of the record
* Random value between `lower` and `upper`
"""
lower = kw.get("lower", datetime(1970, 1, 1))
upper = kw.get("upper", datetime.now())
field = kw.get("field", None)
if field:
return rec[field]
diff = upper - lower
        # Use total_seconds() so the random offset covers the whole range,
        # not just the sub-day remainder of the timedelta
        return lower + timedelta(seconds=random.randint(0, int(diff.total_seconds())))
def _date(self, rec, **kw):
"""Return a value for date fields depending on the arguments
* Take the value from a `field` of the record
* Random value between `lower` and `upper`
"""
lower = kw.get("lower", date(1970, 1, 1))
upper = kw.get("upper", date.today())
field = kw.get("field", None)
if field:
return rec[field]
return lower + timedelta(days=random.randint(0, (upper - lower).days))
def _replace_references(self, env, references, values):
resolved_refs = {}
for key, val in references.items():
resolved_refs[key] = env.ref(val).id
self._replace_recursively(values, resolved_refs)
def _replace_recursively(self, value, replace_dict):
if isinstance(value, dict):
iterator = value
elif isinstance(value, list):
iterator = range(0, len(value))
else:
return
for index in iterator:
if isinstance(value[index], str):
if value[index] in replace_dict:
value[index] = replace_dict[value[index]]
else:
self._replace_recursively(value[index], replace_dict)
def _action_delete(self, env, model, domain, references):
""" Runs the delete action """
if model in env:
self._replace_references(env, references, domain)
records = env[model].with_context(active_test=False).search(domain)
if records:
records.unlink()
def _action_update(self, env, model, domain, references, values):
""" Runs the update action """
if not values or model not in env:
return
self._replace_references(env, references, domain)
self._replace_references(env, references, values)
records = env[model].with_context(active_test=False).search(domain)
if not records:
return
# Split the values in constant and dynamic
const, dynamic = {}, {}
for name, apply_act in values.items():
if name not in records._fields:
continue
if isinstance(apply_act, dict):
dynamic[name] = apply_act
else:
const[name] = apply_act
# Handle the constant values
if const:
records.write(const)
# Handle the dynamic values
if dynamic:
for rec in records:
vals = {}
for name, apply_act in dynamic.items():
vals[name] = self._apply(rec, name, **apply_act)
rec.write(vals)
    def _action_insert(self, env, model, domain, references, values):
        """ Runs the insert action """
        if not domain or not values or model not in env:
            return
        # Resolve references before using the domain to check for existing records
        self._replace_references(env, references, domain)
        self._replace_references(env, references, values)
        if env[model].with_context(active_test=False).search(domain):
            return
        env[model].with_context(active_test=False).create(values)
def apply_action(self, args=None):
""" Apply in the configuration defined actions on the database """
actions = self.get("actions", default={})
args, _ = load_action_arguments(args or [], list(actions))
if not self._init_odoo():
return
# pylint: disable=C0415,E0401
import odoo
from odoo.tools import config
# Load the Odoo configuration
config.parse_config(["-c", base.ODOO_CONFIG])
odoo.cli.server.report_configuration()
db_name = config["db_name"]
utils.info(f"Running {args.action}")
with self._manage():
with self.env(db_name) as env:
for name, item in actions[args.action].items():
utils.info(f"{args.action.capitalize()} {name}")
model = item.get("model")
if not isinstance(model, str):
utils.error("Model must be string")
continue
domain = item.get("domain", [])
if not isinstance(domain, list):
utils.error("Domain must be list")
continue
act = item.get("action", "update")
references = item.get("references", {})
if act == "update":
values = item.get("values", {})
self._action_update(env, model, domain, references, values)
elif act == "delete":
self._action_delete(env, model, domain, references)
elif act == "insert":
values = item.get("values", {})
self._action_insert(env, model, domain, references, values)
else:
utils.error(f"Undefined action {act}")
| 34.910394
| 86
| 0.565708
|
14548b8a071b679cb6705cc8cd9306283370e027
| 1,567
|
py
|
Python
|
folks/app.py
|
marinintim/folks
|
2dce457c9d57da34626717667b942fa91f62385f
|
[
"MIT"
] | 4
|
2019-12-02T20:04:55.000Z
|
2020-04-30T22:14:30.000Z
|
folks/app.py
|
marinintim/folks
|
2dce457c9d57da34626717667b942fa91f62385f
|
[
"MIT"
] | null | null | null |
folks/app.py
|
marinintim/folks
|
2dce457c9d57da34626717667b942fa91f62385f
|
[
"MIT"
] | null | null | null |
import click
from flask import Flask, jsonify, request
from flask_cors import CORS
import logging
from config import Config
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
app = Flask(__name__)
app.config.from_object(Config)
CORS(app)
# imported after ``app`` is created to avoid circular dependencies
from api import bp as api_bp
from api.errors import error_response as api_err
from database import db_session
app.register_blueprint(api_bp, url_prefix='/api/v2')
@app.cli.command('createdevadmin')
@click.argument('name')
def create_dev_admin(name):
from models.user import create_user
from schema import registration_schema
registration = registration_schema.load({
'username': name,
'password': 'qwerty',
'invite': 'superuser'
})
u = create_user(registration)
db_session.add(u)
db_session.commit()
@app.cli.command('reset_password')
@click.argument('username')
@click.argument('password')
def reset_password(username, password):
from models.user import User
u = User.query.filter_by(username=username).first()
u.set_password(password)
db_session.add(u)
db_session.delete(u.token)
db_session.commit()
@app.teardown_appcontext
def shutdown_session(exception=None):
db_session.remove()
@app.errorhandler(404)
def page_not_found(_error):
return api_err(404)
@app.errorhandler(500)
def internal_error(error):
db_session.rollback() # pylint: disable=no-member
return api_err(500)
@app.route('/__version__')
def version():
return jsonify(version=2)
| 23.044118
| 55
| 0.740906
|
90ca0125647abda8596b57625fd8016b57523ee8
| 33,045
|
py
|
Python
|
stable_baselines/td3/td3_mem.py
|
MouseHu/atten_baselines
|
ef79869a0a89ad73d2a3b3579c69a6a08d32f274
|
[
"MIT"
] | null | null | null |
stable_baselines/td3/td3_mem.py
|
MouseHu/atten_baselines
|
ef79869a0a89ad73d2a3b3579c69a6a08d32f274
|
[
"MIT"
] | null | null | null |
stable_baselines/td3/td3_mem.py
|
MouseHu/atten_baselines
|
ef79869a0a89ad73d2a3b3579c69a6a08d32f274
|
[
"MIT"
] | 2
|
2020-12-07T03:12:18.000Z
|
2020-12-08T12:16:47.000Z
|
import time
import warnings
import numpy as np
import tensorflow as tf
from stable_baselines import logger
from stable_baselines.common import tf_util, OffPolicyRLModel, SetVerbosity, TensorboardWriter
from stable_baselines.common.vec_env import VecEnv
from stable_baselines.common.math_util import safe_mean, unscale_action, scale_action
from stable_baselines.common.schedules import get_schedule_fn
from stable_baselines.common.buffers import ReplayBuffer
from stable_baselines.td3.policies import TD3Policy
from stable_baselines.td3.episodic_memory import EpisodicMemory
from collections import deque
class TD3Mem(OffPolicyRLModel):
"""
Twin Delayed DDPG (TD3)
Addressing Function Approximation Error in Actor-Critic Methods.
Original implementation: https://github.com/sfujim/TD3
Paper: https://arxiv.org/pdf/1802.09477.pdf
Introduction to TD3: https://spinningup.openai.com/en/latest/algorithms/td3.html
:param policy: (TD3Policy or str) The policy model to use (MlpPolicy, CnnPolicy, LnMlpPolicy, ...)
:param env: (Gym environment or str) The environment to learn from (if registered in Gym, can be str)
:param gamma: (float) the discount factor
:param learning_rate: (float or callable) learning rate for adam optimizer,
the same learning rate will be used for all networks (Q-Values and Actor networks)
it can be a function of the current progress (from 1 to 0)
:param buffer_size: (int) size of the replay buffer
:param batch_size: (int) Minibatch size for each gradient update
:param tau: (float) the soft update coefficient ("polyak update" of the target networks, between 0 and 1)
    :param policy_delay: (int) Policy and target networks will only be updated once every policy_delay
        training steps. The Q values are updated policy_delay times more often (every training step).
:param action_noise: (ActionNoise) the action noise type. Cf DDPG for the different action noise type.
:param target_policy_noise: (float) Standard deviation of Gaussian noise added to target policy
(smoothing noise)
:param target_noise_clip: (float) Limit for absolute value of target policy smoothing noise.
:param train_freq: (int) Update the model every `train_freq` steps.
:param learning_starts: (int) how many steps of the model to collect transitions for before learning starts
    :param gradient_steps: (int) How many gradient updates to perform after each rollout step
:param random_exploration: (float) Probability of taking a random action (as in an epsilon-greedy strategy)
This is not needed for TD3 normally but can help exploring when using HER + TD3.
This hack was present in the original OpenAI Baselines repo (DDPG + HER)
:param verbose: (int) the verbosity level: 0 none, 1 training information, 2 tensorflow debug
:param tensorboard_log: (str) the log location for tensorboard (if None, no logging)
:param _init_setup_model: (bool) Whether or not to build the network at the creation of the instance
:param policy_kwargs: (dict) additional arguments to be passed to the policy on creation
:param full_tensorboard_log: (bool) enable additional logging when using tensorboard
Note: this has no effect on TD3 logging for now
:param seed: (int) Seed for the pseudo-random generators (python, numpy, tensorflow).
If None (default), use random seed. Note that if you want completely deterministic
results, you must set `n_cpu_tf_sess` to 1.
:param n_cpu_tf_sess: (int) The number of threads for TensorFlow operations
If None, the number of cpu of the current machine will be used.
"""
def __init__(self, policy, env, eval_env, gamma=0.99, learning_rate=3e-4,
buffer_size=50000,
learning_starts=100, train_freq=100, gradient_steps=400, batch_size=128,
tau=0.005, policy_delay=2, action_noise=None,
nb_eval_steps=1000,
target_policy_noise=0.2, target_noise_clip=0.5, start_policy_learning=10000,
random_exploration=0.0, verbose=0, tensorboard_log=None,
_init_setup_model=True, policy_kwargs=None,
full_tensorboard_log=False, seed=None, n_cpu_tf_sess=None):
super(TD3Mem, self).__init__(policy=policy, env=env, replay_buffer=None, verbose=verbose,
policy_base=TD3Policy, requires_vec_env=False, policy_kwargs=policy_kwargs,
seed=seed, n_cpu_tf_sess=n_cpu_tf_sess)
print("TD3 Memory Agent here")
self.buffer_size = buffer_size
self.learning_rate = learning_rate
self.learning_starts = learning_starts
self.train_freq = train_freq
self.batch_size = batch_size
self.tau = tau
self.gradient_steps = gradient_steps
self.gamma = gamma
self.start_policy_learning = start_policy_learning
self.action_noise = action_noise
self.random_exploration = random_exploration
self.policy_delay = policy_delay
self.target_noise_clip = target_noise_clip
self.target_policy_noise = target_policy_noise
self.eval_env = eval_env
self.nb_eval_steps = nb_eval_steps
self.graph = None
self.replay_buffer = None
self.sess = None
self.tensorboard_log = tensorboard_log
self.verbose = verbose
self.params = None
self.summary = None
self.policy_tf = None
self.full_tensorboard_log = full_tensorboard_log
self.obs_target = None
self.target_policy_tf = None
self.actions_ph = None
self.rewards_ph = None
self.terminals_ph = None
self.observations_ph = None
self.action_target = None
self.next_observations_ph = None
self.step_ops = None
self.target_ops = None
self.infos_names = None
self.target_params = None
self.learning_rate_ph = None
self.processed_obs_ph = None
self.processed_next_obs_ph = None
self.policy_out = None
self.policy_train_op = None
self.policy_loss = None
self.memory = None
# self.state_repr_func = state_repr_func
# self.action_repr_func = action_repr_func
self.qf1_pi = None
self.qf2_pi = None
self.qf1_target = None
self.qf2_target = None
self.qf1_target_no_pi = None
self.qf2_target_no_pi = None
self.qvalues_ph = None
self.state_repr_t = None
self.action_repr_t = None
self.sequence = []
if _init_setup_model:
self.setup_model()
def _get_pretrain_placeholders(self):
policy = self.policy_tf
# Rescale
policy_out = unscale_action(self.action_space, self.policy_out)
return policy.obs_ph, self.actions_ph, policy_out
def setup_model(self):
# print("setup model ",self.observation_space.shape)
with SetVerbosity(self.verbose):
self.graph = tf.Graph()
with self.graph.as_default():
self.set_random_seed(self.seed)
self.sess = tf_util.make_session(num_cpu=self.n_cpu_tf_sess, graph=self.graph)
self.replay_buffer = ReplayBuffer(self.buffer_size)
with tf.variable_scope("input", reuse=False):
# Create policy and target TF objects
self.policy_tf = self.policy(self.sess, self.observation_space, self.action_space,
**self.policy_kwargs)
self.target_policy_tf = self.policy(self.sess, self.observation_space, self.action_space,
**self.policy_kwargs)
# Initialize Placeholders
self.observations_ph = self.policy_tf.obs_ph
# Normalized observation for pixels
self.processed_obs_ph = self.policy_tf.processed_obs
self.next_observations_ph = self.target_policy_tf.obs_ph
self.processed_next_obs_ph = self.target_policy_tf.processed_obs
self.action_target = self.target_policy_tf.action_ph
self.terminals_ph = tf.placeholder(tf.float32, shape=(None, 1), name='terminals')
self.rewards_ph = tf.placeholder(tf.float32, shape=(None, 1), name='rewards')
self.actions_ph = tf.placeholder(tf.float32, shape=(None,) + self.action_space.shape,
name='actions')
self.qvalues_ph = tf.placeholder(tf.float32, shape=(None, 1),
name='qvalues')
self.learning_rate_ph = tf.placeholder(tf.float32, [], name="learning_rate_ph")
with tf.variable_scope("model", reuse=False):
# Create the policy
self.policy_out = policy_out = self.policy_tf.make_actor(self.processed_obs_ph)
# Use two Q-functions to improve performance by reducing overestimation bias
qf1, qf2 = self.policy_tf.make_critics(self.processed_obs_ph, self.actions_ph)
# qf3, qf4 = self.policy_tf.make_duel_critics(self.processed_obs_ph, self.actions_ph,
# scope="buffer_values_fn")
# Q value when following the current policy
qf1_pi, qf2_pi = self.policy_tf.make_critics(self.processed_obs_ph,
policy_out, reuse=True)
# self.qf1 = qf1
# self.qf2 = qf2
self.qf1_pi = qf1_pi
self.qf2_pi = qf2_pi
with tf.variable_scope("target", reuse=False):
# Create target networks
target_policy_out = self.target_policy_tf.make_actor(self.processed_next_obs_ph)
# Target policy smoothing, by adding clipped noise to target actions
target_noise = tf.random_normal(tf.shape(target_policy_out), stddev=self.target_policy_noise)
target_noise = tf.clip_by_value(target_noise, -self.target_noise_clip, self.target_noise_clip)
# Clip the noisy action to remain in the bounds [-1, 1] (output of a tanh)
noisy_target_action = tf.clip_by_value(target_policy_out + target_noise, -1, 1)
# Q values when following the target policy
qf1_target, qf2_target = self.target_policy_tf.make_critics(self.processed_next_obs_ph,
noisy_target_action)
self.qf1_target = qf1_target
self.qf2_target = qf2_target
self.qf1_target_no_pi, self.qf2_target_no_pi = self.target_policy_tf.make_critics(
self.processed_obs_ph, self.actions_ph, reuse=True)
with tf.variable_scope("loss", reuse=False):
# Take the min of the two target Q-Values (clipped Double-Q Learning)
min_qf_target = tf.minimum(qf1_target, qf2_target)
# Targets for Q value regression
q_backup = tf.stop_gradient(
self.rewards_ph +
(1 - self.terminals_ph) * self.gamma * min_qf_target
)
                    # Compute Q-Function loss: regress towards the larger of the usual
                    # one-step TD target and the episodic-memory return estimate
                    # (qvalues_ph), which acts as a lower bound on the target value
qf1_loss = tf.reduce_mean((tf.maximum(self.qvalues_ph, q_backup) - qf1) ** 2)
qf2_loss = tf.reduce_mean((tf.maximum(self.qvalues_ph, q_backup) - qf2) ** 2)
# qf1_loss = tf.reduce_mean((q_backup - qf1) ** 2) + tf.reduce_mean((self.qvalues_ph - qf1) ** 2)
# qf2_loss = tf.reduce_mean((q_backup - qf2) ** 2) + tf.reduce_mean((self.qvalues_ph - qf2) ** 2)
# qf1_loss = tf.reduce_mean((q_backup - qf1) ** 2)
# qf2_loss = tf.reduce_mean((q_backup - qf2) ** 2)
qvalues_losses = qf1_loss + qf2_loss
# Policy loss: maximise q value
self.policy_loss = policy_loss = -tf.reduce_mean(qf1_pi)
# Policy train op
# will be called only every n training steps,
# where n is the policy delay
policy_optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate_ph)
policy_train_op = policy_optimizer.minimize(policy_loss,
var_list=tf_util.get_trainable_vars('model/pi'))
self.policy_train_op = policy_train_op
# Q Values optimizer
qvalues_optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate_ph)
qvalues_params = tf_util.get_trainable_vars('model/values_fn/')
# Q Values and policy target params
source_params = tf_util.get_trainable_vars("model/")
target_params = tf_util.get_trainable_vars("target/")
# Polyak averaging for target variables
self.target_ops = [
tf.assign(target, (1 - self.tau) * target + self.tau * source)
for target, source in zip(target_params, source_params)
]
# Initializing target to match source variables
target_init_op = [
tf.assign(target, source)
for target, source in zip(target_params, source_params)
]
train_values_op = qvalues_optimizer.minimize(qvalues_losses, var_list=qvalues_params)
self.infos_names = ['qf1_loss', 'qf2_loss']
# All ops to call during one training step
self.step_ops = [qf1_loss, qf2_loss,
qf1, qf2, train_values_op]
# Monitor losses and entropy in tensorboard
tf.summary.scalar('policy_loss', policy_loss)
tf.summary.scalar('qf1_loss', qf1_loss)
tf.summary.scalar('qf2_loss', qf2_loss)
tf.summary.scalar('learning_rate', tf.reduce_mean(self.learning_rate_ph))
# Retrieve parameters that must be saved
self.params = tf_util.get_trainable_vars("model")
self.target_params = tf_util.get_trainable_vars("target/")
# Initialize Variables and target network
with self.sess.as_default():
self.sess.run(tf.global_variables_initializer())
self.sess.run(target_init_op)
self.summary = tf.summary.merge_all()
self.memory = EpisodicMemory(int(1e6), state_dim=1, action_dim=1,
obs_space=self.observation_space,
action_shape=self.action_space.shape,
qfs=None, obs_ph=self.policy_tf.processed_obs,
action_ph=self.actions_ph, sess=self.sess)
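    # A hedged note (not in the original): EpisodicMemory stores whole
    # trajectories together with per-transition return estimates; `_train_step`
    # below samples from it rather than from the plain ReplayBuffer.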
def _train_step(self, step, writer, learning_rate, update_policy):
# Sample a batch from the replay buffer
# batch = self.replay_buffer.sample(self.batch_size, env=self._vec_normalize_env)
batch = self.memory.sample(self.batch_size, mix=False)
if batch is None:
return 0, 0, 0, 0
batch_obs, batch_actions, batch_rewards, batch_next_obs, batch_dones, batch_returns = batch['obs0'], batch[
'actions'], batch['rewards'], batch['obs1'], batch['terminals1'], batch['return']
# batch = self.replay_buffer.sample(self.batch_size, env=self._vec_normalize_env)
# batch_obs, batch_actions, batch_rewards, batch_next_obs, batch_dones = batch
feed_dict = {
self.observations_ph: batch_obs,
self.actions_ph: batch_actions,
self.next_observations_ph: batch_next_obs,
self.rewards_ph: batch_rewards.reshape(self.batch_size, -1),
self.terminals_ph: batch_dones.reshape(self.batch_size, -1),
self.learning_rate_ph: learning_rate,
self.qvalues_ph: batch_returns.reshape(self.batch_size, -1)
# self.qvalues_ph: np.ones((self.batch_size, 1))
}
# print("training ",batch_obs.shape)
step_ops = self.step_ops
if update_policy:
# Update policy and target networks
step_ops = step_ops + [self.policy_train_op, self.target_ops, self.policy_loss]
# Do one gradient step
# and optionally compute log for tensorboard
if writer is not None:
out = self.sess.run([self.summary] + step_ops, feed_dict)
summary = out.pop(0)
writer.add_summary(summary, step)
else:
out = self.sess.run(step_ops, feed_dict)
        # Unpack to monitor losses. Note: despite the names, the third and fourth
        # values are the raw Q-value batches from `step_ops`, not extra losses.
qf1_loss, qf2_loss, qf3_loss, qf4_loss, *_values = out
return qf1_loss, qf2_loss, qf3_loss, qf4_loss
def learn(self, total_timesteps, eval_interval=10000, update_interval=10000, callback=None,
log_interval=4, tb_log_name="TD3", reset_num_timesteps=True, replay_wrapper=None):
new_tb_log = self._init_num_timesteps(reset_num_timesteps)
callback = self._init_callback(callback)
if replay_wrapper is not None:
self.replay_buffer = replay_wrapper(self.replay_buffer)
with SetVerbosity(self.verbose), TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name, new_tb_log) \
as writer:
self._setup_learn()
# Transform to callable if needed
self.learning_rate = get_schedule_fn(self.learning_rate)
# Initial learning rate
current_lr = self.learning_rate(1)
qs_buffer = deque(maxlen=1000)
start_time = time.time()
episode_rewards = [0.0]
episode_successes = []
if self.action_noise is not None:
self.action_noise.reset()
obs = self.env.reset()
# Retrieve unnormalized observation for saving into the buffer
if self._vec_normalize_env is not None:
obs_ = self._vec_normalize_env.get_original_obs().squeeze()
n_updates = 0
infos_values = []
discount_episodic_reward = 0.
callback.on_training_start(locals(), globals())
callback.on_rollout_start()
for step in range(total_timesteps):
# Before training starts, randomly sample actions
# from a uniform distribution for better exploration.
# Afterwards, use the learned policy
# if random_exploration is set to 0 (normal setting)
if self.num_timesteps < self.learning_starts or np.random.rand() < self.random_exploration:
                    # actions sampled from the action space lie in the environment-specific range,
                    # but the algorithm operates on tanh-squashed actions, so a simple rescaling is used
unscaled_action = self.env.action_space.sample()
action = scale_action(self.action_space, unscaled_action)
else:
action = self.policy_tf.step(obs[None]).flatten()
# Add noise to the action, as the policy
# is deterministic, this is required for exploration
if self.action_noise is not None:
action = np.clip(action + self.action_noise(), -1, 1)
# Rescale from [-1, 1] to the correct bounds
unscaled_action = unscale_action(self.action_space, action)
assert action.shape == self.env.action_space.shape
new_obs, reward, done, info = self.env.step(unscaled_action)
self.num_timesteps += 1
# Only stop training if return value is False, not when it is None. This is for backwards
# compatibility with callbacks that have no return statement.
callback.update_locals(locals())
if callback.on_step() is False:
break
# Store only the unnormalized version
if self._vec_normalize_env is not None:
new_obs_ = self._vec_normalize_env.get_original_obs().squeeze()
reward_ = self._vec_normalize_env.get_original_reward().squeeze()
else:
# Avoid changing the original ones
obs_, new_obs_, reward_ = obs, new_obs, reward
q1, q2 = self.sess.run([self.qf1_target_no_pi, self.qf2_target_no_pi],
feed_dict={self.processed_obs_ph: obs[None], self.actions_ph: [action]})
q = np.squeeze(np.minimum(q1, q2))
qs_buffer.extend([q])
discount_episodic_reward = reward_ + self.gamma * discount_episodic_reward
# Store transition in the replay buffer.
self.replay_buffer_add(obs_, action, reward_, new_obs_, done, info)
self.sequence.append((obs_, action, self.state_repr_t, self.action_repr_t, reward_, None, False))
truly_done = info.get('truly_done', True)
if done:
# action, q = self.pi(obs1, apply_noise=False, compute_Q=True)
if truly_done:
self.sequence.append(
(new_obs_, action, self.state_repr_t, self.action_repr_t, 0, 0, done))
else:
q1, q2 = self.sess.run([self.qf1_target, self.qf2_target],
feed_dict={self.processed_next_obs_ph: new_obs[None]})
q = np.minimum(q1, q2)
self.sequence.append(
(new_obs_, action, self.state_repr_t, self.action_repr_t, 0, np.squeeze(q), done))
# self.episodic_memory.update_sequence_iterate(self.sequence, self.k)
self.memory.update_sequence_corrected(self.sequence)
# self.memory.update_sequence_corrected(self.sequence)
self.sequence = []
obs = new_obs
# Save the unnormalized observation
if self._vec_normalize_env is not None:
obs_ = new_obs_
# Retrieve reward and episode length if using Monitor wrapper
maybe_ep_info = info.get('episode')
if maybe_ep_info is not None:
self.ep_info_buf.extend([maybe_ep_info])
if writer is not None:
# Write reward per episode to tensorboard
ep_reward = np.array([reward_]).reshape((1, -1))
ep_done = np.array([done]).reshape((1, -1))
tf_util.total_episode_reward_logger(self.episode_reward, ep_reward,
ep_done, writer, self.num_timesteps)
if self.num_timesteps % self.train_freq == 0:
callback.on_rollout_end()
mb_infos_vals = []
# Update policy, critics and target networks
for grad_step in range(self.gradient_steps):
# Break if the warmup phase is not over
# or if there are not enough samples in the replay buffer
if not self.replay_buffer.can_sample(self.batch_size) \
or self.num_timesteps < self.learning_starts:
break
n_updates += 1
# Compute current learning_rate
frac = 1.0 - step / total_timesteps
current_lr = self.learning_rate(frac)
# Update policy and critics (q functions)
# Note: the policy is updated less frequently than the Q functions
# this is controlled by the `policy_delay` parameter
# if_train_policy = (step > self.start_policy_learning) and \
if_train_policy = ((step + grad_step) % self.policy_delay == 0)
mb_infos_vals.append(
self._train_step(step, writer, current_lr, if_train_policy))
# Log losses and entropy, useful for monitor training
if len(mb_infos_vals) > 0:
infos_values = np.mean(mb_infos_vals, axis=0)
callback.on_rollout_start()
if step % eval_interval == 0:
# Evaluate.
eval_episode_rewards = []
eval_qs = []
                    if self.eval_env is not None:
                        eval_episode_reward = 0.
                        # evaluate on the eval env's own observations
                        eval_obs = self.eval_env.reset()
                        for _ in range(self.nb_eval_steps):
if step >= total_timesteps:
return self
                            eval_action = self.policy_tf.step(eval_obs[None]).flatten()
                            # eval_action = self.non_param_policy(eval_obs[None]).flatten()
unscaled_action = unscale_action(self.action_space, eval_action)
eval_obs, eval_r, eval_done, eval_info = self.eval_env.step(unscaled_action)
eval_episode_reward += eval_r
# Retrieve reward and episode length if using Monitor wrapper
eval_maybe_ep_info = eval_info.get('episode')
if eval_maybe_ep_info is not None:
self.eval_ep_info_buf.extend([eval_maybe_ep_info])
if eval_done:
                                    if not isinstance(self.eval_env, VecEnv):
eval_obs = self.eval_env.reset()
eval_episode_rewards.append(eval_episode_reward)
eval_episode_reward = 0.
if len(eval_episode_rewards[-101:-1]) == 0:
eval_mean_reward = -np.inf
else:
eval_mean_reward = round(float(np.mean(eval_episode_rewards[-101:-1])), 1)
logger.logkv("eval mean 100 episode reward", eval_mean_reward)
                        if len(self.eval_ep_info_buf) > 0 and len(self.eval_ep_info_buf[0]) > 0:
logger.logkv('eval_ep_rewmean',
safe_mean([ep_info['r'] for ep_info in self.eval_ep_info_buf]))
logger.logkv('eval_eplenmean',
safe_mean([ep_info['l'] for ep_info in self.eval_ep_info_buf]))
logger.logkv('eval_time_elapsed', int(time.time() - start_time))
logger.dumpkvs()
episode_rewards[-1] += reward_
if done:
if self.action_noise is not None:
self.action_noise.reset()
if not isinstance(self.env, VecEnv):
obs = self.env.reset()
episode_rewards.append(0.0)
maybe_is_success = info.get('is_success')
if maybe_is_success is not None:
episode_successes.append(float(maybe_is_success))
if len(episode_rewards[-101:-1]) == 0:
mean_reward = -np.inf
else:
mean_reward = round(float(np.mean(episode_rewards[-101:-1])), 1)
                    # subtract 1 as we appended a new term just now
num_episodes = len(episode_rewards) - 1
# Display training infos
if self.verbose >= 1 and done and log_interval is not None and num_episodes % log_interval == 0:
fps = int(step / (time.time() - start_time))
logger.logkv("episodes", num_episodes)
logger.logkv("mean 100 episode reward", mean_reward)
if len(self.ep_info_buf) > 0 and len(self.ep_info_buf[0]) > 0:
logger.logkv('ep_rewmean', safe_mean([ep_info['r'] for ep_info in self.ep_info_buf]))
logger.logkv('eplenmean', safe_mean([ep_info['l'] for ep_info in self.ep_info_buf]))
logger.logkv('qs_mean', safe_mean([x for x in qs_buffer]))
# logger.logkv('q4_mean', safe_mean([x for x in q4s]))
logger.logkv('discount_q', discount_episodic_reward)
logger.logkv("n_updates", n_updates)
logger.logkv("current_lr", current_lr)
logger.logkv("fps", fps)
logger.logkv('time_elapsed', int(time.time() - start_time))
if len(episode_successes) > 0:
logger.logkv("success rate", np.mean(episode_successes[-100:]))
if len(infos_values) > 0:
for (name, val) in zip(self.infos_names, infos_values):
logger.logkv(name, val)
logger.logkv("total timesteps", self.num_timesteps)
logger.dumpkvs()
# Reset infos:
qs_buffer.clear()
# q4s.clear()
discount_episodic_reward = 0.
infos_values = []
callback.on_training_end()
return self
def action_probability(self, observation, state=None, mask=None, actions=None, logp=False):
_ = np.array(observation)
if actions is not None:
raise ValueError("Error: TD3 does not have action probabilities.")
        # here there are no action probabilities, as TD3 (like DDPG) does not use a probability distribution
warnings.warn("Warning: action probability is meaningless for TD3. Returning None")
return None
def predict(self, observation, state=None, mask=None, deterministic=True):
observation = np.array(observation)
vectorized_env = self._is_vectorized_observation(observation, self.observation_space)
observation = observation.reshape((-1,) + self.observation_space.shape)
actions = self.policy_tf.step(observation)
if self.action_noise is not None and not deterministic:
actions = np.clip(actions + self.action_noise(), -1, 1)
actions = actions.reshape((-1,) + self.action_space.shape) # reshape to the correct action shape
actions = unscale_action(self.action_space, actions) # scale the output for the prediction
if not vectorized_env:
actions = actions[0]
return actions, None
def get_parameter_list(self):
return (self.params +
self.target_params)
def save(self, save_path, cloudpickle=False):
data = {
"learning_rate": self.learning_rate,
"buffer_size": self.buffer_size,
"learning_starts": self.learning_starts,
"train_freq": self.train_freq,
"batch_size": self.batch_size,
"tau": self.tau,
# Should we also store the replay buffer?
# this may lead to high memory usage
# with all transition inside
# "replay_buffer": self.replay_buffer
"policy_delay": self.policy_delay,
"target_noise_clip": self.target_noise_clip,
"target_policy_noise": self.target_policy_noise,
"gamma": self.gamma,
"verbose": self.verbose,
"observation_space": self.observation_space,
"action_space": self.action_space,
"policy": self.policy,
"n_envs": self.n_envs,
"n_cpu_tf_sess": self.n_cpu_tf_sess,
"seed": self.seed,
"action_noise": self.action_noise,
"random_exploration": self.random_exploration,
"_vectorize_action": self._vectorize_action,
"policy_kwargs": self.policy_kwargs
}
params_to_save = self.get_parameters()
self._save_to_file(save_path, data=data, params=params_to_save, cloudpickle=cloudpickle)
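# A hedged save/restore sketch (the path is a placeholder). `save` is defined
# above; `load` comes from the stable_baselines base class and may need
# adapting here, since `eval_env` is a required constructor argument:
#     model.save("./td3_mem_checkpoint")
#     model = TD3Mem.load("./td3_mem_checkpoint", env=make_env())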
| 52.452381
| 119
| 0.582115
|
e7f379548ebb0ca14d1a1a36e3c8eb78168127de
| 2,536
|
py
|
Python
|
pycondor/tests/test_visualize.py
|
GregoryAshton/pycondor
|
72423eac508c6d5ec79f8f3de71011ba61a38537
|
[
"MIT"
] | 23
|
2017-05-16T21:37:07.000Z
|
2021-11-15T13:46:03.000Z
|
pycondor/tests/test_visualize.py
|
GregoryAshton/pycondor
|
72423eac508c6d5ec79f8f3de71011ba61a38537
|
[
"MIT"
] | 83
|
2017-01-27T21:00:38.000Z
|
2022-02-10T16:27:54.000Z
|
pycondor/tests/test_visualize.py
|
GregoryAshton/pycondor
|
72423eac508c6d5ec79f8f3de71011ba61a38537
|
[
"MIT"
] | 14
|
2017-04-05T15:40:37.000Z
|
2021-11-15T13:38:09.000Z
|
import pytest
graphviz = pytest.importorskip('graphviz') # noqa: E402
import os
import re
from pycondor.job import Job
from pycondor.dagman import Dagman
from pycondor.visualize import visualize, extract_format, dag_to_graphviz
from pycondor.utils import clear_pycondor_environment_variables
clear_pycondor_environment_variables()
here = os.path.abspath(os.path.dirname(__file__))
example_script = os.path.join(here, 'example_script.py')
@pytest.fixture()
def dagman(tmpdir_factory):
dag = Dagman(name='example_dagman')
merge = Job(name='merge',
executable='merge.py',
dag=dag)
for i in range(5):
processing = Job(name='processing_{}'.format(i),
executable='process.py',
dag=dag)
merge.add_parent(processing)
cleanup_dag = Dagman(name='cleanup',
dag=dag)
cleanup_dag.add_parent(merge)
return dag
def test_visualize_save_file(dagman, tmpdir):
filename = str(tmpdir.join('viz.png'))
visualize(dagman, filename)
assert os.path.exists(filename)
@pytest.mark.parametrize('filename, expected', [
('myfile.png', 'png'),
('myfile.pdf', 'pdf'),
('myfile.dot', 'dot'),
('myfile.svg', 'svg'),
('myfile.jpeg', 'jpeg'),
('myfile.jpg', 'jpg'),
])
def test_extract_format(filename, expected):
assert extract_format(filename) == expected
def test_extract_format_invalid_format(dagman):
with pytest.raises(ValueError) as excinfo:
extract_format('dag_graph.csv')
assert 'invalid format' in str(excinfo.value).lower()
def test_dag_to_graphviz(dagman):
g = dag_to_graphviz(dagman)
assert isinstance(g, graphviz.Digraph)
def test_graph_shapes(dagman):
g = dag_to_graphviz(dagman)
shapes = {}
label_shape_re = re.compile(r'.*\[label=(.*?) shape=(.*?)\]')
for line in g.body:
match = label_shape_re.match(line)
if match:
name = match.group(1)
shape = match.group(2)
shapes[name] = shape
else:
continue
# Check node names
assert set(node.name for node in dagman) == set(shapes.keys())
# Check node shapes
for node in dagman:
expected_shape = 'circle' if isinstance(node, Job) else 'square'
assert shapes[node.name] == expected_shape
def test_visualize_method(dagman):
graph_vis_func = visualize(dagman).body
graph_vis_method = dagman.visualize().body
assert graph_vis_func == graph_vis_method
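# A hedged note, not part of the original module: these tests are normally run
# with pytest, e.g. `pytest pycondor/tests/test_visualize.py`; when graphviz is
# not importable, the importorskip call at the top skips the whole module.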
| 27.268817
| 73
| 0.656151
|
1d41b1ad8ea1ce625eb5d29957feaf635f8bb1c8
| 7,879
|
py
|
Python
|
lxml/builder.py
|
EnjoyLifeFund/py36pkgs
|
0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2
|
[
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | 1
|
2017-12-05T15:35:47.000Z
|
2017-12-05T15:35:47.000Z
|
lxml/builder.py
|
EnjoyLifeFund/py36pkgs
|
0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2
|
[
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | 10
|
2017-07-13T00:24:03.000Z
|
2017-07-17T07:39:03.000Z
|
lxml_aws_binaries/lxml/builder.py
|
jersearls/cpap-filler
|
4106c6098d8caf13dcf1acad4366a8e0cc99db02
|
[
"MIT"
] | 7
|
2017-08-01T04:02:07.000Z
|
2018-10-06T21:07:20.000Z
|
#
# Element generator factory by Fredrik Lundh.
#
# Source:
# http://online.effbot.org/2006_11_01_archive.htm#et-builder
# http://effbot.python-hosting.com/file/stuff/sandbox/elementlib/builder.py
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2004 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
"""
The ``E`` Element factory for generating XML documents.
"""
import lxml.etree as ET
from functools import partial
try:
basestring
except NameError:
basestring = str
try:
unicode
except NameError:
unicode = str
class ElementMaker(object):
"""Element generator factory.
Unlike the ordinary Element factory, the E factory allows you to pass in
more than just a tag and some optional attributes; you can also pass in
text and other elements. The text is added as either text or tail
attributes, and elements are inserted at the right spot. Some small
examples::
>>> from lxml import etree as ET
>>> from lxml.builder import E
>>> ET.tostring(E("tag"))
'<tag/>'
>>> ET.tostring(E("tag", "text"))
'<tag>text</tag>'
>>> ET.tostring(E("tag", "text", key="value"))
'<tag key="value">text</tag>'
>>> ET.tostring(E("tag", E("subtag", "text"), "tail"))
'<tag><subtag>text</subtag>tail</tag>'
For simple tags, the factory also allows you to write ``E.tag(...)`` instead
of ``E('tag', ...)``::
>>> ET.tostring(E.tag())
'<tag/>'
>>> ET.tostring(E.tag("text"))
'<tag>text</tag>'
>>> ET.tostring(E.tag(E.subtag("text"), "tail"))
'<tag><subtag>text</subtag>tail</tag>'
Here's a somewhat larger example; this shows how to generate HTML
documents, using a mix of prepared factory functions for inline elements,
nested ``E.tag`` calls, and embedded XHTML fragments::
# some common inline elements
A = E.a
I = E.i
B = E.b
def CLASS(v):
# helper function, 'class' is a reserved word
return {'class': v}
page = (
E.html(
E.head(
E.title("This is a sample document")
),
E.body(
E.h1("Hello!", CLASS("title")),
E.p("This is a paragraph with ", B("bold"), " text in it!"),
E.p("This is another paragraph, with a ",
A("link", href="http://www.python.org"), "."),
E.p("Here are some reserved characters: <spam&egg>."),
ET.XML("<p>And finally, here is an embedded XHTML fragment.</p>"),
)
)
)
print ET.tostring(page)
Here's a prettyprinted version of the output from the above script::
<html>
<head>
<title>This is a sample document</title>
</head>
<body>
<h1 class="title">Hello!</h1>
<p>This is a paragraph with <b>bold</b> text in it!</p>
<p>This is another paragraph, with <a href="http://www.python.org">link</a>.</p>
<p>Here are some reserved characters: <spam&egg>.</p>
<p>And finally, here is an embedded XHTML fragment.</p>
</body>
</html>
For namespace support, you can pass a namespace map (``nsmap``)
and/or a specific target ``namespace`` to the ElementMaker class::
>>> E = ElementMaker(namespace="http://my.ns/")
>>> print(ET.tostring( E.test ))
<test xmlns="http://my.ns/"/>
>>> E = ElementMaker(namespace="http://my.ns/", nsmap={'p':'http://my.ns/'})
>>> print(ET.tostring( E.test ))
<p:test xmlns:p="http://my.ns/"/>
"""
def __init__(self, typemap=None,
namespace=None, nsmap=None, makeelement=None):
if namespace is not None:
self._namespace = '{' + namespace + '}'
else:
self._namespace = None
if nsmap:
self._nsmap = dict(nsmap)
else:
self._nsmap = None
if makeelement is not None:
assert callable(makeelement)
self._makeelement = makeelement
else:
self._makeelement = ET.Element
# initialize type map for this element factory
if typemap:
typemap = typemap.copy()
else:
typemap = {}
def add_text(elem, item):
try:
elem[-1].tail = (elem[-1].tail or "") + item
except IndexError:
elem.text = (elem.text or "") + item
def add_cdata(elem, cdata):
if elem.text:
raise ValueError("Can't add a CDATA section. Element already has some text: %r" % elem.text)
elem.text = cdata
if str not in typemap:
typemap[str] = add_text
if unicode not in typemap:
typemap[unicode] = add_text
if ET.CDATA not in typemap:
typemap[ET.CDATA] = add_cdata
def add_dict(elem, item):
attrib = elem.attrib
for k, v in item.items():
if isinstance(v, basestring):
attrib[k] = v
else:
attrib[k] = typemap[type(v)](None, v)
if dict not in typemap:
typemap[dict] = add_dict
self._typemap = typemap
def __call__(self, tag, *children, **attrib):
get = self._typemap.get
if self._namespace is not None and tag[0] != '{':
tag = self._namespace + tag
elem = self._makeelement(tag, nsmap=self._nsmap)
if attrib:
get(dict)(elem, attrib)
for item in children:
if callable(item):
item = item()
t = get(type(item))
if t is None:
if ET.iselement(item):
elem.append(item)
continue
for basetype in type(item).__mro__:
# See if the typemap knows of any of this type's bases.
t = get(basetype)
if t is not None:
break
else:
raise TypeError("bad argument type: %s(%r)" %
(type(item).__name__, item))
v = t(elem, item)
if v:
get(type(v))(elem, v)
return elem
def __getattr__(self, tag):
return partial(self, tag)
# create factory object
E = ElementMaker()
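# A minimal usage sketch (mirrors the doctest examples in the class docstring):
#     from lxml.builder import E
#     page = E.html(E.body(E.p("hello")))
#     print(ET.tostring(page))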
| 33.52766
| 108
| 0.552989
|
fcae2e3846b7677b8be7ca813186bc423acb4fff
| 791
|
py
|
Python
|
async_stripe/api_resources/abstract/deletable_api_resource.py
|
bhch/async-stripe
|
75d934a8bb242f664e7be30812c12335cf885287
|
[
"MIT",
"BSD-3-Clause"
] | 8
|
2021-05-29T08:57:58.000Z
|
2022-02-19T07:09:25.000Z
|
async_stripe/api_resources/abstract/deletable_api_resource.py
|
bhch/async-stripe
|
75d934a8bb242f664e7be30812c12335cf885287
|
[
"MIT",
"BSD-3-Clause"
] | 5
|
2021-05-31T10:18:36.000Z
|
2022-01-25T11:39:03.000Z
|
async_stripe/api_resources/abstract/deletable_api_resource.py
|
bhch/async-stripe
|
75d934a8bb242f664e7be30812c12335cf885287
|
[
"MIT",
"BSD-3-Clause"
] | 1
|
2021-05-29T13:27:10.000Z
|
2021-05-29T13:27:10.000Z
|
from stripe import util
from stripe.six.moves.urllib.parse import quote_plus
from stripe.api_resources.abstract.deletable_api_resource import (
DeletableAPIResource,
)
async def _cls_delete_patch(cls, sid, **params):
url = "%s/%s" % (cls.class_url(), quote_plus(util.utf8(sid)))
return await cls._static_request("delete", url, **params)
@util.class_method_variant("_cls_delete")
async def delete_patch(self, **params):
self.refresh_from(await self.request("delete", self.instance_url(), params))
return self
DeletableAPIResource._cls_delete = classmethod(_cls_delete_patch)
DeletableAPIResource.delete = delete_patch
for subclass in DeletableAPIResource.__subclasses__():
subclass._cls_delete = classmethod(_cls_delete_patch)
subclass.delete = delete_patch
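# A hedged usage sketch (the resource and id below are illustrative, not part
# of this module): once this patch module has been imported, `delete` becomes
# awaitable on every DeletableAPIResource subclass, e.g.:
#     plan = await stripe.Plan.retrieve("some_plan_id")  # hypothetical id
#     await plan.delete()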
| 32.958333
| 80
| 0.777497
|
9507be01c14445bed7df1105de8c3a4c37a30532
| 266
|
py
|
Python
|
cracking_the_coding_interview_qs/17.14/get_k_smallest_test.py
|
angelusualle/algorithms
|
86286a49db2a755bc57330cb455bcbd8241ea6be
|
[
"Apache-2.0"
] | null | null | null |
cracking_the_coding_interview_qs/17.14/get_k_smallest_test.py
|
angelusualle/algorithms
|
86286a49db2a755bc57330cb455bcbd8241ea6be
|
[
"Apache-2.0"
] | null | null | null |
cracking_the_coding_interview_qs/17.14/get_k_smallest_test.py
|
angelusualle/algorithms
|
86286a49db2a755bc57330cb455bcbd8241ea6be
|
[
"Apache-2.0"
] | null | null | null |
import unittest
from get_k_smallest import get_k_smallest
class Test_Case_Get_K_Smallest(unittest.TestCase):
def test_get_k_smallest(self):
        self.assertListEqual(get_k_smallest(3, list(range(30))), [0, 1, 2])
if __name__ == '__main__':
unittest.main()
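# A hedged note: with get_k_smallest.py importable from this directory, the
# suite can also be run as `python -m unittest get_k_smallest_test`.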
| 29.555556
| 73
| 0.755639
|
7edad150a42213d07cad7aed661c2b6b03c00631
| 534
|
py
|
Python
|
backend/home/migrations/0001_load_initial_data.py
|
crowdbotics-dev/sowtestapp-4
|
ec084de3641536274f6acf66f4a575fd2652ded3
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/home/migrations/0001_load_initial_data.py
|
crowdbotics-dev/sowtestapp-4
|
ec084de3641536274f6acf66f4a575fd2652ded3
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/home/migrations/0001_load_initial_data.py
|
crowdbotics-dev/sowtestapp-4
|
ec084de3641536274f6acf66f4a575fd2652ded3
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
from django.db import migrations
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "sowtestapp-4.botics.co"
site_params = {
"name": "SOWTestApp",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_site),
]
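# A hedged note (not part of the original migration): update_or_create with
# id=1 keeps this data migration idempotent: re-running it updates the
# existing Site row rather than inserting a duplicate.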
| 20.538462
| 61
| 0.655431
|
db022e99931a23b1b284bb9b5e9808bed4a4b062
| 12,370
|
py
|
Python
|
modules/api/functional_test/live_tests/shared_zone_test_context.py
|
dunn5/vinyldns
|
a8505d7130c9846c3ab2fec5722aa3e57585a28e
|
[
"Apache-2.0"
] | null | null | null |
modules/api/functional_test/live_tests/shared_zone_test_context.py
|
dunn5/vinyldns
|
a8505d7130c9846c3ab2fec5722aa3e57585a28e
|
[
"Apache-2.0"
] | null | null | null |
modules/api/functional_test/live_tests/shared_zone_test_context.py
|
dunn5/vinyldns
|
a8505d7130c9846c3ab2fec5722aa3e57585a28e
|
[
"Apache-2.0"
] | null | null | null |
import time
from vinyldns_python import VinylDNSClient
from vinyldns_context import VinylDNSTestContext
from hamcrest import *
from utils import *
class SharedZoneTestContext(object):
"""
Creates multiple zones to test authorization / access to shared zones across users
"""
def __init__(self):
self.ok_vinyldns_client = VinylDNSClient(VinylDNSTestContext.vinyldns_url, 'okAccessKey', 'okSecretKey')
self.dummy_vinyldns_client = VinylDNSClient(VinylDNSTestContext.vinyldns_url, 'dummyAccessKey', 'dummySecretKey')
self.dummy_group = None
self.ok_group = None
self.tear_down() # ensures that the environment is clean before starting
try:
self.ok_group = self.ok_vinyldns_client.get_group("ok", status=200)
# in theory this shouldn't be needed, but getting 'user is not in group' errors on zone creation
self.confirm_member_in_group(self.ok_vinyldns_client, self.ok_group)
dummy_group = {
'name': 'dummy-group',
'email': 'test@test.com',
'description': 'this is a description',
                'members': [{'id': 'dummy'}],
                'admins': [{'id': 'dummy'}]
}
self.dummy_group = self.dummy_vinyldns_client.create_group(dummy_group, status=200)
# in theory this shouldn't be needed, but getting 'user is not in group' errors on zone creation
self.confirm_member_in_group(self.dummy_vinyldns_client, self.dummy_group)
ok_zone_change = self.ok_vinyldns_client.create_zone(
{
'name': 'ok.',
'email': 'test@test.com',
'shared': False,
'adminGroupId': self.ok_group['id'],
'connection': {
'name': 'ok.',
'keyName': VinylDNSTestContext.dns_key_name,
'key': VinylDNSTestContext.dns_key,
'primaryServer': VinylDNSTestContext.dns_ip
},
'transferConnection': {
'name': 'ok.',
'keyName': VinylDNSTestContext.dns_key_name,
'key': VinylDNSTestContext.dns_key,
'primaryServer': VinylDNSTestContext.dns_ip
}
}, status=202)
self.ok_zone = ok_zone_change['zone']
dummy_zone_change = self.dummy_vinyldns_client.create_zone(
{
'name': 'dummy.',
'email': 'test@test.com',
'shared': False,
'adminGroupId': self.dummy_group['id'],
'connection': {
'name': 'dummy.',
'keyName': VinylDNSTestContext.dns_key_name,
'key': VinylDNSTestContext.dns_key,
'primaryServer': VinylDNSTestContext.dns_ip
},
'transferConnection': {
'name': 'dummy.',
'keyName': VinylDNSTestContext.dns_key_name,
'key': VinylDNSTestContext.dns_key,
'primaryServer': VinylDNSTestContext.dns_ip
}
}, status=202)
self.dummy_zone = dummy_zone_change['zone']
ip6_reverse_zone_change = self.ok_vinyldns_client.create_zone(
{
'name': '1.9.e.f.c.c.7.2.9.6.d.f.ip6.arpa.',
'email': 'test@test.com',
'shared': True,
'adminGroupId': self.ok_group['id'],
'connection': {
'name': 'ip6.',
'keyName': VinylDNSTestContext.dns_key_name,
'key': VinylDNSTestContext.dns_key,
'primaryServer': VinylDNSTestContext.dns_ip
},
'transferConnection': {
'name': 'ip6.',
'keyName': VinylDNSTestContext.dns_key_name,
'key': VinylDNSTestContext.dns_key,
'primaryServer': VinylDNSTestContext.dns_ip
}
}, status=202
)
self.ip6_reverse_zone = ip6_reverse_zone_change['zone']
ip4_reverse_zone_change = self.ok_vinyldns_client.create_zone(
{
'name': '30.172.in-addr.arpa.',
'email': 'test@test.com',
'shared': True,
'adminGroupId': self.ok_group['id'],
'connection': {
'name': 'ip4.',
'keyName': VinylDNSTestContext.dns_key_name,
'key': VinylDNSTestContext.dns_key,
'primaryServer': VinylDNSTestContext.dns_ip
},
'transferConnection': {
'name': 'ip4.',
'keyName': VinylDNSTestContext.dns_key_name,
'key': VinylDNSTestContext.dns_key,
'primaryServer': VinylDNSTestContext.dns_ip
}
}, status=202
)
self.ip4_reverse_zone = ip4_reverse_zone_change['zone']
classless_base_zone_change = self.ok_vinyldns_client.create_zone(
{
'name': '2.0.192.in-addr.arpa.',
'email': 'test@test.com',
'shared': False,
'adminGroupId': self.ok_group['id'],
'connection': {
'name': 'classless-base.',
'keyName': VinylDNSTestContext.dns_key_name,
'key': VinylDNSTestContext.dns_key,
'primaryServer': VinylDNSTestContext.dns_ip
},
'transferConnection': {
'name': 'classless-base.',
'keyName': VinylDNSTestContext.dns_key_name,
'key': VinylDNSTestContext.dns_key,
'primaryServer': VinylDNSTestContext.dns_ip
}
}, status=202
)
self.classless_base_zone = classless_base_zone_change['zone']
classless_zone_delegation_change = self.ok_vinyldns_client.create_zone(
{
'name': '192/30.2.0.192.in-addr.arpa.',
'email': 'test@test.com',
'shared': False,
'adminGroupId': self.ok_group['id'],
'connection': {
'name': 'classless.',
'keyName': VinylDNSTestContext.dns_key_name,
'key': VinylDNSTestContext.dns_key,
'primaryServer': VinylDNSTestContext.dns_ip
},
'transferConnection': {
'name': 'classless.',
'keyName': VinylDNSTestContext.dns_key_name,
'key': VinylDNSTestContext.dns_key,
'primaryServer': VinylDNSTestContext.dns_ip
}
}, status=202
)
self.classless_zone_delegation_zone = classless_zone_delegation_change['zone']
system_test_zone_change = self.ok_vinyldns_client.create_zone(
{
'name': 'system-test.',
'email': 'test@test.com',
'shared': True,
'adminGroupId': self.ok_group['id'],
'connection': {
'name': 'system-test.',
'keyName': VinylDNSTestContext.dns_key_name,
'key': VinylDNSTestContext.dns_key,
'primaryServer': VinylDNSTestContext.dns_ip
},
'transferConnection': {
'name': 'system-test.',
'keyName': VinylDNSTestContext.dns_key_name,
'key': VinylDNSTestContext.dns_key,
'primaryServer': VinylDNSTestContext.dns_ip
}
}, status=202
)
self.system_test_zone = system_test_zone_change['zone']
# parent zone gives access to the dummy user, dummy user cannot manage ns records
parent_zone_change = self.ok_vinyldns_client.create_zone(
{
'name': 'parent.com.',
'email': 'test@test.com',
'shared': False,
'adminGroupId': self.ok_group['id'],
'acl': {
'rules': [
{
'accessLevel': 'Delete',
'description': 'some_test_rule',
'userId': 'dummy'
}
]
},
'connection': {
'name': 'parent.',
'keyName': VinylDNSTestContext.dns_key_name,
'key': VinylDNSTestContext.dns_key,
'primaryServer': VinylDNSTestContext.dns_ip
},
'transferConnection': {
'name': 'parent.',
'keyName': VinylDNSTestContext.dns_key_name,
'key': VinylDNSTestContext.dns_key,
'primaryServer': VinylDNSTestContext.dns_ip
}
}, status=202)
self.parent_zone = parent_zone_change['zone']
# wait until our zones are created
self.ok_vinyldns_client.wait_until_zone_exists(system_test_zone_change)
self.ok_vinyldns_client.wait_until_zone_exists(ok_zone_change)
self.dummy_vinyldns_client.wait_until_zone_exists(dummy_zone_change)
self.ok_vinyldns_client.wait_until_zone_exists(ip6_reverse_zone_change)
self.ok_vinyldns_client.wait_until_zone_exists(ip4_reverse_zone_change)
self.ok_vinyldns_client.wait_until_zone_exists(classless_base_zone_change)
self.ok_vinyldns_client.wait_until_zone_exists(classless_zone_delegation_change)
self.ok_vinyldns_client.wait_until_zone_exists(system_test_zone_change)
self.ok_vinyldns_client.wait_until_zone_exists(parent_zone_change)
# validate all in there
zones = self.dummy_vinyldns_client.list_zones()['zones']
assert_that(len(zones), is_(2))
zones = self.ok_vinyldns_client.list_zones()['zones']
assert_that(len(zones), is_(7))
except:
# teardown if there was any issue in setup
try:
self.tear_down()
except:
pass
raise
def tear_down(self):
"""
The ok_vinyldns_client is a zone admin on _all_ the zones.
We shouldn't have to do any checks now, as zone admins have full rights to all zones, including
deleting all records (even in the old shared model)
"""
clear_zones(self.dummy_vinyldns_client)
clear_zones(self.ok_vinyldns_client)
clear_groups(self.dummy_vinyldns_client)
clear_groups(self.ok_vinyldns_client, exclude=['ok'])
# reset ok_group
ok_group = {
'id': 'ok',
'name': 'ok',
'email': 'test@test.com',
'description': 'this is a description',
            'members': [{'id': 'ok'}],
            'admins': [{'id': 'ok'}]
}
self.ok_vinyldns_client.update_group(ok_group['id'], ok_group, status=200)
def confirm_member_in_group(self, client, group):
retries = 2
success = group in client.list_all_my_groups(status=200)
while retries >= 0 and not success:
success = group in client.list_all_my_groups(status=200)
time.sleep(.05)
retries -= 1
assert_that(success, is_(True))
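# A hedged usage sketch (the surrounding test wiring is an assumption, not part
# of this module):
#     ctx = SharedZoneTestContext()
#     try:
#         pass  # exercise ctx.ok_zone / ctx.dummy_zone with the two clients
#     finally:
#         ctx.tear_down()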
| 44.818841
| 121
| 0.501132
|
735249eebd6af04c4542f65596dcc37e00053e23
| 56,995
|
py
|
Python
|
chives/rpc/wallet_rpc_api.py
|
HiveProject2021/chives-light-wallet
|
0c7c36bfc703b26ce3c938027de643dc90e4191f
|
[
"Apache-2.0"
] | 7
|
2021-12-26T11:05:19.000Z
|
2022-02-24T10:42:45.000Z
|
chives/rpc/wallet_rpc_api.py
|
HiveProject2021/chives-light-wallet
|
0c7c36bfc703b26ce3c938027de643dc90e4191f
|
[
"Apache-2.0"
] | 8
|
2021-12-14T17:27:29.000Z
|
2022-03-29T18:18:22.000Z
|
chives/rpc/wallet_rpc_api.py
|
HiveProject2021/chives-light-wallet
|
0c7c36bfc703b26ce3c938027de643dc90e4191f
|
[
"Apache-2.0"
] | 1
|
2021-12-09T23:51:12.000Z
|
2021-12-09T23:51:12.000Z
|
import asyncio
import logging
from pathlib import Path
from typing import Callable, Dict, List, Optional, Tuple, Any
from blspy import PrivateKey, G1Element
from chives.consensus.block_rewards import calculate_base_community_reward, calculate_base_farmer_reward, calculate_pool_reward
from chives.pools.pool_wallet import PoolWallet
from chives.pools.pool_wallet_info import create_pool_state, FARMING_TO_POOL, PoolWalletInfo, PoolState
from chives.protocols.protocol_message_types import ProtocolMessageTypes
from chives.server.outbound_message import NodeType, make_msg
from chives.simulator.simulator_protocol import FarmNewBlockProtocol
from chives.types.blockchain_format.coin import Coin
from chives.types.blockchain_format.sized_bytes import bytes32
from chives.util.bech32m import decode_puzzle_hash, encode_puzzle_hash
from chives.util.byte_types import hexstr_to_bytes
from chives.util.ints import uint32, uint64
from chives.util.keychain import KeyringIsLocked, bytes_to_mnemonic, generate_mnemonic
from chives.util.path import path_from_root
from chives.util.ws_message import WsRpcMessage, create_payload_dict
from chives.wallet.cc_wallet.cat_constants import DEFAULT_CATS
from chives.wallet.cc_wallet.cc_wallet import CCWallet
from chives.wallet.derive_keys import master_sk_to_singleton_owner_sk, master_sk_to_wallet_sk_unhardened
from chives.wallet.rl_wallet.rl_wallet import RLWallet
from chives.wallet.derive_keys import master_sk_to_farmer_sk, master_sk_to_pool_sk, master_sk_to_wallet_sk
from chives.wallet.did_wallet.did_wallet import DIDWallet
from chives.wallet.trade_record import TradeRecord
from chives.wallet.transaction_record import TransactionRecord
from chives.wallet.util.trade_utils import trade_record_to_dict
from chives.wallet.util.transaction_type import TransactionType
from chives.wallet.util.wallet_types import WalletType
from chives.wallet.wallet_info import WalletInfo
from chives.wallet.wallet_node import WalletNode
from chives.util.config import load_config
from chives.consensus.coinbase import create_puzzlehash_for_pk
# Timeout for response from wallet/full node for sending a transaction
TIMEOUT = 30
log = logging.getLogger(__name__)
class WalletRpcApi:
def __init__(self, wallet_node: WalletNode):
assert wallet_node is not None
self.service = wallet_node
self.service_name = "chives_wallet"
self.balance_cache: Dict[int, Any] = {}
def get_routes(self) -> Dict[str, Callable]:
return {
# Key management
"/log_in": self.log_in,
"/get_public_keys": self.get_public_keys,
"/get_private_key": self.get_private_key,
"/generate_mnemonic": self.generate_mnemonic,
"/add_key": self.add_key,
"/delete_key": self.delete_key,
"/check_delete_key": self.check_delete_key,
"/delete_all_keys": self.delete_all_keys,
# Wallet node
"/get_sync_status": self.get_sync_status,
"/get_height_info": self.get_height_info,
"/farm_block": self.farm_block, # Only when node simulator is running
# this function is just here for backwards-compatibility. It will probably
# be removed in the future
"/get_initial_freeze_period": self.get_initial_freeze_period,
"/get_network_info": self.get_network_info,
# Wallet management
"/get_wallets": self.get_wallets,
"/create_new_wallet": self.create_new_wallet,
# Wallet
"/get_wallet_balance": self.get_wallet_balance,
"/get_transaction": self.get_transaction,
"/get_transactions": self.get_transactions,
"/get_transaction_count": self.get_transaction_count,
"/get_next_address": self.get_next_address,
"/send_transaction": self.send_transaction,
"/send_transaction_multi": self.send_transaction_multi,
"/get_farmed_amount": self.get_farmed_amount,
"/create_signed_transaction": self.create_signed_transaction,
"/delete_unconfirmed_transactions": self.delete_unconfirmed_transactions,
# Coloured coins and trading
"/cc_set_name": self.cc_set_name,
"/cc_get_name": self.cc_get_name,
"/cc_spend": self.cc_spend,
"/cc_get_colour": self.cc_get_colour,
"/create_offer_for_ids": self.create_offer_for_ids,
"/get_discrepancies_for_offer": self.get_discrepancies_for_offer,
"/respond_to_offer": self.respond_to_offer,
"/get_trade": self.get_trade,
"/get_all_trades": self.get_all_trades,
"/cancel_trade": self.cancel_trade,
"/get_cat_list": self.get_cat_list,
# DID Wallet
"/did_update_recovery_ids": self.did_update_recovery_ids,
"/did_get_pubkey": self.did_get_pubkey,
"/did_get_did": self.did_get_did,
"/did_recovery_spend": self.did_recovery_spend,
"/did_get_recovery_list": self.did_get_recovery_list,
"/did_create_attest": self.did_create_attest,
"/did_get_information_needed_for_recovery": self.did_get_information_needed_for_recovery,
"/did_create_backup_file": self.did_create_backup_file,
# RL wallet
"/rl_set_user_info": self.rl_set_user_info,
"/send_clawback_transaction:": self.send_clawback_transaction,
"/add_rate_limited_funds:": self.add_rate_limited_funds,
# Pool Wallet
"/pw_join_pool": self.pw_join_pool,
"/pw_self_pool": self.pw_self_pool,
"/pw_absorb_rewards": self.pw_absorb_rewards,
"/pw_status": self.pw_status,
}
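    # A hedged note (not in the original): every handler above takes and returns
    # a JSON-serializable dict; a hypothetical client call might look like
    #     await rpc_client.fetch("get_wallet_balance", {"wallet_id": 1})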
async def _state_changed(self, *args) -> List[WsRpcMessage]:
"""
Called by the WalletNode or WalletStateManager when something has changed in the wallet. This
gives us an opportunity to send notifications to all connected clients via WebSocket.
"""
if len(args) < 2:
return []
data = {
"state": args[0],
}
if args[1] is not None:
data["wallet_id"] = args[1]
        if len(args) > 2 and args[2] is not None:
data["additional_data"] = args[2]
return [create_payload_dict("state_changed", data, "chives_wallet", "wallet_ui")]
async def _stop_wallet(self):
"""
Stops a currently running wallet/key, which allows starting the wallet with a new key.
        Each key has its own wallet database.
"""
if self.service is not None:
self.service._close()
peers_close_task: Optional[asyncio.Task] = await self.service._await_closed()
if peers_close_task is not None:
await peers_close_task
##########################################################################################
# Key management
##########################################################################################
async def log_in(self, request):
"""
Logs in the wallet with a specific key.
"""
fingerprint = request["fingerprint"]
if self.service.logged_in_fingerprint == fingerprint:
return {"fingerprint": fingerprint}
await self._stop_wallet()
started = await self.service._start(fingerprint)
if started is True:
return {"fingerprint": fingerprint}
return {"success": False, "error": "Unknown Error"}
async def get_public_keys(self, request: Dict):
try:
assert self.service.keychain_proxy is not None # An offering to the mypy gods
fingerprints = [
sk.get_g1().get_fingerprint() for (sk, seed) in await self.service.keychain_proxy.get_all_private_keys()
]
except KeyringIsLocked:
return {"keyring_is_locked": True}
except Exception:
return {"public_key_fingerprints": []}
else:
return {"public_key_fingerprints": fingerprints}
async def _get_private_key(self, fingerprint) -> Tuple[Optional[PrivateKey], Optional[bytes]]:
try:
assert self.service.keychain_proxy is not None # An offering to the mypy gods
all_keys = await self.service.keychain_proxy.get_all_private_keys()
for sk, seed in all_keys:
if sk.get_g1().get_fingerprint() == fingerprint:
return sk, seed
except Exception as e:
log.error(f"Failed to get private key by fingerprint: {e}")
return None, None
async def get_private_key(self, request):
fingerprint = request["fingerprint"]
sk, seed = await self._get_private_key(fingerprint)
if sk is not None:
s = bytes_to_mnemonic(seed) if seed is not None else None
return {
"private_key": {
"fingerprint": fingerprint,
"sk": bytes(sk).hex(),
"pk": bytes(sk.get_g1()).hex(),
"farmer_pk": bytes(master_sk_to_farmer_sk(sk).get_g1()).hex(),
"pool_pk": bytes(master_sk_to_pool_sk(sk).get_g1()).hex(),
"seed": s,
},
}
return {"success": False, "private_key": {"fingerprint": fingerprint}}
async def generate_mnemonic(self, request: Dict):
return {"mnemonic": generate_mnemonic().split(" ")}
async def add_key(self, request):
if "mnemonic" not in request:
raise ValueError("Mnemonic not in request")
# Adding a key from 24 word mnemonic
mnemonic = request["mnemonic"]
passphrase = ""
try:
sk = await self.service.keychain_proxy.add_private_key(" ".join(mnemonic), passphrase)
except KeyError as e:
return {
"success": False,
"error": f"The word '{e.args[0]}' is incorrect.'",
"word": e.args[0],
}
except Exception as e:
return {"success": False, "error": str(e)}
fingerprint = sk.get_g1().get_fingerprint()
await self._stop_wallet()
# Makes sure the new key is added to config properly
started = False
try:
await self.service.keychain_proxy.check_keys(self.service.root_path)
except Exception as e:
log.error(f"Failed to check_keys after adding a new key: {e}")
started = await self.service._start(fingerprint=fingerprint)
if started is True:
return {"fingerprint": fingerprint}
raise ValueError("Failed to start")
async def delete_key(self, request):
await self._stop_wallet()
fingerprint = request["fingerprint"]
try:
await self.service.keychain_proxy.delete_key_by_fingerprint(fingerprint)
except Exception as e:
log.error(f"Failed to delete key by fingerprint: {e}")
return {"success": False, "error": str(e)}
path = path_from_root(
self.service.root_path,
f"{self.service.config['database_path']}-{fingerprint}",
)
if path.exists():
path.unlink()
return {}
async def _check_key_used_for_rewards(
self, new_root: Path, sk: PrivateKey, max_ph_to_search: int
) -> Tuple[bool, bool]:
"""Checks if the given key is used for either the farmer rewards or pool rewards
returns a tuple of two booleans
The first is true if the key is used as the Farmer rewards, otherwise false
The second is true if the key is used as the Pool rewards, otherwise false
Returns both false if the key cannot be found with the given fingerprint
"""
if sk is None:
return False, False
config: Dict = load_config(new_root, "config.yaml")
farmer_target = config["farmer"].get("xch_target_address")
pool_target = config["pool"].get("xch_target_address")
found_farmer = False
found_pool = False
selected = config["selected_network"]
prefix = config["network_overrides"]["config"][selected]["address_prefix"]
for i in range(max_ph_to_search):
if found_farmer and found_pool:
break
phs = [
encode_puzzle_hash(create_puzzlehash_for_pk(master_sk_to_wallet_sk(sk, uint32(i)).get_g1()), prefix),
encode_puzzle_hash(
create_puzzlehash_for_pk(master_sk_to_wallet_sk_unhardened(sk, uint32(i)).get_g1()), prefix
),
]
for ph in phs:
if ph == farmer_target:
found_farmer = True
if ph == pool_target:
found_pool = True
return found_farmer, found_pool
async def check_delete_key(self, request):
"""Check the key use prior to possible deletion
checks whether key is used for either farm or pool rewards
checks if any wallets have a non-zero balance
"""
used_for_farmer: bool = False
used_for_pool: bool = False
walletBalance: bool = False
fingerprint = request["fingerprint"]
sk, _ = await self._get_private_key(fingerprint)
if sk is not None:
used_for_farmer, used_for_pool = await self._check_key_used_for_rewards(self.service.root_path, sk, 100)
if self.service.logged_in_fingerprint != fingerprint:
await self._stop_wallet()
await self.service._start(fingerprint=fingerprint)
wallets: List[WalletInfo] = await self.service.wallet_state_manager.get_all_wallet_info_entries()
for w in wallets:
wallet = self.service.wallet_state_manager.wallets[w.id]
unspent = await self.service.wallet_state_manager.coin_store.get_unspent_coins_for_wallet(w.id)
balance = await wallet.get_confirmed_balance(unspent)
pending_balance = await wallet.get_unconfirmed_balance(unspent)
if (balance + pending_balance) > 0:
walletBalance = True
break
return {
"fingerprint": fingerprint,
"used_for_farmer_rewards": used_for_farmer,
"used_for_pool_rewards": used_for_pool,
"wallet_balance": walletBalance,
}
async def delete_all_keys(self, request: Dict):
await self._stop_wallet()
try:
assert self.service.keychain_proxy is not None # An offering to the mypy gods
await self.service.keychain_proxy.delete_all_keys()
except Exception as e:
log.error(f"Failed to delete all keys: {e}")
return {"success": False, "error": str(e)}
path = path_from_root(self.service.root_path, self.service.config["database_path"])
if path.exists():
path.unlink()
return {}
##########################################################################################
# Wallet Node
##########################################################################################
async def get_sync_status(self, request: Dict):
assert self.service.wallet_state_manager is not None
syncing = self.service.wallet_state_manager.sync_mode
synced = await self.service.wallet_state_manager.synced()
return {"synced": synced, "syncing": syncing, "genesis_initialized": True}
async def get_height_info(self, request: Dict):
assert self.service.wallet_state_manager is not None
height = self.service.wallet_state_manager.blockchain.get_peak_height()
return {"height": height}
async def get_network_info(self, request: Dict):
assert self.service.wallet_state_manager is not None
network_name = self.service.config["selected_network"]
address_prefix = self.service.config["network_overrides"]["config"][network_name]["address_prefix"]
return {"network_name": network_name, "network_prefix": address_prefix}
async def farm_block(self, request):
raw_puzzle_hash = decode_puzzle_hash(request["address"])
request = FarmNewBlockProtocol(raw_puzzle_hash)
msg = make_msg(ProtocolMessageTypes.farm_new_block, request)
await self.service.server.send_to_all([msg], NodeType.FULL_NODE)
return {}
##########################################################################################
# Wallet Management
##########################################################################################
async def get_wallets(self, request: Dict):
assert self.service.wallet_state_manager is not None
wallets: List[WalletInfo] = await self.service.wallet_state_manager.get_all_wallet_info_entries()
return {"wallets": wallets}
async def create_new_wallet(self, request: Dict):
assert self.service.wallet_state_manager is not None
wallet_state_manager = self.service.wallet_state_manager
if await self.service.wallet_state_manager.synced() is False:
raise ValueError("Wallet needs to be fully synced.")
main_wallet = wallet_state_manager.main_wallet
if "fee" in request:
fee: uint64 = request["fee"]
else:
fee = uint64(0)
if request["wallet_type"] == "cat_wallet":
name = request.get("name", "Token Wallet")
if request["mode"] == "new":
async with self.service.wallet_state_manager.lock:
cc_wallet: CCWallet = await CCWallet.create_new_cc_wallet(
wallet_state_manager,
main_wallet,
{"identifier": "genesis_by_id"},
uint64(request["amount"]),
name,
)
colour = cc_wallet.get_colour()
self.service.wallet_state_manager.state_changed("wallet_created")
return {"type": cc_wallet.type(), "colour": colour, "wallet_id": cc_wallet.id()}
elif request["mode"] == "existing":
async with self.service.wallet_state_manager.lock:
cc_wallet = await CCWallet.create_wallet_for_cc(
wallet_state_manager, main_wallet, request["colour"]
)
self.service.wallet_state_manager.state_changed("wallet_created")
return {"type": cc_wallet.type(), "colour": request["colour"], "wallet_id": cc_wallet.id()}
else: # undefined mode
pass
elif request["wallet_type"] == "rl_wallet":
if request["rl_type"] == "admin":
log.info("Create rl admin wallet")
async with self.service.wallet_state_manager.lock:
rl_admin: RLWallet = await RLWallet.create_rl_admin(wallet_state_manager)
success = await rl_admin.admin_create_coin(
uint64(int(request["interval"])),
uint64(int(request["limit"])),
request["pubkey"],
uint64(int(request["amount"])),
uint64(int(request["fee"])) if "fee" in request else uint64(0),
)
assert rl_admin.rl_info.admin_pubkey is not None
return {
"success": success,
"id": rl_admin.id(),
"type": rl_admin.type(),
"origin": rl_admin.rl_info.rl_origin,
"pubkey": rl_admin.rl_info.admin_pubkey.hex(),
}
elif request["rl_type"] == "user":
log.info("Create rl user wallet")
async with self.service.wallet_state_manager.lock:
rl_user: RLWallet = await RLWallet.create_rl_user(wallet_state_manager)
assert rl_user.rl_info.user_pubkey is not None
return {
"id": rl_user.id(),
"type": rl_user.type(),
"pubkey": rl_user.rl_info.user_pubkey.hex(),
}
else: # undefined rl_type
pass
elif request["wallet_type"] == "did_wallet":
if request["did_type"] == "new":
backup_dids = []
num_needed = 0
for d in request["backup_dids"]:
backup_dids.append(hexstr_to_bytes(d))
if len(backup_dids) > 0:
num_needed = uint64(request["num_of_backup_ids_needed"])
async with self.service.wallet_state_manager.lock:
did_wallet: DIDWallet = await DIDWallet.create_new_did_wallet(
wallet_state_manager,
main_wallet,
uint64(request["amount"]),
backup_dids,
uint64(num_needed),
)
my_did = did_wallet.get_my_DID()
return {
"success": True,
"type": did_wallet.type(),
"my_did": my_did,
"wallet_id": did_wallet.id(),
}
elif request["did_type"] == "recovery":
async with self.service.wallet_state_manager.lock:
did_wallet = await DIDWallet.create_new_did_wallet_from_recovery(
wallet_state_manager, main_wallet, request["filename"]
)
assert did_wallet.did_info.temp_coin is not None
assert did_wallet.did_info.temp_puzhash is not None
assert did_wallet.did_info.temp_pubkey is not None
my_did = did_wallet.get_my_DID()
coin_name = did_wallet.did_info.temp_coin.name().hex()
coin_list = did_wallet.did_info.temp_coin.as_list()
newpuzhash = did_wallet.did_info.temp_puzhash
pubkey = did_wallet.did_info.temp_pubkey
return {
"success": True,
"type": did_wallet.type(),
"my_did": my_did,
"wallet_id": did_wallet.id(),
"coin_name": coin_name,
"coin_list": coin_list,
"newpuzhash": newpuzhash.hex(),
"pubkey": pubkey.hex(),
"backup_dids": did_wallet.did_info.backup_ids,
"num_verifications_required": did_wallet.did_info.num_of_backup_ids_needed,
}
elif request["wallet_type"] == "pool_wallet":
if request["mode"] == "new":
owner_puzzle_hash: bytes32 = await self.service.wallet_state_manager.main_wallet.get_puzzle_hash(True)
from chives.pools.pool_wallet_info import initial_pool_state_from_dict
async with self.service.wallet_state_manager.lock:
last_wallet: Optional[
WalletInfo
] = await self.service.wallet_state_manager.user_store.get_last_wallet()
assert last_wallet is not None
next_id = last_wallet.id + 1
owner_sk: PrivateKey = master_sk_to_singleton_owner_sk(
self.service.wallet_state_manager.private_key, uint32(next_id)
)
owner_pk: G1Element = owner_sk.get_g1()
initial_target_state = initial_pool_state_from_dict(
request["initial_target_state"], owner_pk, owner_puzzle_hash
)
assert initial_target_state is not None
try:
delayed_address = None
if "p2_singleton_delayed_ph" in request:
delayed_address = hexstr_to_bytes(request["p2_singleton_delayed_ph"])
tr, p2_singleton_puzzle_hash, launcher_id = await PoolWallet.create_new_pool_wallet_transaction(
wallet_state_manager,
main_wallet,
initial_target_state,
fee,
request.get("p2_singleton_delay_time", None),
delayed_address,
)
except Exception as e:
raise ValueError(str(e))
return {
"transaction": tr,
"launcher_id": launcher_id.hex(),
"p2_singleton_puzzle_hash": p2_singleton_puzzle_hash.hex(),
}
elif request["mode"] == "recovery":
raise ValueError("Need upgraded singleton for on-chain recovery")
else: # undefined did_type
pass
else: # undefined wallet_type
pass
return None
##########################################################################################
# Wallet
##########################################################################################
async def get_wallet_balance(self, request: Dict) -> Dict:
assert self.service.wallet_state_manager is not None
wallet_id = uint32(int(request["wallet_id"]))
wallet = self.service.wallet_state_manager.wallets[wallet_id]
        # If syncing, return the last available info or zeros
syncing = self.service.wallet_state_manager.sync_mode
if syncing:
if wallet_id in self.balance_cache:
wallet_balance = self.balance_cache[wallet_id]
else:
wallet_balance = {
"wallet_id": wallet_id,
"confirmed_wallet_balance": 0,
"unconfirmed_wallet_balance": 0,
"spendable_balance": 0,
"pending_change": 0,
"max_send_amount": 0,
"unspent_coin_count": 0,
"pending_coin_removal_count": 0,
}
else:
async with self.service.wallet_state_manager.lock:
unspent_records = await self.service.wallet_state_manager.coin_store.get_unspent_coins_for_wallet(
wallet_id
)
balance = await wallet.get_confirmed_balance(unspent_records)
pending_balance = await wallet.get_unconfirmed_balance(unspent_records)
spendable_balance = await wallet.get_spendable_balance(unspent_records)
pending_change = await wallet.get_pending_change_balance()
max_send_amount = await wallet.get_max_send_amount(unspent_records)
unconfirmed_removals: Dict[
bytes32, Coin
] = await wallet.wallet_state_manager.unconfirmed_removals_for_wallet(wallet_id)
wallet_balance = {
"wallet_id": wallet_id,
"confirmed_wallet_balance": balance,
"unconfirmed_wallet_balance": pending_balance,
"spendable_balance": spendable_balance,
"pending_change": pending_change,
"max_send_amount": max_send_amount,
"unspent_coin_count": len(unspent_records),
"pending_coin_removal_count": len(unconfirmed_removals),
}
self.balance_cache[wallet_id] = wallet_balance
return {"wallet_balance": wallet_balance}
async def get_transaction(self, request: Dict) -> Dict:
assert self.service.wallet_state_manager is not None
transaction_id: bytes32 = bytes32(hexstr_to_bytes(request["transaction_id"]))
tr: Optional[TransactionRecord] = await self.service.wallet_state_manager.get_transaction(transaction_id)
if tr is None:
raise ValueError(f"Transaction 0x{transaction_id.hex()} not found")
return {
"transaction": tr.to_json_dict_convenience(self.service.config),
"transaction_id": tr.name,
}
async def get_transactions(self, request: Dict) -> Dict:
assert self.service.wallet_state_manager is not None
wallet_id = int(request["wallet_id"])
if "start" in request:
start = request["start"]
else:
start = 0
if "end" in request:
end = request["end"]
else:
end = 1000
transactions = await self.service.wallet_state_manager.tx_store.get_transactions_between(wallet_id, start, end)
return {
"transactions": [tr.to_json_dict_convenience(self.service.config) for tr in transactions],
"wallet_id": wallet_id,
}
async def get_transaction_count(self, request: Dict) -> Dict:
assert self.service.wallet_state_manager is not None
wallet_id = int(request["wallet_id"])
count = await self.service.wallet_state_manager.tx_store.get_transaction_count_for_wallet(wallet_id)
return {
"count": count,
"wallet_id": wallet_id,
}
# this function is just here for backwards-compatibility. It will probably
# be removed in the future
async def get_initial_freeze_period(self, _: Dict):
# Mon May 03 2021 17:00:00 GMT+0000
return {"INITIAL_FREEZE_END_TIMESTAMP": 1620061200}
async def get_next_address(self, request: Dict) -> Dict:
"""
Returns a new address
"""
assert self.service.wallet_state_manager is not None
if request["new_address"] is True:
create_new = True
else:
create_new = False
wallet_id = uint32(int(request["wallet_id"]))
wallet = self.service.wallet_state_manager.wallets[wallet_id]
selected = self.service.config["selected_network"]
prefix = self.service.config["network_overrides"]["config"][selected]["address_prefix"]
if wallet.type() == WalletType.STANDARD_WALLET:
raw_puzzle_hash = await wallet.get_puzzle_hash(create_new)
address = encode_puzzle_hash(raw_puzzle_hash, prefix)
elif wallet.type() == WalletType.COLOURED_COIN:
raw_puzzle_hash = await wallet.standard_wallet.get_puzzle_hash(create_new)
address = encode_puzzle_hash(raw_puzzle_hash, prefix)
else:
raise ValueError(f"Wallet type {wallet.type()} cannot create puzzle hashes")
return {
"wallet_id": wallet_id,
"address": address,
}
async def send_transaction(self, request):
assert self.service.wallet_state_manager is not None
if await self.service.wallet_state_manager.synced() is False:
raise ValueError("Wallet needs to be fully synced before sending transactions")
wallet_id = int(request["wallet_id"])
wallet = self.service.wallet_state_manager.wallets[wallet_id]
if wallet.type() == WalletType.COLOURED_COIN:
raise ValueError("send_transaction does not work for CAT wallets")
if not isinstance(request["amount"], int) or not isinstance(request["fee"], int):
raise ValueError("An integer amount or fee is required (too many decimals)")
amount: uint64 = uint64(request["amount"])
puzzle_hash: bytes32 = decode_puzzle_hash(request["address"])
        memos: Optional[List[bytes]] = None
if "memos" in request:
memos = [mem.encode("utf-8") for mem in request["memos"]]
if "fee" in request:
fee = uint64(request["fee"])
else:
fee = uint64(0)
async with self.service.wallet_state_manager.lock:
tx: TransactionRecord = await wallet.generate_signed_transaction(amount, puzzle_hash, fee, memos=memos)
await wallet.push_transaction(tx)
# Transaction may not have been included in the mempool yet. Use get_transaction to check.
return {
"transaction": tx.to_json_dict_convenience(self.service.config),
"transaction_id": tx.name,
}
async def send_transaction_multi(self, request) -> Dict:
assert self.service.wallet_state_manager is not None
if await self.service.wallet_state_manager.synced() is False:
raise ValueError("Wallet needs to be fully synced before sending transactions")
wallet_id = uint32(request["wallet_id"])
wallet = self.service.wallet_state_manager.wallets[wallet_id]
async with self.service.wallet_state_manager.lock:
transaction: Dict = (await self.create_signed_transaction(request, hold_lock=False))["signed_tx"]
tr: TransactionRecord = TransactionRecord.from_json_dict_convenience(transaction)
await wallet.push_transaction(tr)
# Transaction may not have been included in the mempool yet. Use get_transaction to check.
return {"transaction": transaction, "transaction_id": tr.name}
async def delete_unconfirmed_transactions(self, request):
wallet_id = uint32(request["wallet_id"])
if wallet_id not in self.service.wallet_state_manager.wallets:
raise ValueError(f"Wallet id {wallet_id} does not exist")
if await self.service.wallet_state_manager.synced() is False:
raise ValueError("Wallet needs to be fully synced.")
async with self.service.wallet_state_manager.lock:
async with self.service.wallet_state_manager.tx_store.db_wrapper.lock:
await self.service.wallet_state_manager.tx_store.db_wrapper.begin_transaction()
await self.service.wallet_state_manager.tx_store.delete_unconfirmed_transactions(wallet_id)
if self.service.wallet_state_manager.wallets[wallet_id].type() == WalletType.POOLING_WALLET.value:
self.service.wallet_state_manager.wallets[wallet_id].target_state = None
await self.service.wallet_state_manager.tx_store.db_wrapper.commit_transaction()
# Update the cache
await self.service.wallet_state_manager.tx_store.rebuild_tx_cache()
return {}
##########################################################################################
# Coloured Coins and Trading
##########################################################################################
async def get_cat_list(self, request):
return {"cat_list": list(DEFAULT_CATS.values())}
async def cc_set_name(self, request):
assert self.service.wallet_state_manager is not None
wallet_id = int(request["wallet_id"])
wallet: CCWallet = self.service.wallet_state_manager.wallets[wallet_id]
await wallet.set_name(str(request["name"]))
return {"wallet_id": wallet_id}
async def cc_get_name(self, request):
assert self.service.wallet_state_manager is not None
wallet_id = int(request["wallet_id"])
wallet: CCWallet = self.service.wallet_state_manager.wallets[wallet_id]
name: str = await wallet.get_name()
return {"wallet_id": wallet_id, "name": name}
async def cc_spend(self, request):
assert self.service.wallet_state_manager is not None
if await self.service.wallet_state_manager.synced() is False:
raise ValueError("Wallet needs to be fully synced.")
wallet_id = int(request["wallet_id"])
wallet: CCWallet = self.service.wallet_state_manager.wallets[wallet_id]
        # Support sending to multiple addresses, each with its own amount and memo, in one call.
inner_address = request["inner_address"]
inner_address_array = inner_address.split(',')
puzzle_hash_array: List[bytes32] = []
for inner_address_key in inner_address_array:
puzzle_hash_array.append(decode_puzzle_hash(inner_address_key))
amount = str(request["amount"])
amount_array = amount.split(',')
amount_int_array: List[uint64] = []
for amount_key in amount_array:
amount_int_array.append(uint64(int(amount_key)))
memos = str(request["memos"])
memos_array = memos.split(',')
memos_key_array: List[bytes] = []
for memos_key in memos_array:
memos_key_array.append(memos_key.encode("utf-8"))
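        # Illustrative request shape for this multi-send endpoint (values are
        # assumptions, not taken from the original code):
        #   {"wallet_id": 2,
        #    "inner_address": "addr1,addr2",
        #    "amount": "1000,2000",
        #    "memos": "first,second"}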
if len(puzzle_hash_array) != len(amount_int_array):
raise ValueError("The number of addresses and amounts must be equal. Multiple addresses and amounts are separated by commas")
if len(puzzle_hash_array) != len(memos_key_array):
raise ValueError("The number of addresses and memos must be equal. Multiple addresses and memos are separated by commas")
#if not isinstance(request["amount"], int) or not isinstance(request["amount"], int):
# raise ValueError("An integer amount or fee is required (too many decimals)")
#amount: uint64 = uint64(request["amount"])
if "fee" in request:
fee = uint64(request["fee"])
else:
fee = uint64(0)
async with self.service.wallet_state_manager.lock:
            txs: List[TransactionRecord] = await wallet.generate_signed_transaction(
amount_int_array, puzzle_hash_array, fee, memos=memos_key_array
)
for tx in txs:
await wallet.standard_wallet.push_transaction(tx)
return {
"transaction": txs[0].to_json_dict_convenience(self.service.config),
"transaction_id": txs[0].name,
}
async def cc_get_colour(self, request):
assert self.service.wallet_state_manager is not None
wallet_id = int(request["wallet_id"])
wallet: CCWallet = self.service.wallet_state_manager.wallets[wallet_id]
colour: str = wallet.get_colour()
return {"colour": colour, "wallet_id": wallet_id}
async def create_offer_for_ids(self, request):
assert self.service.wallet_state_manager is not None
offer = request["ids"]
file_name = request["filename"]
async with self.service.wallet_state_manager.lock:
(
success,
spend_bundle,
error,
) = await self.service.wallet_state_manager.trade_manager.create_offer_for_ids(offer, file_name)
if success:
self.service.wallet_state_manager.trade_manager.write_offer_to_disk(Path(file_name), spend_bundle)
return {}
raise ValueError(error)
async def get_discrepancies_for_offer(self, request):
assert self.service.wallet_state_manager is not None
file_name = request["filename"]
file_path = Path(file_name)
async with self.service.wallet_state_manager.lock:
(
success,
discrepancies,
error,
) = await self.service.wallet_state_manager.trade_manager.get_discrepancies_for_offer(file_path)
if success:
return {"discrepancies": discrepancies}
raise ValueError(error)
async def respond_to_offer(self, request):
assert self.service.wallet_state_manager is not None
file_path = Path(request["filename"])
async with self.service.wallet_state_manager.lock:
(
success,
trade_record,
error,
) = await self.service.wallet_state_manager.trade_manager.respond_to_offer(file_path)
if not success:
raise ValueError(error)
return {}
async def get_trade(self, request: Dict):
assert self.service.wallet_state_manager is not None
trade_mgr = self.service.wallet_state_manager.trade_manager
trade_id = request["trade_id"]
trade: Optional[TradeRecord] = await trade_mgr.get_trade_by_id(trade_id)
if trade is None:
raise ValueError(f"No trade with trade id: {trade_id}")
result = trade_record_to_dict(trade)
return {"trade": result}
async def get_all_trades(self, request: Dict):
assert self.service.wallet_state_manager is not None
trade_mgr = self.service.wallet_state_manager.trade_manager
all_trades = await trade_mgr.get_all_trades()
result = []
for trade in all_trades:
result.append(trade_record_to_dict(trade))
return {"trades": result}
async def cancel_trade(self, request: Dict):
assert self.service.wallet_state_manager is not None
wsm = self.service.wallet_state_manager
secure = request["secure"]
trade_id = hexstr_to_bytes(request["trade_id"])
async with self.service.wallet_state_manager.lock:
if secure:
await wsm.trade_manager.cancel_pending_offer_safely(trade_id)
else:
await wsm.trade_manager.cancel_pending_offer(trade_id)
return {}
##########################################################################################
# Distributed Identities
##########################################################################################
async def did_update_recovery_ids(self, request):
wallet_id = int(request["wallet_id"])
wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
recovery_list = []
for _ in request["new_list"]:
recovery_list.append(hexstr_to_bytes(_))
if "num_verifications_required" in request:
new_amount_verifications_required = uint64(request["num_verifications_required"])
else:
new_amount_verifications_required = len(recovery_list)
async with self.service.wallet_state_manager.lock:
update_success = await wallet.update_recovery_list(recovery_list, new_amount_verifications_required)
# Update coin with new ID info
spend_bundle = await wallet.create_update_spend()
success = spend_bundle is not None and update_success
return {"success": success}
async def did_get_did(self, request):
wallet_id = int(request["wallet_id"])
wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
my_did: str = wallet.get_my_DID()
async with self.service.wallet_state_manager.lock:
coins = await wallet.select_coins(1)
if coins is None or coins == set():
return {"success": True, "wallet_id": wallet_id, "my_did": my_did}
else:
coin = coins.pop()
return {"success": True, "wallet_id": wallet_id, "my_did": my_did, "coin_id": coin.name()}
async def did_get_recovery_list(self, request):
wallet_id = int(request["wallet_id"])
wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
recovery_list = wallet.did_info.backup_ids
recover_hex_list = []
for _ in recovery_list:
recover_hex_list.append(_.hex())
return {
"success": True,
"wallet_id": wallet_id,
"recover_list": recover_hex_list,
"num_required": wallet.did_info.num_of_backup_ids_needed,
}
async def did_recovery_spend(self, request):
wallet_id = int(request["wallet_id"])
wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
if len(request["attest_filenames"]) < wallet.did_info.num_of_backup_ids_needed:
return {"success": False, "reason": "insufficient messages"}
async with self.service.wallet_state_manager.lock:
(
info_list,
message_spend_bundle,
) = await wallet.load_attest_files_for_recovery_spend(request["attest_filenames"])
if "pubkey" in request:
pubkey = G1Element.from_bytes(hexstr_to_bytes(request["pubkey"]))
else:
assert wallet.did_info.temp_pubkey is not None
pubkey = wallet.did_info.temp_pubkey
if "puzhash" in request:
puzhash = hexstr_to_bytes(request["puzhash"])
else:
assert wallet.did_info.temp_puzhash is not None
puzhash = wallet.did_info.temp_puzhash
success = await wallet.recovery_spend(
wallet.did_info.temp_coin,
puzhash,
info_list,
pubkey,
message_spend_bundle,
)
return {"success": success}
async def did_get_pubkey(self, request):
wallet_id = int(request["wallet_id"])
wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
pubkey = bytes((await wallet.wallet_state_manager.get_unused_derivation_record(wallet_id)).pubkey).hex()
return {"success": True, "pubkey": pubkey}
async def did_create_attest(self, request):
wallet_id = int(request["wallet_id"])
wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
async with self.service.wallet_state_manager.lock:
info = await wallet.get_info_for_recovery()
coin = hexstr_to_bytes(request["coin_name"])
pubkey = G1Element.from_bytes(hexstr_to_bytes(request["pubkey"]))
spend_bundle = await wallet.create_attestment(
coin, hexstr_to_bytes(request["puzhash"]), pubkey, request["filename"]
)
if spend_bundle is not None:
return {
"success": True,
"message_spend_bundle": bytes(spend_bundle).hex(),
"info": [info[0].hex(), info[1].hex(), info[2]],
}
else:
return {"success": False}
async def did_get_information_needed_for_recovery(self, request):
wallet_id = int(request["wallet_id"])
did_wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
my_did = did_wallet.get_my_DID()
coin_name = did_wallet.did_info.temp_coin.name().hex()
return {
"success": True,
"wallet_id": wallet_id,
"my_did": my_did,
"coin_name": coin_name,
"newpuzhash": did_wallet.did_info.temp_puzhash,
"pubkey": did_wallet.did_info.temp_pubkey,
"backup_dids": did_wallet.did_info.backup_ids,
}
async def did_create_backup_file(self, request):
try:
wallet_id = int(request["wallet_id"])
did_wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
did_wallet.create_backup(request["filename"])
return {"wallet_id": wallet_id, "success": True}
except Exception:
return {"wallet_id": wallet_id, "success": False}
##########################################################################################
# Rate Limited Wallet
##########################################################################################
async def rl_set_user_info(self, request):
assert self.service.wallet_state_manager is not None
wallet_id = uint32(int(request["wallet_id"]))
rl_user = self.service.wallet_state_manager.wallets[wallet_id]
origin = request["origin"]
async with self.service.wallet_state_manager.lock:
await rl_user.set_user_info(
uint64(request["interval"]),
uint64(request["limit"]),
origin["parent_coin_info"],
origin["puzzle_hash"],
origin["amount"],
request["admin_pubkey"],
)
return {}
async def send_clawback_transaction(self, request):
assert self.service.wallet_state_manager is not None
wallet_id = int(request["wallet_id"])
wallet: RLWallet = self.service.wallet_state_manager.wallets[wallet_id]
fee = int(request["fee"])
async with self.service.wallet_state_manager.lock:
tx = await wallet.clawback_rl_coin_transaction(fee)
await wallet.push_transaction(tx)
# Transaction may not have been included in the mempool yet. Use get_transaction to check.
return {
"transaction": tx,
"transaction_id": tx.name,
}
async def add_rate_limited_funds(self, request):
wallet_id = uint32(request["wallet_id"])
wallet: RLWallet = self.service.wallet_state_manager.wallets[wallet_id]
puzzle_hash = wallet.rl_get_aggregation_puzzlehash(wallet.rl_info.rl_puzzle_hash)
async with self.service.wallet_state_manager.lock:
await wallet.rl_add_funds(request["amount"], puzzle_hash, request["fee"])
return {"status": "SUCCESS"}
async def get_farmed_amount(self, request):
tx_records: List[TransactionRecord] = await self.service.wallet_state_manager.tx_store.get_farming_rewards()
amount = 0
pool_reward_amount = 0
farmer_reward_amount = 0
fee_amount = 0
last_height_farmed = 0
for record in tx_records:
if record.wallet_id not in self.service.wallet_state_manager.wallets:
continue
if record.type == TransactionType.COINBASE_REWARD:
if self.service.wallet_state_manager.wallets[record.wallet_id].type() == WalletType.POOLING_WALLET:
# Don't add pool rewards for pool wallets.
continue
pool_reward_amount += record.amount
height = record.height_farmed(self.service.constants.GENESIS_CHALLENGE)
            # Chives Network code:
            # The community reward amount is not counted toward the wallet card totals.
            # (Only the following condition was added; the rest of the block was merely re-indented.)
            if uint64(calculate_base_community_reward(height)) != uint64(record.amount):
if record.type == TransactionType.FEE_REWARD:
fee_amount += record.amount - calculate_base_farmer_reward(height)
farmer_reward_amount += calculate_base_farmer_reward(height)
if height > last_height_farmed:
last_height_farmed = height
amount += record.amount
assert amount == pool_reward_amount + farmer_reward_amount + fee_amount
return {
"farmed_amount": amount,
"pool_reward_amount": pool_reward_amount,
"farmer_reward_amount": farmer_reward_amount,
"community_reward_amount": 0,
"fee_amount": fee_amount,
"last_height_farmed": last_height_farmed,
}
async def create_signed_transaction(self, request, hold_lock=True) -> Dict:
assert self.service.wallet_state_manager is not None
if "additions" not in request or len(request["additions"]) < 1:
raise ValueError("Specify additions list")
additions: List[Dict] = request["additions"]
amount_0: uint64 = uint64(additions[0]["amount"])
assert amount_0 <= self.service.constants.MAX_COIN_AMOUNT
puzzle_hash_0 = hexstr_to_bytes(additions[0]["puzzle_hash"])
if len(puzzle_hash_0) != 32:
raise ValueError(f"Address must be 32 bytes. {puzzle_hash_0.hex()}")
memos_0 = None if "memos" not in additions[0] else [mem.encode("utf-8") for mem in additions[0]["memos"]]
additional_outputs = []
for addition in additions[1:]:
receiver_ph = hexstr_to_bytes(addition["puzzle_hash"])
if len(receiver_ph) != 32:
raise ValueError(f"Address must be 32 bytes. {receiver_ph.hex()}")
amount = uint64(addition["amount"])
if amount > self.service.constants.MAX_COIN_AMOUNT:
raise ValueError(f"Coin amount cannot exceed {self.service.constants.MAX_COIN_AMOUNT}")
memos = None if "memos" not in addition else [mem.encode("utf-8") for mem in addition["memos"]]
additional_outputs.append({"puzzlehash": receiver_ph, "amount": amount, "memos": memos})
fee = uint64(0)
if "fee" in request:
fee = uint64(request["fee"])
coins = None
if "coins" in request and len(request["coins"]) > 0:
coins = set([Coin.from_json_dict(coin_json) for coin_json in request["coins"]])
if hold_lock:
async with self.service.wallet_state_manager.lock:
signed_tx = await self.service.wallet_state_manager.main_wallet.generate_signed_transaction(
amount_0,
puzzle_hash_0,
fee,
coins=coins,
ignore_max_send_amount=True,
primaries=additional_outputs,
memos=memos_0,
)
else:
signed_tx = await self.service.wallet_state_manager.main_wallet.generate_signed_transaction(
amount_0,
puzzle_hash_0,
fee,
coins=coins,
ignore_max_send_amount=True,
primaries=additional_outputs,
memos=memos_0,
)
return {"signed_tx": signed_tx.to_json_dict_convenience(self.service.config)}
##########################################################################################
# Pool Wallet
##########################################################################################
async def pw_join_pool(self, request):
wallet_id = uint32(request["wallet_id"])
wallet: PoolWallet = self.service.wallet_state_manager.wallets[wallet_id]
pool_wallet_info: PoolWalletInfo = await wallet.get_current_state()
owner_pubkey = pool_wallet_info.current.owner_pubkey
target_puzzlehash = None
if await self.service.wallet_state_manager.synced() is False:
raise ValueError("Wallet needs to be fully synced.")
if "target_puzzlehash" in request:
target_puzzlehash = bytes32(hexstr_to_bytes(request["target_puzzlehash"]))
new_target_state: PoolState = create_pool_state(
FARMING_TO_POOL,
target_puzzlehash,
owner_pubkey,
request["pool_url"],
uint32(request["relative_lock_height"]),
)
async with self.service.wallet_state_manager.lock:
tx: TransactionRecord = await wallet.join_pool(new_target_state)
return {"transaction": tx}
async def pw_self_pool(self, request):
# Leaving a pool requires two state transitions.
# First we transition to PoolSingletonState.LEAVING_POOL
# Then we transition to FARMING_TO_POOL or SELF_POOLING
wallet_id = uint32(request["wallet_id"])
wallet: PoolWallet = self.service.wallet_state_manager.wallets[wallet_id]
if await self.service.wallet_state_manager.synced() is False:
raise ValueError("Wallet needs to be fully synced.")
async with self.service.wallet_state_manager.lock:
tx: TransactionRecord = await wallet.self_pool()
return {"transaction": tx}
async def pw_absorb_rewards(self, request):
"""Perform a sweep of the p2_singleton rewards controlled by the pool wallet singleton"""
if await self.service.wallet_state_manager.synced() is False:
raise ValueError("Wallet needs to be fully synced before collecting rewards")
wallet_id = uint32(request["wallet_id"])
wallet: PoolWallet = self.service.wallet_state_manager.wallets[wallet_id]
fee = uint64(request["fee"])
async with self.service.wallet_state_manager.lock:
transaction: TransactionRecord = await wallet.claim_pool_rewards(fee)
state: PoolWalletInfo = await wallet.get_current_state()
return {"state": state.to_json_dict(), "transaction": transaction}
async def pw_status(self, request):
"""Return the complete state of the Pool wallet with id `request["wallet_id"]`"""
wallet_id = uint32(request["wallet_id"])
wallet: PoolWallet = self.service.wallet_state_manager.wallets[wallet_id]
if wallet.type() != WalletType.POOLING_WALLET.value:
raise ValueError(f"wallet_id {wallet_id} is not a pooling wallet")
state: PoolWalletInfo = await wallet.get_current_state()
unconfirmed_transactions: List[TransactionRecord] = await wallet.get_unconfirmed_transactions()
return {
"state": state.to_json_dict(),
"unconfirmed_transactions": unconfirmed_transactions,
}
| 45.486832
| 137
| 0.608948
|
4f85a209e2f4321cced1f99692fa6d3706372441
| 3,461
|
py
|
Python
|
src/problem4.py
|
wenningr/08-Exam1-201920
|
fcfe19a43c186ccfcfe31f2c6c6160151cd45187
|
[
"MIT"
] | null | null | null |
src/problem4.py
|
wenningr/08-Exam1-201920
|
fcfe19a43c186ccfcfe31f2c6c6160151cd45187
|
[
"MIT"
] | null | null | null |
src/problem4.py
|
wenningr/08-Exam1-201920
|
fcfe19a43c186ccfcfe31f2c6c6160151cd45187
|
[
"MIT"
] | null | null | null |
"""
Exam 1, problem 4.
Authors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher,
Mark Hays, Amanda Stouder, Aaron Wilkin, their colleagues,
and PUT_YOUR_NAME_HERE.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
import rosegraphics as rg
def main():
""" Calls the TEST functions in this module. """
run_test_problem4()
def run_test_problem4():
""" Tests the problem4 function. """
print()
print('--------------------------------------------------')
print('Testing the problem4 function:')
print(' See the graphics windows that pop up.')
print('--------------------------------------------------')
# ONE test on this window:
title = 'Test 1 of problem4'
window = rg.RoseWindow(400, 400, title)
problem4(8, 40, rg.Point(10, 350), window)
window.close_on_mouse_click()
# THREE tests on ANOTHER window.
title = 'Tests 2, 3 and 4 of problem4'
window = rg.RoseWindow(450, 400, title)
problem4(5, 50, rg.Point(50, 270), window)
window.continue_on_mouse_click()
problem4(20, 10, rg.Point(10, 350), window)
window.continue_on_mouse_click()
problem4(3, 100, rg.Point(130, 350), window)
window.close_on_mouse_click()
def problem4(number_of_stairs, step_size, starting_point, window):
"""
See problem4_picture.pdf in this project for pictures
that may help you better understand the following specification:
What comes in:
-- Two positive integers
-- An rg.Point.
-- A rg.RoseWindow.
What goes out: Nothing (i.e., None).
Side effects: Draws, on the given RoseWindow:
-- The given starting_point.
-- A "staircase" of rg.Line objects as DESCRIBED ON THE ATTACHED PDF
(problem4_picture.pdf).
-- The last (highest and furthest to the right) point.
(Draw it as an rg.Point.)
Must render but ** NOT close ** the window.
Type hints:
:type number_of_stairs: int
:type step_size: int
:type starting_point: rg.Point
:type window: rg.RoseWindow
"""
# -------------------------------------------------------------------------
# DONE: 2. Implement and test this function.
# Tests have been written for you (above).
# IMPORTANT: For PARTIAL CREDIT, you can draw just the black "bottoms"
# of the stair steps.
# -------------------------------------------------------------------------
point1 = rg.Point(starting_point.x, starting_point.y)
    point2 = rg.Point(starting_point.x, starting_point.y - step_size)
point3 = point1
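    # Each loop iteration draws one stair step: a magenta vertical "riser"
    # from point1 and a black horizontal "tread" from point2, then shifts both
    # points up and to the right by step_size.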
for _ in range(number_of_stairs):
        line1 = rg.Line(point1, rg.Point(point1.x, point1.y - step_size))
line1.color = 'magenta'
line1.thickness = 3
        line2 = rg.Line(point2, rg.Point(point2.x + step_size, point2.y))
line2.color = 'black'
line2.thickness = 3
        point1 = rg.Point(point1.x + step_size, point1.y - step_size)
        point2 = rg.Point(point2.x + step_size, point2.y - step_size)
line1.attach_to(window)
line2.attach_to(window)
point3.attach_to(window)
point4 = point1
point4.attach_to(window)
window.render()
# -----------------------------------------------------------------------------
# Calls main to start the ball rolling.
# -----------------------------------------------------------------------------
main()
| 33.931373
| 79
| 0.56111
|
b64ab06dffd5254d58893f0dbbd4447b761c91aa
| 778
|
py
|
Python
|
var/spack/repos/builtin/packages/py-progressbar2/package.py
|
kkauder/spack
|
6ae8d5c380c1f42094b05d38be26b03650aafb39
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2020-09-10T22:50:08.000Z
|
2021-01-12T22:18:54.000Z
|
var/spack/repos/builtin/packages/py-progressbar2/package.py
|
kkauder/spack
|
6ae8d5c380c1f42094b05d38be26b03650aafb39
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 32
|
2020-12-15T17:29:20.000Z
|
2022-03-21T15:08:31.000Z
|
var/spack/repos/builtin/packages/py-progressbar2/package.py
|
kkauder/spack
|
6ae8d5c380c1f42094b05d38be26b03650aafb39
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 7
|
2018-09-13T18:04:56.000Z
|
2020-03-18T20:52:06.000Z
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyProgressbar2(PythonPackage):
"""A progress bar for Python 2 and Python 3"""
homepage = "https://github.com/WoLpH/python-progressbar"
pypi = "progressbar2/progressbar2-3.50.1.tar.gz"
version('3.50.1', sha256='2c21c14482016162852c8265da03886c2b4dea6f84e5a817ad9b39f6bd82a772')
version('3.39.3', sha256='8e5b5419e04193bb7c3fea71579937bbbcd64c26472b929718c2fe7ec420fe39')
depends_on('py-setuptools', type='build')
depends_on('py-six', type=('build', 'run'))
depends_on('py-python-utils@2.3.0:', type=('build', 'run'))
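# Illustrative usage (not part of the original package file):
#   spack install py-progressbar2@3.50.1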
| 35.363636
| 96
| 0.735219
|
b5db8532236137896b77300aa1942a7c01119214
| 971
|
py
|
Python
|
python3/koans/triangle.py
|
ajl0176/python_koans
|
9ad986cda8543cb1a21ac76e0d269a4d9eedb82b
|
[
"MIT"
] | null | null | null |
python3/koans/triangle.py
|
ajl0176/python_koans
|
9ad986cda8543cb1a21ac76e0d269a4d9eedb82b
|
[
"MIT"
] | null | null | null |
python3/koans/triangle.py
|
ajl0176/python_koans
|
9ad986cda8543cb1a21ac76e0d269a4d9eedb82b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Triangle Project Code.
# Triangle analyzes the lengths of the sides of a triangle
# (represented by a, b and c) and returns the type of triangle.
#
# It returns:
# 'equilateral' if all sides are equal
# 'isosceles' if exactly 2 sides are equal
# 'scalene' if no sides are equal
#
# The tests for this method can be found in
# about_triangle_project.py
# and
# about_triangle_project_2.py
#
def triangle(a, b, c):
if a <= 0 or b <= 0 or c <= 0:
        raise TriangleError(AttributeError('All side lengths must be positive'))
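    # The next check enforces the triangle inequality: it raises when
    # max(a, b, c) >= (sum of the other two sides).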
if a + b + c <= 2 * max(a, b, c):
raise TriangleError(AttributeError('largest side should be smaller than sum of 2 other sides'))
if a == b == c:
return 'equilateral'
elif a == b or b == c or c == a:
return 'isosceles'
return 'scalene'
# Error class used in part 2. No need to change this code.
class TriangleError(Exception):
pass
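# Illustrative calls (not part of the original koan solution):
#   triangle(2, 2, 2)  # -> 'equilateral'
#   triangle(3, 4, 4)  # -> 'isosceles'
#   triangle(3, 4, 5)  # -> 'scalene'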
| 29.424242
| 103
| 0.645726
|
14d6a4b7f1e75eb5e2dd89327ae2abaeac87d1fc
| 755
|
py
|
Python
|
Graphs/LeetCodeGraph/QuickUnion.py
|
yabur/LeetCode_Practice
|
d002dedf8f6694b9d313c8facf0d39e688decb15
|
[
"MIT"
] | null | null | null |
Graphs/LeetCodeGraph/QuickUnion.py
|
yabur/LeetCode_Practice
|
d002dedf8f6694b9d313c8facf0d39e688decb15
|
[
"MIT"
] | null | null | null |
Graphs/LeetCodeGraph/QuickUnion.py
|
yabur/LeetCode_Practice
|
d002dedf8f6694b9d313c8facf0d39e688decb15
|
[
"MIT"
] | null | null | null |
# Quick-union implementation of the union-find (disjoint-set) data structure.
class UnionFind:
def __init__(self, size):
self.root = [i for i in range(size)]
def find(self, x):
while x != self.root[x]:
x = self.root[x]
return x
def union(self, x, y):
rootX = self.find(x)
rootY = self.find(y)
if rootX != rootY:
self.root[rootY] = rootX
def connected(self, x, y):
return self.find(x) == self.find(y)
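# Note: this is the plain "quick union" variant; find() walks parent links and
# can be O(n) deep in the worst case. Path compression (pointing self.root[x]
# at the root while walking) is a common optimization not used here.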
# Test Case
uf = UnionFind(10)
# 1-2-5-6-7 3-8-9 4
uf.union(1, 2)
uf.union(2, 5)
uf.union(5, 6)
uf.union(6, 7)
uf.union(3, 8)
uf.union(8, 9)
print(uf.connected(1, 5)) # true
print(uf.connected(5, 7)) # true
print(uf.connected(4, 9)) # false
# 1-2-5-6-7 3-8-9-4
uf.union(9, 4)
print(uf.connected(4, 9)) # true
| 21.571429
| 44
| 0.556291
|
5fec09656cae8fb7d25b51156074c7d10bdacc71
| 27,229
|
py
|
Python
|
UncertainSCI/pce.py
|
SCIInstitute/UncertainSCI
|
16aa5b1a79c873391f32f4535d8b2592546f9233
|
[
"MIT"
] | 1
|
2021-07-25T17:02:36.000Z
|
2021-07-25T17:02:36.000Z
|
UncertainSCI/pce.py
|
SCIInstitute/UncertainSCI
|
16aa5b1a79c873391f32f4535d8b2592546f9233
|
[
"MIT"
] | 70
|
2020-04-09T17:38:12.000Z
|
2022-03-18T17:06:09.000Z
|
UncertainSCI/pce.py
|
SCIInstitute/UncertainSCI
|
16aa5b1a79c873391f32f4535d8b2592546f9233
|
[
"MIT"
] | 7
|
2020-05-28T17:26:05.000Z
|
2021-08-13T21:41:10.000Z
|
from math import floor
import numpy as np
from UncertainSCI.indexing import MultiIndexSet, TotalDegreeSet
from UncertainSCI.distributions import ProbabilityDistribution
from UncertainSCI.utils.casting import to_numpy_array
from UncertainSCI.utils.version import version_lessthan
from UncertainSCI.utils.linalg import lstsq_loocv_error, weighted_lsq
class PolynomialChaosExpansion():
"""Base polynomial chaos expansion class.
Provides interface to construct and manipulate polynomial chaos expansions.
Attributes:
-----------
coefficients: A numpy array of polynomial chaos expansion coefficients.
    index_set: A MultiIndexSet instance specifying the polynomial
        approximation space.
distribution: A ProbabilityDistribution instance indicating the
distribution of the random variable.
samples: The experimental or sample design in stochastic space.
"""
def __init__(self, index_set=None, distribution=None, order=None):
self.coefficients = None
self.accuracy_metrics = {}
self.samples = None
self.model_output = None
self.sampling = 'induced'
self.training = 'christoffel_lsq'
if distribution is None:
raise ValueError('A distribution must be specified')
else:
self.distribution = distribution
if index_set is None:
if order is None:
raise ValueError('Either "index_set" or "order" must be specified')
else:
self.index_set = TotalDegreeSet(dim=distribution.dim, order=order)
else:
self.index_set = index_set
def set_indices(self, index_set):
"""Sets multi-index set for polynomial approximation.
Args:
            index_set: A MultiIndexSet instance specifying the polynomial
                approximation space.
Returns:
None:
"""
if isinstance(index_set, MultiIndexSet):
self.index_set = index_set
else:
raise ValueError('Indices must be a MultiIndexSet object')
def set_distribution(self, distribution):
"""Sets type of probability distribution of random variable.
Args:
distribution: A ProbabilityDistribution instance specifying the
distribution of the random variable.
Returns:
None:
"""
if isinstance(distribution, ProbabilityDistribution):
self.distribution = distribution
else:
raise ValueError(('Distribution must be a ProbabilityDistribution'
'object'))
def check_distribution(self):
if self.distribution is None:
raise ValueError('First set distribution with set_distribution')
def check_indices(self):
if self.index_set is None:
raise ValueError('First set indices with set_indices')
def set_samples(self, samples):
if samples.shape[1] != self.index_set.get_indices().shape[1]:
raise ValueError('Input parameter samples '
'have wrong dimension')
self.samples = samples
def generate_samples(self, new_samples=None, **sampler_options):
"""Generates sample/experimental design for use in PCE construction.
Parameters:
new_samples (array-like, optional): Specifies samples that must be
part of the ensemble.
"""
self.check_distribution()
self.check_indices()
if self.sampling.lower() == 'induced':
if new_samples is None:
p_standard = self.distribution.polys.wafp_sampling(
self.index_set.get_indices(), **sampler_options)
# Maps to domain
self.samples = self.distribution.transform_to_standard.mapinv(
self.distribution.
transform_standard_dist_to_poly.
mapinv(p_standard))
else: # Add new_samples random samples
x = self.distribution.transform_standard_dist_to_poly.map(
self.distribution.transform_to_standard.map(
self.samples))
x = self.distribution.polys.wafp_sampling_restart(
self.index_set.get_indices(), x, new_samples,
**sampler_options)
self.samples = self.distribution.transform_to_standard.mapinv(
self.distribution.
transform_standard_dist_to_poly.
mapinv(x))
else:
raise ValueError("Unsupported sample type '{0}' for input\
sample_type".format(self.sampling))
def integration_weights(self):
"""
Generates sample weights associated to integration."
"""
if self.training == 'christoffel_lsq':
p_standard = self.distribution.transform_standard_dist_to_poly.map(
self.distribution.transform_to_standard.map(self.samples))
V = self.distribution.polys.eval(p_standard,
self.index_set.get_indices())
weights = self.training_weights()
# Should replace with more well-conditioned procedure
rhs = np.zeros(V.shape[1])
ind = np.where(np.linalg.norm(self.index_set.get_indices(), axis=1)==0)[0]
rhs[ind] = 1.
b = np.linalg.solve((V.T @ np.diag(weights) @ V), rhs)
return weights * (V @ b)
def training_weights(self):
"""
Generates sample weights associated to training.
"""
if self.training == 'christoffel_lsq':
p_standard = self.distribution.transform_standard_dist_to_poly.map(
self.distribution.transform_to_standard.map(self.samples))
V = self.distribution.polys.eval(p_standard,
self.index_set.get_indices())
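            # Inverse (discrete) Christoffel weights: w_i = 1 / sum_k V[i, k]^2,
            # which down-weights samples where the polynomial basis is large,
            # stabilizing the weighted least-squares problem.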
return 1/(np.sum(V**2, axis=1))
else:
            raise ValueError('Unrecognized training directive "{0:s}"'.format(self.training))
def build_pce_wlsq(self):
"""
Performs a (weighted) least squares PCE surrogate using saved samples
and model output.
"""
p_standard = self.distribution.transform_standard_dist_to_poly.map(
self.distribution.transform_to_standard.map(self.samples))
V = self.distribution.polys.eval(p_standard,
self.index_set.get_indices())
# Precondition for stability
weights = self.training_weights()
coeffs, residuals = weighted_lsq(V, self.model_output, weights)
self.accuracy_metrics['loocv'] = lstsq_loocv_error(V, self.model_output,
weights)
self.accuracy_metrics['residuals'] = residuals
self.coefficients = coeffs
self.p = self.samples # Should get rid of this.
return residuals
def identify_bulk(self, delta=0.5):
"""
Performs (adaptive) bulk chasing for refining polynomial spaces.
Returns the indices associated with a delta-bulk of a OMP-type
indicator.
"""
assert 0 < delta <= 1
indtol = 1e-12
rmargin = self.index_set.get_reduced_margin()
indicators = np.zeros(rmargin.shape[0])
p_standard = self.distribution.transform_standard_dist_to_poly.map(
self.distribution.transform_to_standard.map(self.samples))
# Vandermonde-like matrices for current and margin indices
V = self.distribution.polys.eval(p_standard, self.index_set.get_indices())
Vmargin = self.distribution.polys.eval(p_standard, rmargin)
Vnorms = np.sum(V**2, axis=1)
residuals = ((V @ self.coefficients) - self.model_output)
# OMP-style computation of indicator functions
for m in range(rmargin.shape[0]):
norms = 1/Vnorms + Vmargin[:, m]**2
indicators[m] = np.linalg.norm((Vmargin[:, m]*norms).T @ residuals)**2
if np.sum(indicators) <= indtol:
print('Current residual error too small: Not adding indices')
return
else:
indicators /= np.sum(indicators)
# Sort by indicator, and return top indicators that contribute to the
# fraction delta of unity
sorted_indices = np.argsort(indicators)[::-1]
sorted_cumulative_indicators = np.cumsum(indicators[sorted_indices])
bulk_size = np.argmax(sorted_cumulative_indicators >= delta) + 1
return rmargin[sorted_indices[:bulk_size], :]
def augment_samples_idist(self, K, weights=None, fast_sampler=True):
"""
Augments random samples from induced distribution. Typically done via
an adaptive refinement procedure. As such some inputs can be given to
customize how the samples are drawn in the context of adaptivity:
K: how many samples to add (required)
weights: a discrete probability distribution on
self.index_set.get_indices() that describes how the induced
            distribution is sampled. Default is uniform.
"""
return self.distribution.polys.idist_mixture_sampling(K,
self.index_set.get_indices(),
weights=weights,
fast_sampler=fast_sampler)
def adapt_expressivity(self, max_new_samples=10, **chase_bulk_options):
"""
Adapts the PCE approximation by increasing expressivity.
(Intended to combat residual error.)
"""
from numpy.linalg import norm
Mold = self.samples.shape[0]
indices = []
sample_count = []
KK = self.accuracy_metrics['residuals'].size
residuals = [norm(self.accuracy_metrics['residuals'])/np.sqrt(KK), ]
loocv = [norm(self.accuracy_metrics['loocv'])/np.sqrt(KK), ]
while self.samples.shape[0] < max_new_samples + Mold:
samples_left = max_new_samples + Mold - self.samples.shape[0]
a, b = self.chase_bulk(max_new_samples=samples_left, **chase_bulk_options)
indices.append(self.index_set.get_indices()[-a:, :])
sample_count.append(b)
residuals.append(norm(self.accuracy_metrics['residuals'])/np.sqrt(KK))
loocv.append(norm(self.accuracy_metrics['loocv'])/np.sqrt(KK))
return residuals, loocv, indices, sample_count
def adapt_robustness(self, max_new_samples=10, verbosity=1):
"""
Adapts the PCE approximation by increasing robustness.
(Intended to combat cross-validation error.)
"""
# Just add new samples
Mold = self.samples.shape[0]
self.generate_samples(new_samples=max_new_samples)
# Resample model
self.model_output = np.vstack((self.model_output,
np.zeros([max_new_samples, self.model_output.shape[1]])))
for ind in range(Mold, Mold+max_new_samples):
self.model_output[ind, :] = self.model(self.samples[ind, :])
old_accuracy = self.accuracy_metrics.copy()
self.build_pce_wlsq()
KK = np.sqrt(self.model_output.shape[1])
if verbosity > 0:
errstr = "new samples: {0:6d}\n \
old residual: {1:1.3e}, old loocv: {2:1.3e}\n \
new residual: {3:1.3e}, new loocv: {4:1.3e}\
".format(max_new_samples,
np.linalg.norm(old_accuracy['residuals']/KK),
np.linalg.norm(old_accuracy['loocv']/KK),
np.linalg.norm(self.accuracy_metrics['residuals']/KK),
np.linalg.norm(self.accuracy_metrics['loocv'])/KK)
print(errstr)
def chase_bulk(self, delta=0.5, max_new_samples=None, max_new_indices=None,
add_rule=None, mult_rule=None, verbosity=1):
"""
Performs adaptive bulk chasing, which (i) adds the most "important"
indices to the polynomial index set, (ii) takes more samples, (iii)
updates the PCE approximation, including statistics and error metrics.
Args:
max_new_samples (int): Maximum number of new samples to add.
Defaults to None.
max_new_indices (int): Maximum number of new PCE indices to add.
Defaults to None.
add_rule (int): Specifies number of samples added as a function
of number of added indices. Nsamples = Nindices + add_rule.
Defaults to None.
mult_rule (float): Specifies number of samples added as a function
of number of added indices. Nsamples =
int(Nindices * add_rule). Defaults to None.
"""
if (max_new_samples is not None) and (max_new_indices is not None):
assert False, "Cannot specify both new sample and new indices max"
if (add_rule is not None) and (mult_rule is None):
samplefun = lambda Nindices: int(Nindices + add_rule)
elif (add_rule is None) and (mult_rule is not None):
samplefun = lambda Nindices: int(Nindices * mult_rule)
elif (add_rule is None) and (mult_rule is None):
samplefun = lambda Nindices: int(Nindices + 2)
else:
assert False, 'Cannot specify both an '\
'additive and multiplicative rule'
indices = self.identify_bulk(delta=delta)
# Determine number of indices we augment by
if max_new_samples is not None: # Limited by sample count
assert max_new_samples > 0
Nindices = len(indices)
while samplefun(Nindices) > max_new_samples:
Nindices -= 1
# Require at least 1 index to be added.
Nindices = max(1, Nindices)
elif max_new_indices is not None: # Limited by number of indices
Nindices = max_new_indices
else: # No limits: take all indices
Nindices = len(indices)
assert Nindices > 0
L = self.index_set.size()
weights = np.zeros(L + Nindices)
# Assign 50% weight to new indices
weights[:L] = 0.5 / L
weights[L:] = 0.5 / Nindices
# Add indices to index set
self.index_set.augment(indices[:Nindices, :])
# Add new samples
Mold = self.samples.shape[0]
Nsamples = samplefun(Nindices)
self.generate_samples(new_samples=Nsamples, weights=weights)
# Resample model
self.model_output = np.vstack((self.model_output,
np.zeros([Nsamples, self.model_output.shape[1]])))
for ind in range(Mold, Mold+Nsamples):
self.model_output[ind, :] = self.model(self.samples[ind, :])
old_accuracy = self.accuracy_metrics.copy()
self.build_pce_wlsq()
KK = np.sqrt(self.model_output.shape[1])
if verbosity > 0:
errstr = ('new indices: {0:6d}, new samples: {1:6d}\n'
'old residual: {2:1.3e}, old loocv: {3:1.3e}\n'
'new residual: {4:1.3e}, new loocv: {5:1.3e}'
).format(Nindices, Nsamples,
np.linalg.norm(old_accuracy['residuals']/KK),
np.linalg.norm(old_accuracy['loocv']/KK),
np.linalg.norm(self.accuracy_metrics['residuals']/KK),
np.linalg.norm(self.accuracy_metrics['loocv'])/KK)
print(errstr)
return Nindices, Nsamples
def build(self, model=None, model_output=None, **options):
"""Builds PCE from sampling and approximation settings.
Args:
model: A pointer to a function with the syntax xi ---> model(xi),
which returns a vector corresponding to the model evaluated at
the stochastic parameter value xi. The input xi to the model
function should be a vector of size self.dim, and the output
should be a 1D numpy array. If model_output is None, this is
required. If model_output is given, this is ignored.
model_output: A numpy.ndarray corresponding to the output of the
model at the sample locations specified by self.samples. This is
required if the input model is None.
Returns:
None:
"""
self.check_distribution()
self.check_indices()
# Samples on standard domain
if self.samples is None:
self.generate_samples(**options)
else:
pass # User didn't specify samples now, but did previously
if self.model_output is None: # We need to generate data
if model_output is None:
if model is None:
raise ValueError('Must input argument "model" or "model_output".')
else:
self.model = model
for ind in range(self.samples.shape[0]):
if model_output is None:
model_output = model(self.samples[ind, :])
M = model_output.size
model_output = np.concatenate([model_output.reshape([1, M]),
np.zeros([self.samples.shape[0]-1, M])], axis=0)
else:
model_output[ind, :] = model(self.samples[ind, :])
self.model_output = model_output
else:
pass # We'll assume the user did things correctly.
# For now, we only have 1 method:
if self.training == 'christoffel_lsq':
return self.build_pce_wlsq()
else:
            raise ValueError('Unrecognized training directive "{0:s}"'.format(self.training))
def assert_pce_built(self):
if self.coefficients is None:
raise ValueError('First build the PCE with pce.build()')
def mean(self):
"""Returns PCE mean.
Returns:
numpy.ndarray: A vector containing the PCE mean, of size equal to the size
of the vector of the model output.
"""
self.assert_pce_built()
return self.coefficients[0, :]
def stdev(self):
"""
Returns PCE standard deviation
Returns:
numpy.ndarray: A vector containing the PCE standard deviation, of size
equal to the size of the vector of the model output.
"""
self.assert_pce_built()
return np.sqrt(np.sum(self.coefficients[1:, :]**2, axis=0))
def pce_eval(self, p, components=None):
"""Evaluates the PCE at particular parameter locations.
Args:
p: An array (satisfying p.shape[1]==self.dim) containing a set of
parameter points at which to evaluate the PCE prediction.
components: An array of non-negative integers specifying which
indices in the model output to compute. Other indices are
ignored. If given as None (default), then all components are
computed.
Returns:
numpy.ndarray: An array containing evaluations (predictions) from the PCE
emulator. If the input components is None, this array is of size (
self.p.shape[0] x self.coefficients.shape[1] ). Otherwise, the
second dimension is of size components.size.
"""
self.assert_pce_built()
p_std = self.distribution.transform_standard_dist_to_poly.map(
self.distribution.transform_to_standard.map(p))
if components is None:
return np.dot(self.distribution.polys.eval(p_std,
self.index_set.
get_indices()),
self.coefficients)
else:
return np.dot(self.distribution.polys.eval(p_std,
self.index_set.
get_indices()),
self.coefficients[:, components])
eval = pce_eval
def quantile(self, q, M=100):
"""
Computes q-quantiles using M-point Monte Carlo sampling.
"""
self.assert_pce_built()
q = to_numpy_array(q)
# Maximum number of floats generated at any given time
MF = max([int(1e6), M, self.distribution.dim])
# How many model degrees of freedom we can consider at any time
pce_batch_size = floor(MF/M)
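        # Evaluate PCE output components in batches of pce_batch_size so that
        # at most ~MF floats of ensemble evaluations are held in memory at once.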
quantiles = np.zeros([len(q), self.coefficients.shape[1]])
pce_counter = 0
p = self.distribution.MC_samples(M)
while pce_counter < self.coefficients.shape[1]:
end_ind = min([self.coefficients.shape[1], pce_counter + pce_batch_size])
inds = range(pce_counter, end_ind)
ensemble = self.pce_eval(p, components=inds)
quantiles[:, inds] = np.quantile(ensemble, q, axis=0)
pce_counter = end_ind
return quantiles
def total_sensitivity(self, dim_indices=None, vartol=1e-16):
"""
        Computes the total sensitivity associated with the dimensions in
        dim_indices from the PCE coefficients. dim_indices should be a
        list-type containing dimension indices.
        The output is len(dim_indices) x self.coefficients.shape[1].
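        For dimension :math:`j`, this computes the total Sobol' index
        expressed through the PCE coefficients :math:`c_\\lambda`,
        .. math::
          T_j = \\frac{\\sum_{\\lambda : \\lambda_j > 0} c_\\lambda^2}{\\sum_{|\\lambda| > 0} c_\\lambda^2}.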
"""
self.assert_pce_built()
if dim_indices is None:
dim_indices = range(self.distribution.dim)
dim_indices = np.asarray(dim_indices, dtype=int)
indices = self.index_set.get_indices()
# variance_rows = np.linalg.norm(indices, axis=1) > 0.
# variances = np.sum(self.coefficients[variance_rows,:]**2, axis=0)
variance = self.stdev()**2
total_sensitivities = np.zeros([dim_indices.size, self.coefficients.shape[1]])
# Return 0 sensitivity if the variance is 0.
zerovar = variance < vartol
for (qj, j) in enumerate(dim_indices):
total_sensitivities[qj, ~zerovar] = np.sum(self.coefficients[np.ix_(indices[:, j] > 0, ~zerovar)]**2,
axis=0) / variance[~zerovar]
return total_sensitivities
def global_sensitivity(self, dim_lists=None, vartol=1e-16):
"""
        Computes the global sensitivity associated with the dimensional index
        lists in dim_lists from the PCE coefficients.
        dim_lists should be a list of index lists. The global sensitivity for
        each index list is returned.
        The output is len(dim_lists) x self.coefficients.shape[1].
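        For an index list :math:`J`, this computes
        .. math::
          S_J = \\frac{\\sum_{\\lambda \\in \\Lambda_J} c_\\lambda^2}{\\sum_{|\\lambda| > 0} c_\\lambda^2},
        where :math:`\\Lambda_J` is the set of multi-indices :math:`\\lambda`
        with :math:`\\lambda_j > 0` for all :math:`j \\in J` and
        :math:`\\lambda_k = 0` otherwise.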
"""
# unique_rows = np.vstack({tuple(row) for row in lambdas})
# # Just making sure
# assert unique_rows.shape[0] == lambdas.shape[0]
indices = self.index_set.get_indices()
# variance_rows = np.linalg.norm(indices, axis=1) > 0.
# assert np.sum(np.invert(variance_rows)) == 1
variance = self.stdev()**2
global_sensitivities = np.zeros([len(dim_lists), self.coefficients.shape[1]])
dim = self.distribution.dim
# Return 0 sensitivity if the variance is 0.
zerovar = variance < vartol
for (qj, j) in enumerate(dim_lists):
jc = np.setdiff1d(range(dim), j)
inds = np.logical_and(np.all(indices[:, j] > 0, axis=1),
np.all(indices[:, jc] == 0, axis=1))
global_sensitivities[qj, ~zerovar] = np.sum(self.coefficients[np.ix_(inds, ~zerovar)]**2,
axis=0) / variance[~zerovar]
return global_sensitivities
def global_derivative_sensitivity(self, dim_list):
"""
Computes global derivative-based sensitivity indices. For a
PCE with respect to a :math:`d`-dimensional random variable :math:`Z`,
        the sensitivity index along dimension :math:`i` is defined as
        .. math::
          S_i \\coloneqq E \\left[ \\frac{\\partial p}{\\partial z_i}(Z) \\right] = \\int \\frac{\\partial p}{\\partial z_i}(z) \\omega(z) \\, d z,
        where :math:`E[\\cdot]` is the expectation operator, :math:`p` is the PCE
emulator, and :math:`\\omega` is the probability density function for
the random variable :math:`Z`.
These sensitivity indices measure the average rate-of-change of the PCE
response with respect to dimension :math:`i`.
Args:
            dim_list: A list-type iterable with D entries, containing
dimensional indices in 0-based indexing. All entries must be
between 0 and self.distribution.dim.
Returns:
S: DxK array, where each row corresponds to the sensitivity index
:math:`S_i` across all K features of the PCE model.
"""
indices = self.index_set.get_indices()
assert all([0 <= dim <= self.distribution.dim-1 for dim in dim_list])
D = len(dim_list)
S = np.zeros([D, self.coefficients.shape[1]])
all_dims = range(self.distribution.dim)
# TODO: make map compositions default in PCE
composed_map = self.distribution.transform_standard_dist_to_poly.compose(
self.distribution.transform_to_standard)
# Precompute derivative expansion matrices
M = self.index_set.max_univariate_degree()
Cs = [None, ]*self.distribution.dim
for q in range(self.distribution.dim):
Cs[q] = self.distribution.\
polys.\
get_univariate_derivative_expansion(q, 1, M, 0)
for ind, dim in enumerate(dim_list):
# Rows of indices whose non-column-dim entries are 0 contribute
notdim = [val for val in all_dims if val != dim]
flags = self.index_set.zero_indices(notdim)
b0 = 1.
for val in notdim:
b0 *= self.distribution.polys.\
get_univariate_recurrence(0, val)[0, 1]
for q in range(self.distribution.dim):
S[ind, :] += (composed_map.A[q, dim] *
Cs[q][indices[flags, dim]].T @
self.coefficients[flags, :]).flatten()
S[ind, :] *= b0
return S
| 39.348266
| 113
| 0.581953
|
6f825807710b411626d4d8130dc7b1ff21429419
| 372
|
py
|
Python
|
vbb/users/migrations/0002_user_date_of_birth.py
|
VillageBookBuilders/vbb-portal-packend
|
9563b492aa93f12fdfed41a905ff185182e97dd8
|
[
"MIT"
] | 1
|
2022-03-30T18:12:49.000Z
|
2022-03-30T18:12:49.000Z
|
vbb/users/migrations/0002_user_date_of_birth.py
|
VillageBookBuilders/vbb-portal-backend
|
decdec392f7bd585b73e5554b20c17baea5d133d
|
[
"MIT"
] | 22
|
2022-02-28T02:37:03.000Z
|
2022-03-28T02:32:35.000Z
|
vbb/users/migrations/0002_user_date_of_birth.py
|
VillageBookBuilders/vbb-portal-packend
|
9563b492aa93f12fdfed41a905ff185182e97dd8
|
[
"MIT"
] | null | null | null |
# Generated by Django 4.0.3 on 2022-04-20 23:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='user',
name='date_of_birth',
field=models.DateField(null=True),
),
]
| 19.578947
| 47
| 0.583333
|
d06b2a1f6975c339643f7ab2099c884429ffc33f
| 13,531
|
py
|
Python
|
luigi/contrib/postgres.py
|
nathantsoi/luigi
|
a5f95fe2381f74ed083aa1a5004279d76fafa72f
|
[
"Apache-2.0"
] | 5
|
2015-02-26T18:52:56.000Z
|
2017-07-07T05:47:18.000Z
|
luigi/contrib/postgres.py
|
nathantsoi/luigi
|
a5f95fe2381f74ed083aa1a5004279d76fafa72f
|
[
"Apache-2.0"
] | 6
|
2015-02-10T17:09:21.000Z
|
2022-03-22T10:16:09.000Z
|
luigi/contrib/postgres.py
|
nathantsoi/luigi
|
a5f95fe2381f74ed083aa1a5004279d76fafa72f
|
[
"Apache-2.0"
] | 9
|
2015-01-26T14:47:57.000Z
|
2020-07-07T17:01:25.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Implements a subclass of :py:class:`~luigi.target.Target` that writes data to Postgres.
Also provides a helper task to copy data into a Postgres table.
"""
import datetime
import logging
import re
import tempfile
from luigi import six
import luigi
from luigi.contrib import rdbms
logger = logging.getLogger('luigi-interface')
try:
import psycopg2
import psycopg2.errorcodes
import psycopg2.extensions
except ImportError:
logger.warning("Loading postgres module without psycopg2 installed. Will crash at runtime if postgres functionality is used.")
class MultiReplacer(object):
"""
Object for one-pass replace of multiple words
Substituted parts will not be matched against other replace patterns, as opposed to when using multipass replace.
The order of the items in the replace_pairs input will dictate replacement precedence.
Constructor arguments:
replace_pairs -- list of 2-tuples which hold strings to be replaced and replace string
Usage:
.. code-block:: python
>>> replace_pairs = [("a", "b"), ("b", "c")]
>>> MultiReplacer(replace_pairs)("abcd")
'bccd'
>>> replace_pairs = [("ab", "x"), ("a", "x")]
>>> MultiReplacer(replace_pairs)("ab")
'x'
>>> replace_pairs.reverse()
>>> MultiReplacer(replace_pairs)("ab")
'xb'
"""
# TODO: move to misc/util module
def __init__(self, replace_pairs):
"""
Initializes a MultiReplacer instance.
:param replace_pairs: list of 2-tuples which hold strings to be replaced and replace string.
:type replace_pairs: tuple
"""
replace_list = list(replace_pairs) # make a copy in case input is iterable
self._replace_dict = dict(replace_list)
pattern = '|'.join(re.escape(x) for x, y in replace_list)
self._search_re = re.compile(pattern)
def _replacer(self, match_object):
# this method is used as the replace function in the re.sub below
return self._replace_dict[match_object.group()]
def __call__(self, search_string):
# using function replacing for a per-result replace
return self._search_re.sub(self._replacer, search_string)
# these are the escape sequences recognized by postgres COPY
# according to http://www.postgresql.org/docs/8.1/static/sql-copy.html
default_escape = MultiReplacer([('\\', '\\\\'),
('\t', '\\t'),
('\n', '\\n'),
('\r', '\\r'),
('\v', '\\v'),
('\b', '\\b'),
('\f', '\\f')
])
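# For example, default_escape('a\tb') returns 'a\\tb' (a literal backslash
# followed by "t"), so embedded tabs and newlines survive in a tab-separated
# COPY file.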
class PostgresTarget(luigi.Target):
"""
Target for a resource in Postgres.
This will rarely have to be directly instantiated by the user.
"""
marker_table = luigi.configuration.get_config().get('postgres', 'marker-table', 'table_updates')
# Use DB side timestamps or client side timestamps in the marker_table
use_db_timestamps = True
def __init__(
self, host, database, user, password, table, update_id, port=5432
):
"""
Args:
host (str): Postgres server address. Possibly a host:port string.
database (str): Database name
user (str): Database user
            password (str): Password for specified user
            table (str): Table name
            update_id (str): An identifier for this data set
port (int): Postgres server port.
"""
if ':' in host:
self.host, self.port = host.split(':')
else:
self.host = host
self.port = port
self.database = database
self.user = user
self.password = password
self.table = table
self.update_id = update_id
def touch(self, connection=None):
"""
Mark this update as complete.
Important: If the marker table doesn't exist, the connection transaction will be aborted
and the connection reset.
Then the marker table will be created.
"""
self.create_marker_table()
if connection is None:
# TODO: test this
connection = self.connect()
connection.autocommit = True # if connection created here, we commit it here
if self.use_db_timestamps:
connection.cursor().execute(
"""INSERT INTO {marker_table} (update_id, target_table)
VALUES (%s, %s)
""".format(marker_table=self.marker_table),
(self.update_id, self.table))
else:
connection.cursor().execute(
"""INSERT INTO {marker_table} (update_id, target_table, inserted)
VALUES (%s, %s, %s);
""".format(marker_table=self.marker_table),
(self.update_id, self.table,
datetime.datetime.now()))
def exists(self, connection=None):
if connection is None:
connection = self.connect()
connection.autocommit = True
cursor = connection.cursor()
try:
cursor.execute("""SELECT 1 FROM {marker_table}
WHERE update_id = %s
LIMIT 1""".format(marker_table=self.marker_table),
(self.update_id,)
)
row = cursor.fetchone()
except psycopg2.ProgrammingError as e:
if e.pgcode == psycopg2.errorcodes.UNDEFINED_TABLE:
row = None
else:
raise
return row is not None
def connect(self):
"""
Get a psycopg2 connection object to the database where the table is.
"""
connection = psycopg2.connect(
host=self.host,
port=self.port,
database=self.database,
user=self.user,
password=self.password)
connection.set_client_encoding('utf-8')
return connection
def create_marker_table(self):
"""
Create marker table if it doesn't exist.
Using a separate connection since the transaction might have to be reset.
"""
connection = self.connect()
connection.autocommit = True
cursor = connection.cursor()
if self.use_db_timestamps:
sql = """ CREATE TABLE {marker_table} (
update_id TEXT PRIMARY KEY,
target_table TEXT,
inserted TIMESTAMP DEFAULT NOW())
""".format(marker_table=self.marker_table)
else:
sql = """ CREATE TABLE {marker_table} (
update_id TEXT PRIMARY KEY,
target_table TEXT,
inserted TIMESTAMP);
""".format(marker_table=self.marker_table)
try:
cursor.execute(sql)
except psycopg2.ProgrammingError as e:
if e.pgcode == psycopg2.errorcodes.DUPLICATE_TABLE:
pass
else:
raise
connection.close()
def open(self, mode):
raise NotImplementedError("Cannot open() PostgresTarget")
class CopyToTable(rdbms.CopyToTable):
"""
Template task for inserting a data set into Postgres
Usage:
Subclass and override the required `host`, `database`, `user`,
`password`, `table` and `columns` attributes.
To customize how to access data from an input task, override the `rows` method
with a generator that yields each row as a tuple with fields ordered according to `columns`.
"""
def rows(self):
"""
Return/yield tuples or lists corresponding to each row to be inserted.
"""
with self.input().open('r') as fobj:
for line in fobj:
yield line.strip('\n').split('\t')
def map_column(self, value):
"""
Applied to each column of every row returned by `rows`.
Default behaviour is to escape special characters and identify any self.null_values.
"""
if value in self.null_values:
return r'\\N'
else:
return default_escape(six.text_type(value))
# everything below will rarely have to be overridden
def output(self):
"""
Returns a PostgresTarget representing the inserted dataset.
Normally you don't override this.
"""
return PostgresTarget(
host=self.host,
database=self.database,
user=self.user,
password=self.password,
table=self.table,
update_id=self.update_id
)
def copy(self, cursor, file):
if isinstance(self.columns[0], six.string_types):
column_names = self.columns
elif len(self.columns[0]) == 2:
column_names = [c[0] for c in self.columns]
else:
raise Exception('columns must consist of column strings or (column string, type string) tuples (was %r ...)' % (self.columns[0],))
cursor.copy_from(file, self.table, null=r'\\N', sep=self.column_separator, columns=column_names)
def run(self):
"""
Inserts data generated by rows() into target table.
If the target table doesn't exist, self.create_table will be called to attempt to create the table.
Normally you don't want to override this.
"""
if not (self.table and self.columns):
raise Exception("table and columns need to be specified")
connection = self.output().connect()
# transform all data generated by rows() using map_column and write data
# to a temporary file for import using postgres COPY
tmp_dir = luigi.configuration.get_config().get('postgres', 'local-tmp-dir', None)
tmp_file = tempfile.TemporaryFile(dir=tmp_dir)
n = 0
for row in self.rows():
n += 1
if n % 100000 == 0:
logger.info("Wrote %d lines", n)
rowstr = self.column_separator.join(self.map_column(val) for val in row)
rowstr += "\n"
tmp_file.write(rowstr.encode('utf-8'))
logger.info("Done writing, importing at %s", datetime.datetime.now())
tmp_file.seek(0)
# attempt to copy the data into postgres
# if it fails because the target table doesn't exist
# try to create it by running self.create_table
for attempt in range(2):
try:
cursor = connection.cursor()
self.init_copy(connection)
self.copy(cursor, tmp_file)
self.post_copy(connection)
except psycopg2.ProgrammingError as e:
if e.pgcode == psycopg2.errorcodes.UNDEFINED_TABLE and attempt == 0:
# if first attempt fails with "relation not found", try creating table
logger.info("Creating table %s", self.table)
connection.reset()
self.create_table(connection)
else:
raise
else:
break
# mark as complete in same transaction
self.output().touch(connection)
# commit and clean up
connection.commit()
connection.close()
tmp_file.close()
class PostgresQuery(rdbms.Query):
"""
Template task for querying a Postgres compatible database
Usage:
Subclass and override the required `host`, `database`, `user`, `password`, `table`, and `query` attributes.
Optionally one can override the `autocommit` attribute to put the connection for the query in autocommit mode.
Override the `run` method if your use case requires some action with the query result.
Task instances require a dynamic `update_id`, e.g. via parameter(s), otherwise the query will only execute once
To customize the query signature as recorded in the database marker table, override the `update_id` property.
"""
def run(self):
connection = self.output().connect()
connection.autocommit = self.autocommit
cursor = connection.cursor()
sql = self.query
logger.info('Executing query from task: {name}'.format(name=self.__class__))
cursor.execute(sql)
# Update marker table
self.output().touch(connection)
# commit and close connection
connection.commit()
connection.close()
def output(self):
"""
Returns a PostgresTarget representing the executed query.
Normally you don't override this.
"""
return PostgresTarget(
host=self.host,
database=self.database,
user=self.user,
password=self.password,
table=self.table,
update_id=self.update_id
)
| 34.606138
| 142
| 0.593969
|
7eb5768826066dbc24698167ba588889e82a6aac
| 4,169
|
py
|
Python
|
cinemasci/cview/__init__.py
|
utkarshayachit/cinemasci
|
a06218b2ab6bad7f7c2a68296a4d69297b9302a8
|
[
"BSD-3-Clause"
] | null | null | null |
cinemasci/cview/__init__.py
|
utkarshayachit/cinemasci
|
a06218b2ab6bad7f7c2a68296a4d69297b9302a8
|
[
"BSD-3-Clause"
] | null | null | null |
cinemasci/cview/__init__.py
|
utkarshayachit/cinemasci
|
a06218b2ab6bad7f7c2a68296a4d69297b9302a8
|
[
"BSD-3-Clause"
] | null | null | null |
import cinemasci
import http.server
import socketserver
from urllib.parse import urlparse
from urllib.parse import parse_qs
from os import path
from os.path import relpath
from os import getcwd
from os import access
from os import R_OK
import pathlib
#
# global variables - can't seem to add an instance variable to the
# subclass of SimpleHTTPRequestHandler
#
TheDatabase = "CINEMAJUNK"
CinemaInstallPath = "CINEMAJUNK"
def set_install_path():
global CinemaInstallPath
CinemaInstallPath = str(pathlib.Path(__file__).parent.absolute())
def get_relative_install_path( initpath ):
global CinemaInstallPath
result = path.join(CinemaInstallPath, initpath.strip("/"))
result = relpath(result, getcwd())
print("REL IN PATH: {}".format(result))
return result
#
# CinemaReqestHandler
#
# Processes GET requests to find viewers and databases
#
class CinemaRequestHandler(http.server.SimpleHTTPRequestHandler):
def log(self, message):
if False:
print(message)
def do_GET(self):
global TheDatabase
self.log("PATH ORIG: {}".format(self.path))
query_components = parse_qs(urlparse(self.path).query)
self.log("QUERY : {}".format(query_components))
self.path = self.path.split("?")[0]
self.log("PATH : {}".format(self.path))
# set attributes from a query in the GET URL
if "databases" in query_components:
TheDatabase = query_components["databases"][0]
# if not TheDatabase.startswith("/"):
# TheDatabase = "/" + TheDatabase
self.log("SET DB : {}".format(TheDatabase))
if "viewer" in query_components:
# handle a request for a viewer
viewer = query_components["viewer"][0]
if viewer == "explorer":
# handle a request for the Cinema:Explorer viewer
self.log("EXPLORER")
self.path = get_relative_install_path("/cinema_explorer.html")
return http.server.SimpleHTTPRequestHandler.do_GET(self)
elif viewer == "view":
# handle a request for the Cinema:View viewer
self.log("VIEW")
self.path = get_relative_install_path("/cinema_view.html")
return http.server.SimpleHTTPRequestHandler.do_GET(self)
else:
self.log("VIEWER: -{}-".format(viewer))
if self.path.startswith(TheDatabase):
# handle requests to the database
# remap absolute paths
if TheDatabase.startswith("/"):
self.log("DB QUERY : {}".format(self.path))
self.path = relpath(self.path, getcwd())
self.log("CWD : {}".format(getcwd()))
self.log("REL DB : {}".format(self.path))
if access(self.path, R_OK):
self.log("ACCESSING: {}".format(self.path))
return http.server.SimpleHTTPRequestHandler.do_GET(self)
else:
print("ERROR: cannot access file: {}".format(self.path))
elif self.path.startswith("/cinema"):
# handle a requests for sub components of the viewers
            # NOTE: fragile - requires the 'cinema' path to be unique
self.log("CINEMA : {}".format(self.path))
self.path = get_relative_install_path(self.path)
self.log(" {}".format(self.path))
return http.server.SimpleHTTPRequestHandler.do_GET(self)
else:
# everything else
self.log("NORMAL : {}".format(self.path))
return http.server.SimpleHTTPRequestHandler.do_GET(self)
def run_cinema_server( viewer, data, port, assetname=None):
localhost = "http://127.0.0.1"
set_install_path()
my_handler = CinemaRequestHandler
with socketserver.TCPServer(("", port), my_handler) as httpd:
urlstring = "{}:{}/?viewer={}&databases={}".format(localhost, port, viewer, data)
        if assetname is not None:
            urlstring = urlstring + "&assetname={}".format(assetname)
print(urlstring)
httpd.serve_forever()
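# Example invocation (a sketch; the database path is hypothetical):
#   run_cinema_server("explorer", "my_data.cdb", 8000)
# serves the Cinema:Explorer viewer at
#   http://127.0.0.1:8000/?viewer=explorer&databases=my_data.cdb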
| 34.741667
| 89
| 0.614296
|
03629efe5ba34a829d8a1230db06dd2e957fbcff
| 1,707
|
py
|
Python
|
tech/entities/processed_lingo.py
|
fferegrino/techlingo.fyi
|
0ed41160b4a8c854f9f0c9104ce619e73c2c7e26
|
[
"MIT"
] | 4
|
2021-06-20T00:53:19.000Z
|
2022-02-04T19:26:12.000Z
|
tech/entities/processed_lingo.py
|
fferegrino/techlingo.fyi
|
0ed41160b4a8c854f9f0c9104ce619e73c2c7e26
|
[
"MIT"
] | 16
|
2021-06-15T06:10:15.000Z
|
2021-07-20T19:02:11.000Z
|
tech/entities/processed_lingo.py
|
fferegrino/techlingo.fyi
|
0ed41160b4a8c854f9f0c9104ce619e73c2c7e26
|
[
"MIT"
] | 1
|
2021-06-20T03:03:42.000Z
|
2021-06-20T03:03:42.000Z
|
from dataclasses import asdict, dataclass
from pathlib import Path
from typing import List
from tech.entities import languages
from tech.entities.author import Author
from tech.entities.lingo import Lingo
@dataclass
class ProcessedLingo:
original_title: str
localised_title: str
identifier: str
content: str
author: str
author_url: str
category: str
acronym: str
abbreviation: str
language: str
path: Path
tags: List[str]
@property
def initial(self) -> str:
return self.identifier[0]
@property
def decorated_acronym(self) -> str:
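        # Wraps each uppercase character in <b> tags; e.g. a (hypothetical)
        # acronym value "Application Programming Interface" becomes
        # "<b>A</b>pplication <b>P</b>rogramming <b>I</b>nterface".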
if self.acronym:
return "".join(
(f"<b>{char}</b>" if char.isupper() else char for char in self.acronym)
)
return ""
@property
def abbr(self) -> str:
return self.abbreviation or ""
@property
def slug(self) -> str:
return str(self.path)
@classmethod
def from_thing(cls, lingo: Lingo, author: Author, original_title: str, path: Path):
return cls(
original_title=original_title,
localised_title=lingo.term,
identifier=lingo.id,
content=lingo.text,
author=author.display_name,
author_url=author.main_link,
category=lingo.category,
abbreviation=lingo.abbreviation,
acronym=lingo.acronym,
language=languages[lingo.language],
path=path,
tags=lingo.tags or [],
)
def asdict(self):
inner_dict = asdict(self)
inner_dict.pop("path")
inner_dict.update({"initial": self.initial, "slug": self.slug})
return inner_dict
| 25.477612
| 87
| 0.610428
|
4a67a5ec035e8e298c5f79e2b21f639e0b74bb14
| 1,160
|
py
|
Python
|
core/migrations/0015_auto_20180822_1456.py
|
kaedroho/dit-directory-cms
|
67c15eeed19e7b3583f1fce1969230ddf83b6813
|
[
"MIT"
] | null | null | null |
core/migrations/0015_auto_20180822_1456.py
|
kaedroho/dit-directory-cms
|
67c15eeed19e7b3583f1fce1969230ddf83b6813
|
[
"MIT"
] | null | null | null |
core/migrations/0015_auto_20180822_1456.py
|
kaedroho/dit-directory-cms
|
67c15eeed19e7b3583f1fce1969230ddf83b6813
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-08-22 14:56
from __future__ import unicode_literals
from directory_constants.constants import cms
from django.db import migrations
def add_service_name_to_existing_apps(apps, schema_editor):
InvestApp = apps.get_model('invest', 'InvestApp')
FASApp = apps.get_model('find_a_supplier', 'FindASupplierApp')
ExReadApp = apps.get_model('export_readiness', 'ExportReadinessApp')
InvestApp.objects.all().update(service_name=cms.INVEST)
FASApp.objects.all().update(service_name=cms.FIND_A_SUPPLIER)
ExReadApp.objects.all().update(service_name=cms.EXPORT_READINESS)
class Migration(migrations.Migration):
dependencies = [
('core', '0014_auto_20180822_0915'),
('export_readiness', '0013_exportreadinessapp_service_name'),
('find_a_supplier', '0062_auto_20180817_1630_squashed_0065_auto_20180829_1027'),
('invest', '0009_investapp_service_name')
]
operations = [
migrations.RunPython(
add_service_name_to_existing_apps,
reverse_code=migrations.RunPython.noop,
elidable=True
)
]
| 34.117647
| 88
| 0.722414
|
9b0524977219ded1ac54967d09b1d050510f5ae7
| 2,915
|
py
|
Python
|
qa/rpc-tests/zapwallettxes.py
|
NoirX/NoirShares
|
f4552e470da178ce8a04d9a1a01c37763fb5412e
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/zapwallettxes.py
|
NoirX/NoirShares
|
f4552e470da178ce8a04d9a1a01c37763fb5412e
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/zapwallettxes.py
|
NoirX/NoirShares
|
f4552e470da178ce8a04d9a1a01c37763fb5412e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The NoirShares Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import NoirSharesTestFramework
from test_framework.util import *
class ZapWalletTXesTest (NoirSharesTestFramework):
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 3)
def setup_network(self, split=False):
self.nodes = start_nodes(3, self.options.tmpdir)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
self.is_network_split=False
self.sync_all()
def run_test (self):
print "Mining blocks..."
self.nodes[0].generate(1)
self.sync_all()
self.nodes[1].generate(101)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 50)
txid0 = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11)
txid1 = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
txid2 = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11)
txid3 = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)
tx0 = self.nodes[0].gettransaction(txid0)
assert_equal(tx0['txid'], txid0) #tx0 must be available (confirmed)
tx1 = self.nodes[0].gettransaction(txid1)
assert_equal(tx1['txid'], txid1) #tx1 must be available (confirmed)
tx2 = self.nodes[0].gettransaction(txid2)
assert_equal(tx2['txid'], txid2) #tx2 must be available (unconfirmed)
tx3 = self.nodes[0].gettransaction(txid3)
assert_equal(tx3['txid'], txid3) #tx3 must be available (unconfirmed)
#restart noirsharesd
self.nodes[0].stop()
noirsharesd_processes[0].wait()
self.nodes[0] = start_node(0,self.options.tmpdir)
tx3 = self.nodes[0].gettransaction(txid3)
        assert_equal(tx3['txid'], txid3) #tx3 must be available (unconfirmed)
self.nodes[0].stop()
noirsharesd_processes[0].wait()
#restart noirsharesd with zapwallettxes
self.nodes[0] = start_node(0,self.options.tmpdir, ["-zapwallettxes=1"])
assert_raises(JSONRPCException, self.nodes[0].gettransaction, [txid3])
        # there must be an exception because the unconfirmed wallet tx must be gone by now
tx0 = self.nodes[0].gettransaction(txid0)
assert_equal(tx0['txid'], txid0) #tx0 (confirmed) must still be available because it was confirmed
if __name__ == '__main__':
ZapWalletTXesTest ().main ()
| 37.857143
| 106
| 0.650086
|