| max_stars_repo_path (string, 3–269 chars) | max_stars_repo_name (string, 4–119 chars) | max_stars_count (int64, 0–191k) | id (string, 1–7 chars) | content (string, 6–1.05M chars) | score (float64, 0.23–5.13) | int_score (int64, 0–5) |
|---|---|---|---|---|---|---|
samples/highscore.py
|
cmu-cs-academy/desktop-cmu-graphics
| 3
|
12777451
|
<gh_stars>1-10
from cmu_graphics import *
import os
# Set up or reset the game
# Here we clear labels from the screen, create our game objects,
# set their properties, and set our mode to playing
# (rather than start screen or game over)
def startGame():
app.group.clear()
app.mode = 'playing'
app.scoreLabel = Label('Score: 0', 200, 30, size=30)
app.ball = Circle(-30, 200, 30, fill='purple')
app.ballSpeed = 3
app.score = 0
def onMouseMove(mouseX, mouseY):
    # If we're in the start or game over screens, there's no need to
    # handle mouse movement
if app.mode != 'playing':
return
# If we touch the ball, increase our score, move the ball to the left,
# give it a random Y position, and make it move faster
if app.ball.hits(mouseX, mouseY):
app.score += 1
app.ball.centerY = randrange(60, 350)
app.ball.right = 0
app.ballSpeed += 1
app.scoreLabel.value = 'Score: ' + str(app.score)
# A helper function for drawing text centered on the screen
def drawText(linesList):
lineHeight = 35
# lineY starts out at the center (200) minus half the height of our
# whole block of text
lineY = 200 - ((len(linesList) * lineHeight) // 2)
for line in linesList:
# Create a label for each line, and move lineY down so the next
# line is drawn lower
Label(line, 200, lineY, align='center', size=25)
lineY += lineHeight
# Handle a player loss
def gameOver():
# Clear game objects from the screen and set our mode
# so we stop listening to mouse events and can handle key presses
# correctly later
app.mode = 'gameOver'
app.group.clear()
# We don't know the high score or the user with the highest score yet,
# so set them to placeholders
highscore = 0
highscoreUser = ''
# If the high score file already exists, open it and read it
if os.path.exists('highscore.txt'):
with open('highscore.txt', 'r') as f:
# split by commas because we store data in the file like:
# "highscore,highscoreUser"
highscore, highscoreUser = f.read().split(',')
            # We read strings from the file, so we have to convert highscore
            # to an int
highscore = int(highscore)
# If our score is better than the saved highscore
if app.score > highscore:
# Set the new highscore to the current score, get the user's initials
# and save them to the file in the format that we will read later:
# "highscore,highscoreUser"
highscore = app.score
highscoreUser = app.getTextInput("New high score! Enter your initials.")
with open('highscore.txt', 'w+') as f:
f.write(str(app.score) + ',' + highscoreUser)
drawText([
'You Lost',
'',
'High Score: ' + str(highscore) + ' by ' + highscoreUser,
'',
"Press 'r' to restart"
])
def onKeyPress(key):
# Restart or start the game from the game over or start screens
if ((key == 'r' and app.mode == 'gameOver') or
(key == 's' and app.mode == 'startScreen')):
startGame()
def onStep():
# Only move the ball if we're playing the game
if app.mode == 'playing':
app.ball.centerX += app.ballSpeed
# If the ball exits the screen, the player loses
if app.ball.left > 400:
gameOver()
def initStartScreen():
# Set the mode to startScreen so we know to handle the s key correctly
app.mode = 'startScreen'
drawText([
'A Simple Game With High Scores',
'',
'To play:',
'Hover over the purple ball before',
'it reaches the edge of the screen',
'',
"Press 's' to start",
'Good Luck!'
])
initStartScreen()
cmu_graphics.run()
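# File format sketch (matches the read/write logic in gameOver() above):
# highscore.txt holds a single line like "12,ABC" -- the score, a comma,
# then the scoring player's initials.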
| 3.375
| 3
|
benchmark/test-msgpack.py
|
azawawi/perl6-msgpack
| 2
|
12777452
|
<filename>benchmark/test-msgpack.py
#!/usr/bin/env python
import msgpack
def test():
    SIZE = 10000000
data = [1] * SIZE
packed = msgpack.packb(data)
unpacked = msgpack.unpackb(packed)
for i in range(1,10 + 1):
    test()
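# A minimal timing sketch (not part of the original benchmark, which relies on
# external timing such as the shell's `time`). time.time() exists on both
# Python 2 and 3. Define-only: call timed_test() manually if wanted.
def timed_test(iterations=10):
    import time
    for i in range(1, iterations + 1):
        t0 = time.time()
        test()
        print("iteration %d: %.3f s" % (i, time.time() - t0))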
| 2.359375
| 2
|
manyssh/about.py
|
linkdd/manyssh
| 3
|
12777453
|
<reponame>linkdd/manyssh
# -*- coding: utf-8 -*-
from gi.repository import Gtk
from manyssh import meta
class About(Gtk.AboutDialog):
"""
ManySSH about dialog.
"""
def __init__(self, *args, **kwargs):
kwargs['title'] = '{0} {1}'.format(meta.PROGRAM_NAME, meta.VERSION)
super(About, self).__init__(*args, **kwargs)
self.set_program_name(meta.PROGRAM_NAME)
self.set_version(meta.VERSION)
self.set_authors(meta.AUTHORS)
self.set_license(meta.LICENSE)
self.connect('response', lambda s, r: self.destroy())
self.show_all()
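# Usage sketch (hypothetical): assumes a running Gtk main loop and a parent
# Gtk.Window named `parent`. The 'response' handler above destroys the
# dialog on any button press.
#
#   dialog = About(transient_for=parent)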
| 2.0625
| 2
|
tests/agent/test_caracal_backend.py
|
dioptra-io/iris
| 6
|
12777454
|
<filename>tests/agent/test_caracal_backend.py
from iris.agent.backend.caracal import probe
from tests.helpers import superuser
@superuser
def test_probe(agent_settings, tmp_path):
excluded_filepath = tmp_path / "excluded.csv"
excluded_filepath.write_text("8.8.4.4/32")
probes_filepath = tmp_path / "probes.csv"
probes_filepath.write_text(
"8.8.8.8,24000,33434,32,icmp\n8.8.4.4,24000,33434,32,icmp"
)
results_filepath = tmp_path / "results.csv"
prober_statistics = {}
agent_settings.AGENT_CARACAL_EXCLUDE_PATH = excluded_filepath
probe(
agent_settings,
probes_filepath,
results_filepath,
1,
None,
100,
prober_statistics,
)
assert prober_statistics["packets_sent"] == 1
assert prober_statistics["filtered_prefix_excl"] == 1
| 2.125
| 2
|
voicenet/utils/__init__.py
|
Robofied/Voicenet
| 32
|
12777455
|
# from .features_extraction import FeatureExtraction
# print("Invoking __init__.py for {}".format(__name__))
# __all__ = ["FeatureExtraction"]
| 1.476563
| 1
|
applications/plugins/Flexible/python/Flexible/sml.py
|
sofa-framework/issofa
| 0
|
12777456
|
<reponame>sofa-framework/issofa
import SofaPython.sml
def getSolidSkinningIndicesAndWeights(solidModel, skinningArmatureBoneIndexById) :
""" Construct the indices and weights vectors for the skinning of solidModel
"""
indices = dict()
weights = dict()
for skinning in solidModel.skinnings:
currentBoneIndex = skinningArmatureBoneIndexById[skinning.solid.id]
for index,weight in zip(skinning.index, skinning.weight):
if not index in indices:
indices[index]=list()
weights[index]=list()
indices[index].append(currentBoneIndex)
weights[index].append(weight)
#TODO fill potential holes in indices/weights ?
return (indices, weights)
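# Usage sketch with hypothetical inputs (the real objects come from
# SofaPython.sml). A solidModel with a single skinning where
#   skinning.solid.id == 'b1', skinning.index == [0, 1],
#   skinning.weight == [1.0, 0.5]
# and skinningArmatureBoneIndexById == {'b1': 3} yields
#   indices == {0: [3], 1: [3]} and weights == {0: [1.0], 1: [0.5]}.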
| 2.484375
| 2
|
numtotext.py
|
teko424/num-to-eng
| 0
|
12777457
|
<reponame>teko424/num-to-eng
def two_digits(n):
nums = ["zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"]
nums_2 = ["twen", "thir", "for", "fif", "six", "seven", "eigh", "nine"]
uniqteens = ["ten", "eleven", "twelve"]
teens = ["thir", "four", "fif", "six", "seven", "eigh", "nine"]
if str(n)[0] == "1":
if str(n)[1] == "0" or str(n)[1] == "1" or str(n)[1] == "2":
return uniqteens[int(str(n)[1])]
else:
return teens[int(str(n)[1]) - 3] + "teen"
else:
if str(n)[1] == "0":
return f"{nums_2[int(str(n)[0]) - 2]}ty"
else:
return f"{nums_2[int(str(n)[0]) - 2]}ty {nums[int(str(n)[1])]}"
def num_to_eng(n):
nums = ["zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"]
if len(str(n)) == 1:
print(nums[n])
    elif len(str(n)) == 2:
        print(two_digits(n))
elif len(str(n)) == 3:
if str(n)[1] == "0":
if str(n)[2] != "0":
print(nums[int(str(n)[0])], "hundred", nums[int(str(n)[2])])
else:
print(nums[int(str(n)[0])], "hundred")
else:
print(nums[int(str(n)[0])], "hundred", two_digits(int(str(n)[-2:])))
if __name__ == "__main__":
while 1:
try:
q = int(input("type a number between 0 and 999: "))
if 0 <= q <= 999:
num_to_eng(q)
else:
print("the number must be between 0-999")
except ValueError:
print("please type only a number, nothing else")
| 3.484375
| 3
|
rllib/examples/serving/cartpole_server.py
|
77loopin/ray
| 39
|
12777458
|
<gh_stars>10-100
#!/usr/bin/env python
"""
Example of running an RLlib policy server that allows connections from
external environment-running clients (a simple CartPole env in this case).
The server listens on one or more HTTP-speaking ports. See
`cartpole_client.py` in this same directory for how to start any number of
clients (after this server has been started).
This script will not create any actual env to illustrate that RLlib can
run w/o needing an internalized environment.
Setup:
1) Start this server:
$ python cartpole_server.py --num-workers [n] [other options]
Use --help for help.
2) Run n policy clients:
See `cartpole_client.py` on how to do this.
The `num-workers` setting will allow you to distribute the incoming feed over n
listen sockets (in this example, ports 9900 to 9900 + n - 1, i.e.,
SERVER_BASE_PORT + worker_idx - 1).
You may connect more than one policy client to any open listen port.
"""
import argparse
import gym
import os
import ray
from ray.rllib.agents.dqn import DQNTrainer
from ray.rllib.agents.ppo import PPOTrainer
from ray.rllib.env.policy_server_input import PolicyServerInput
from ray.rllib.examples.custom_metrics_and_callbacks import MyCallbacks
from ray.tune.logger import pretty_print
SERVER_ADDRESS = "localhost"
# In this example, the user can run the policy server with
# n workers, opening up listen ports 9900 - 990n (n = num_workers - 1)
# to each of which different clients may connect.
SERVER_BASE_PORT = 9900 # + worker-idx - 1
CHECKPOINT_FILE = "last_checkpoint_{}.out"
parser = argparse.ArgumentParser()
parser.add_argument("--run", type=str, choices=["DQN", "PPO"], default="DQN")
parser.add_argument(
"--framework",
choices=["tf", "torch"],
default="tf",
help="The DL framework specifier.")
parser.add_argument(
"--no-restore",
action="store_true",
help="Do not restore from a previously saved checkpoint (location of "
"which is saved in `last_checkpoint_[algo-name].out`).")
parser.add_argument(
"--num-workers",
type=int,
default=2,
help="The number of workers to use. Each worker will create "
"its own listening socket for incoming experiences.")
parser.add_argument(
"--chatty-callbacks",
action="store_true",
help="Activates info-messages for different events on "
"server/client (episode steps, postprocessing, etc..).")
if __name__ == "__main__":
args = parser.parse_args()
ray.init()
# `InputReader` generator (returns None if no input reader is needed on
# the respective worker).
def _input(ioctx):
# We are remote worker or we are local worker with num_workers=0:
# Create a PolicyServerInput.
if ioctx.worker_index > 0 or ioctx.worker.num_workers == 0:
return PolicyServerInput(
ioctx, SERVER_ADDRESS, SERVER_BASE_PORT + ioctx.worker_index -
(1 if ioctx.worker_index > 0 else 0))
# No InputReader (PolicyServerInput) needed.
else:
return None
# Trainer config. Note that this config is sent to the client only in case
# the client needs to create its own policy copy for local inference.
config = {
# Indicate that the Trainer we setup here doesn't need an actual env.
# Allow spaces to be determined by user (see below).
"env": None,
# TODO: (sven) make these settings unnecessary and get the information
# about the env spaces from the client.
"observation_space": gym.spaces.Box(
float("-inf"), float("inf"), (4, )),
"action_space": gym.spaces.Discrete(2),
# Use the `PolicyServerInput` to generate experiences.
"input": _input,
# Use n worker processes to listen on different ports.
"num_workers": args.num_workers,
# Disable OPE, since the rollouts are coming from online clients.
"input_evaluation": [],
# Create a "chatty" client/server or not.
"callbacks": MyCallbacks if args.chatty_callbacks else None,
}
# DQN.
if args.run == "DQN":
# Example of using DQN (supports off-policy actions).
trainer = DQNTrainer(
config=dict(
config, **{
"learning_starts": 100,
"timesteps_per_iteration": 200,
"model": {
"fcnet_hiddens": [64],
"fcnet_activation": "linear",
},
"n_step": 3,
"framework": args.framework,
}))
# PPO.
else:
# Example of using PPO (does NOT support off-policy actions).
trainer = PPOTrainer(
config=dict(
config, **{
"rollout_fragment_length": 1000,
"train_batch_size": 4000,
"framework": args.framework,
}))
checkpoint_path = CHECKPOINT_FILE.format(args.run)
# Attempt to restore from checkpoint, if possible.
if not args.no_restore and os.path.exists(checkpoint_path):
checkpoint_path = open(checkpoint_path).read()
print("Restoring from checkpoint path", checkpoint_path)
trainer.restore(checkpoint_path)
# Serving and training loop.
while True:
print(pretty_print(trainer.train()))
checkpoint = trainer.save()
print("Last checkpoint", checkpoint)
with open(checkpoint_path, "w") as f:
f.write(checkpoint)
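# Client-side sketch (see `cartpole_client.py` in this directory for the real
# script; exact PolicyClient options may differ across Ray versions):
#
#   from ray.rllib.env.policy_client import PolicyClient
#   client = PolicyClient("http://localhost:9900", inference_mode="remote")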
| 2.984375
| 3
|
event/urls.py
|
vis7/connection
| 1
|
12777459
|
<gh_stars>1-10
from django.urls import path
from .views import (
EventCreateView, EventUpdateView, EventDeleteView, EventDetailView, EventListView
)
app_name = 'event'
urlpatterns = [
path('create/', EventCreateView.as_view(), name='event_create'),
path('<int:pk>/update/', EventUpdateView.as_view(), name='event_update'),
path('<int:pk>/delete/', EventDeleteView.as_view(), name='event_delete'),
path('<int:pk>/', EventDetailView.as_view(), name='event_detail'),
path('event_list/', EventListView.as_view(), name='event_list')
]
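# Reverse-lookup sketch: with app_name = 'event', names resolve through the
# namespace, e.g. reverse('event:event_detail', args=[42]). The full URL
# prefix depends on how this urlconf is included in the project ROOT_URLCONF.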
| 1.703125
| 2
|
cheminfo/openbabel/amon_f.py
|
binghuang2018/aqml
| 19
|
12777460
|
#!/usr/bin/env python
"""
Enumerate subgraphs & get amons
"""
import aqml.cheminfo.math as cim
import aqml.cheminfo.rw.pdb as crp
import aqml.cheminfo.graph as cg
import networkx as nx
from itertools import chain, product
import numpy as np
import os, re, copy, time
import itertools as itl
import multiprocessing
import scipy.spatial.distance as ssd  # used below (Coulomb matrix, vdW distance checks)
from rdkit import Chem  # used in Sets.Opt and ParentMols; was commented out
# NOTE: `oe, `cir, `fa, `dd and `dsHX are also referenced below;
# they are imported/defined elsewhere in aqml and left untouched here.
import openbabel as ob
import pybel as pb
from aqml.cheminfo import *
import aqml.cheminfo.openbabel.obabel as cib
from aqml.cheminfo.rw.ctab import write_ctab
#Todo
# stereochemistry: e.g., "CC(=C)C(CC/C(=C\COC1=CC=CC=C1)/C)Br"
# "NC(=O)[C@H](CCCCN)NC(=O)[C@H](CCCN=C(N)N)"
#global dic_smiles
#dic_smiles = {6:'C', 7:'N', 8:'O', 14:'Si', 15:'P', 16:'S'}
chemical_symbols = ['X', 'H', 'He', 'Li', 'Be',
'B', 'C', 'N', 'O', 'F',
'Ne', 'Na', 'Mg', 'Al', 'Si',
'P', 'S', 'Cl', 'Ar', 'K',
'Ca', 'Sc', 'Ti', 'V', 'Cr',
'Mn', 'Fe', 'Co', 'Ni', 'Cu',
'Zn', 'Ga', 'Ge', 'As', 'Se',
'Br', 'Kr', 'Rb', 'Sr', 'Y',
'Zr', 'Nb', 'Mo', 'Tc', 'Ru',
'Rh', 'Pd', 'Ag', 'Cd', 'In',
'Sn', 'Sb', 'Te', 'I', 'Xe',
'Cs', 'Ba', 'La', 'Ce', 'Pr',
'Nd', 'Pm', 'Sm', 'Eu', 'Gd',
'Tb', 'Dy', 'Ho', 'Er', 'Tm',
'Yb', 'Lu', 'Hf', 'Ta', 'W',
'Re', 'Os', 'Ir', 'Pt', 'Au',
'Hg', 'Tl', 'Pb', 'Bi', 'Po',
'At', 'Rn', 'Fr', 'Ra', 'Ac',
'Th', 'Pa', 'U', 'Np', 'Pu',
'Am', 'Cm', 'Bk', 'Cf', 'Es',
'Fm', 'Md', 'No', 'Lr']
class RawMol(object):
"""
molecule object with only `zs & `coords
"""
def __init__(self, zs, coords):
self.zs = zs
self.coords = coords
def generate_coulomb_matrix(self):
""" Coulomb matrix"""
na = len(self.zs)
mat = np.zeros((na,na))
ds = ssd.squareform( ssd.pdist(self.coords) )
np.fill_diagonal(ds, 1.0)
X, Y = np.meshgrid(self.zs, self.zs)
mat = X*Y/ds
np.fill_diagonal(mat, -np.array(self.zs)**2.4 )
L1s = np.linalg.norm(mat, ord=1, axis=0)
ias = np.argsort(L1s)
self.cm = mat[ias,:][:,ias].ravel()
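# Usage sketch for RawMol above (hypothetical geometry; requires the `ssd
# alias for scipy.spatial.distance, see imports above):
#   rm = RawMol(np.array([8, 1, 1]),
#               np.array([[0., 0., 0.], [0.96, 0., 0.], [-0.24, 0.93, 0.]]))
#   rm.generate_coulomb_matrix()
#   rm.cm  # flattened Coulomb matrix, rows/cols sorted by L1 norm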
class Parameters(object):
def __init__(self, wg, fixGeom, k, k2, ivdw, dminVDW, \
forcefield, thresh, do_ob_ff, idiff, iters):
self.wg = wg
self.fixGeom = fixGeom
self.ff = forcefield
self.k = k
self.k2 = k2
self.ivdw = ivdw
self.dminVDW = dminVDW
# self.threshDE = threshDE
self.thresh = thresh
self.do_ob_ff = do_ob_ff
self.iters = iters
self.idiff = idiff
def merge(Ms): #Mli1, Mli2):
"""merge two or more `ctab"""
nas = []
zs = []; coords = []; charges = []; boms = []
for M in Ms:
zs1, coords1, bom1, charges1 = M
zs.append( zs1)
na1 = len(zs1); nas.append(na1)
coords.append( coords1)
charges.append( charges1)
boms.append(bom1)
zs = np.concatenate( zs )
coords = np.concatenate(coords, axis=0)
charges = np.concatenate(charges)
na = sum(nas); nm = len(nas)
bom = np.zeros((na,na), np.int)
ias2 = np.cumsum(nas)
ias1 = np.array([0] + list(ias2[:-1]))
for i in range(nm):
ia1 = ias1[i]; ia2 = ias2[i]
bom[ia1:ia2,ia1:ia2] = boms[i]
return zs, coords, bom, charges
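# Usage sketch: each element of `Ms is a (zs, coords, bom, charges) tuple
# (the `Mli layout described in Sets.update below); merge(...) concatenates
# them into one block-diagonal bond-order matrix with no inter-fragment bonds.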
class Sets(object):
def __init__(self, param):
self.cans = [] #cans
self.ms = [] #ms
self.rms = [] #rms
self.es = [] #es
self.nhas = [] #nhas
self.ms0 = [] #ms0
self.maps = [] #maps
self.cms = [] # coulomb matrix
self.param = param
def check_eigval(self):
""" check if the new kernel (after adding one molecule) has
some very small eigenvalue, i.e., if it's true, it means that
there are very similar molecules to the newcomer, thus it won't
be included as a new amon"""
        iok = True
        thresh = self.param.thresh
        # NOTE: the actual eigenvalue check is not implemented in this file;
        # return True so that callers treat the candidate as new
        return iok
def update(self, ir, can, Mli):
"""
update `Sets
var's
==============
Mli -- Molecule info represented as a list
i.e., [zs, coords, bom, charges]
"""
zs, coords, bom, charges = Mli
rmol = RawMol(zs, coords)
if self.param.idiff == 1: rmol.generate_coulomb_matrix()
nha = (zs > 1).sum()
self.ncan = len(self.cans)
if can in self.cans:
ican = self.cans.index( can )
# for molecule with .LE. 3 heavy atoms, no conformers
if (not self.param.fixGeom) and nha <= 3:
# but u still need to tell if it belongs to the
# `ir-th query molecule (so, the amon `m0 might
# have appeared as an amon of another query molecule
# considered previously.
# Note that we use a 3-integer list for labeling the
                # generated amons, i.e., [ir, ican, iconformer].
amon_idx = [ir, ican, 0]
if amon_idx not in self.maps:
self.maps.append( amon_idx )
else:
m0, m, ei = self.Opt(Mli)
ms_i = self.ms[ ican ] # stores the updated geom
rms_i = self.rms[ ican ]
ms0_i = self.ms0[ ican ] # stores the original geom
nci = len(ms_i)
es_i = self.es[ ican ]
inew = True
                if self.param.idiff == 0: # use energy difference as criterion
dEs = np.abs( np.array(es_i) - ei )
if np.any( dEs <= self.param.thresh ): inew = False
elif self.param.idiff == 1:
xs = np.array([ rmol.cm, ] )
ys = np.array([ ma.cm for ma in self.rms[ican] ])
#print ' -- ', xs.shape, ys.shape, can
drps = ssd.cdist(xs, ys, 'cityblock')[0]
if np.any( drps <= self.param.thresh ): inew = False
elif self.param.idiff == 2:
if not self.check_eigval():
inew = False
else:
                    raise Exception('#ERROR: not supported `idiff')
if inew:
self.ms[ ican ] = ms_i + [m, ]
self.rms[ ican ] = rms_i + [ rmol, ]
self.ms0[ ican ] = ms0_i + [m0, ]
self.es[ ican ] = es_i + [ei, ]
self.maps.append( [ir, ican, nci] )
else:
m0, m, ei = self.Opt(Mli)
self.maps.append( [ir, self.ncan, 0] )
self.cans.append( can )
self.nhas.append( nha )
self.ms.append( [m, ] )
self.rms.append( [rmol, ] )
self.ms0.append( [m0, ] )
self.es.append( [ei, ] )
self.ncan += 1
def update2(self, ir, can, Mli):
"""
update mol set if we need SMILES only
"""
self.ncan = len(self.cans)
zs = Mli[0]
nha = (zs > 1).sum()
if can not in self.cans:
print '++', can #, '\n\n'
self.maps.append( [ir, self.ncan, 0] )
self.cans.append( can )
self.nhas.append( nha )
self.ncan += 1
else:
ican = self.cans.index( can )
entry = [ir, ican, 0]
if entry not in self.maps:
self.maps.append( entry )
#print ' -- maps = ', self.maps
def Opt(self, Mli):
"""
        postprocess molecular fragment retrieved
from parent molecule by RDKit
"""
#import io2.mopac as im
import tempfile as tpf
zs, coords, bom, charges = Mli
ctab = oe.write_sdf_raw(zs, coords, bom, charges)
# get RDKit Mol first
m0 = Chem.MolFromMolBlock( ctab, removeHs=False ) # plz keep H's
m0_copy = copy.deepcopy(m0)
rd = cir.RDMol( m0_copy, forcefield=self.param.ff )
if self.param.wg:
# the default case, use openbabel to do constrained optimization
if self.param.do_ob_ff:
ob1 = cib.Mol( ctab, fmt='sdf' )
ob1.fixTorsionOpt(iconstraint=3, ff="MMFF94", \
optimizer='cg', steps=[30,90], ic=True)
rd = cir.RDMol( ob1.to_RDKit(), forcefield=self.param.ff )
else:
# u may prefer to do a partial optimization using PM7 in MOPAC
# for those H atoms and their neighboring heavy atoms
pass # no ff opt
if hasattr(rd, 'energy'):
e = rd.energy
else:
e = rd.get_energy()
m = rd.m
return m0, m, e
def _sort(self):
""" sort Mlis """
maps = np.array(self.maps)
ncan = len(self.cans)
seqs = np.arange(ncan)
nhas = np.array(self.nhas)
ncs = [ len(ms_i) for ms_i in self.ms ]
cans = np.array(self.cans)
nhas_u = []
ncs_u = []
seqs_u = []
cans_u = []
ms_u = []; ms0_u = []
# now sort the amons by the number of heavy atoms
for i in range(1, self.param.k2+1):
seqs_i = seqs[ i == nhas ]
cans_i = cans[ seqs_i ]
seqs_j = seqs_i[ np.argsort(cans_i) ]
seqs_u += list( seqs_j )
for j in seqs_j:
cans_u.append( cans[j] )
ms_j = self.ms[j]; ms0_j = self.ms0[j]
ncj = len(ms_j)
ncs_u.append( ncj )
nhas_u.append( nhas[j] )
ms_u.append( ms_j ); ms0_u.append( ms0_j )
seqs_u = np.array(seqs_u)
# now get the starting idxs of conformers for each amon
ias2 = np.cumsum(ncs_u)
ias1 = np.concatenate( ([0,],ias2[:-1]) )
# now get the maximal num of amons one molecule can possess
nt = 1+maps[-1,0]; namons = []
for i in range(nt):
namon = (maps[:,0] == i).sum()
namons.append(namon)
namon_max = max(namons)
# `maps_u stores the amon idx for each target molecule
# (Note: any conformer is an amon)
maps_u = np.zeros((nt, namon_max))
for i in range(nt):
filt_i = (maps[:,0] == i)
maps_i = maps[filt_i, 1:]
jcnt = 0
for j in range(namons[i]):
jcan, jc = maps_i[j,:] # `jcan: the old idx of can
jcan_u = seqs[ seqs_u == jcan ] # new idx of can
maps_u[i, jcnt] = ias1[jcan_u] + jc
jcnt += 1
self.ms = ms_u
self.ms0 = ms0_u
self.cans = cans_u
self.nhas = nhas_u
self.ncs = ncs_u
self.maps = maps_u
def _sort2(self):
""" sort Mlis for wg = False"""
maps = np.array(self.maps)
ncan = len(self.cans)
seqs = np.arange(ncan)
nhas = np.array(self.nhas)
cans = np.array(self.cans)
nhas_u = []
seqs_u = []
cans_u = []
# now sort the amons by the number of heavy atoms
for i in range(1, self.param.k2+1):
seqs_i = seqs[ i == nhas ]
cans_i = cans[ seqs_i ]
seqs_j = seqs_i[ np.argsort(cans_i) ]
seqs_u += list( seqs_j )
for j in seqs_j:
cans_u.append( cans[j] )
nhas_u.append( nhas[j] )
seqs_u = np.array(seqs_u)
#print 'maps = ',maps
# now get the maximal num of amons one molecule can possess
nt = maps[-1,0]+1; namons = []
for i in range(nt):
namon = (maps[:,0] == i).sum()
namons.append(namon)
namon_max = max(namons)
# `maps_u stores the amon idx for each target molecule
# (Note: any conformer is an amon)
maps_u = np.zeros((nt, namon_max))
for i in range(nt):
filt_i = (maps[:,0] == i)
maps_i = maps[filt_i, 1:]
jcnt = 0
for j in range(namons[i]):
jcan = maps_i[j,1] # `jcan: the old idx of can
jcan_u = seqs[ seqs_u == jcan ] # new idx of can
maps_u[i, jcnt] = jcan_u
jcnt += 1
self.cans = cans_u
self.nhas = nhas_u
self.maps = maps_u
self.ncs = np.ones(ncan).astype(np.int)
def accommodate_chgs(chgs, bom):
"""update bom based on `chgs
e.g., C=N#N, bond orders = [2,3],
Considering that `chgs = [0,+1,-1],
bond orders has to be changed to [2,2]"""
bom2 = copy.copy(bom)
na = len(chgs)
ias = np.arange(na)
ias1 = ias[chgs == 1]
for i in ias1:
iasc = ias[ np.logical_and(chgs==-1, bom[i]>0) ]
nac = len(iasc)
if nac > 0:
#assert nac == 1
j = iasc[0]
bij = bom[i,j] - 1
bom2[i,j] = bij
bom2[j,i] = bij
return bom2
class vars(object):
def __init__(self, bosr, zs, chgs, tvs, g, coords):
self.bosr = bosr
self.zs = zs
self.chgs = chgs
self.tvs = tvs
self.g = g
self.coords = coords
class MG(vars):
def __init__(self, bosr, zs, chgs, tvs, g, coords, use_bosr=True):
"""
use_bosr: set to True for generating amons, i.e., we need the
bond orders between the atom_i and all its neighbors,
where `i runs through 1 to N_A;
It must be set to False when inferring the BO's between
atoms given only the xyz file, i.e., with graph being
the only input
"""
vars.__init__(self, bosr, zs, chgs, tvs, g, coords)
self.use_bosr = use_bosr
def update_m(self, once=True, debug=False, icon=False):
import aqml.cheminfo.fortran.famon as cf
g = self.g
chgs = self.chgs
vs = g.sum(axis=0).astype(np.int)
tvs = self.tvs # `tvs has been modified according to `chgs
zs = self.zs
bosr = self.bosr
na = len(zs)
ias = np.arange(na)
#icon = True
if icon:
print ' zs = ', zs
print 'tvs = ', tvs
print 'dvs = ', tvs - vs
#print 'g = ', g
#t1 = time.time()
#print ' ## e1'
nrmax = na/2
nbmax = (g>0).sum()/2
iok, bom = cf.update_bom(nrmax,nbmax,zs,tvs,g,icon)
if icon: print ' +++ Passed with `iok = ', iok
#t2 = time.time()
#print ' update_m: ', t2-t1
#print ' ** iok = ',iok
#print ' ** bom = ', bom
if not iok:
#print ' zs = ', zs
#print ' vs = ', vs
#print 'tvs = ', tvs
#print ''
return [],[]
boms = [bom]
cans = []; ms = []
iok = True
for bom in boms:
# note that the order of calling `get_bos() and `accommodate_chgs()
# matters as `bosr was obtained based on modified `bom, i.e., all
# pairs of positive & negative charges (the relevant two atoms are
# bonded) were eliminated
bos = get_bos(bom)
# now restore charges for case, e.g., NN bond in C=N#N, or -N(=O)=O
bom_U = accommodate_chgs(chgs, bom)
vs = bom_U.sum(axis=0)
# for query molecule like -C=CC#CC=C-, one possible amon
# is >C-C-C-C< with dvs = [1,2,2,1] ==> >C=C=C=C<, but
# apparently this is not acceptable!! We use `obsr to
# kick out these fragments if `use_bosr is set to .true.
#ipass = True
if self.use_bosr:
#print ' -- bos = ', bos
if np.any(bos[zs>1] != bosr):
#print ' bosr = ', bosr, ', bos = ', bos[zs>1]
#ipass = False
continue
t1 = time.time()
# handle multivalent cases
# struct obabel_amons
# 1) R-N(=O)=O, O=[SH2]=O
# 2) R1-P(=O)(R2)(R3)
# 3) R-S(=O)-R,
# 4) R-S(=O)(=O)-R
# 5) R-Cl(=O)(=O)(=O), one possible amon is
# "O=[SH2]=O", however,
# openbabel cannot succeed to add 2 extra H's. We can circumvent this
# by using isotopes of H's
isotopes = []
zsmv = [7,15,16,17]
vsn = [3,3,2,1]
zsc = np.intersect1d(zs, zsmv)
if zsc.shape[0] > 0:
nheav = (zs > 1).sum()
ias = np.arange(len(zs))
for ia in range(nheav):
if (zs[ia] in zsmv) and (vs[ia]>vsn[ zsmv.index(zs[ia]) ]):
jas = ias[bom_U[ia] > 0]
for ja in jas:
if zs[ja] == 1:
isotopes.append(ja)
if na <= 100:
blk = write_ctab(zs, chgs, bom_U, self.coords, isotopes=isotopes, sdf=None)
m = obconv(blk)
else:
blk_pdb = crp.write_pdb( (zs,self.coords,chgs,bom_U) )
m = obconv(blk_pdb,'pdb')
#t2 = time.time()
#print ' |_ dt1 = ', t2-t1
can_i = pb.Molecule(m).write('can').split('\t')[0]
#if not ipass: print ' ++ can_i = ', can_i
#if np.any(bos[zs>1] != bosr):
# print '##### ', can_i, ', ', bos[zs>1], ', ', bosr
# continue
# remove isotopes
sp = r"\[[1-3]H\]"
sr = "[H]"
_atom_name_pat = re.compile(sp)
can_i = _atom_name_pat.sub(sr, can_i)
#print ' ++ zs, can, isotopes = ', zs, can_i, isotopes
#t3 = time.time()
#print ' |_ dt2 = ', t3-t2
#print ' __ can = ', can_i
if can_i not in cans:
cans.append(can_i)
ms.append(m)
#if 'CC(C)C' in cans: print ' Alert!!!'
return cans, ms
def get_coords(m):
coords = [] # np.array([ ai.coords for ai in pb.Molecule(m).atoms ])
na = m.NumAtoms()
for i in range(na):
ai = m.GetAtomById(i)
coords.append( [ ai.GetX(), ai.GetY(), ai.GetZ() ] )
return np.array(coords)
def get_bom(m):
"""
get connectivity table
"""
na = m.NumAtoms()
bom = np.zeros((na,na), np.int)
for i in range(na):
ai = m.GetAtomById(i)
for bond in ob.OBAtomBondIter(ai):
ia1 = bond.GetBeginAtomIdx()-1; ia2 = bond.GetEndAtomIdx()-1
bo = bond.GetBO()
bom[ia1,ia2] = bo; bom[ia2,ia1] = bo
return bom
def clone(m):
m2 = pb.Molecule(m).clone
return m2.OBMol
def check_hydrogens(m):
mu = pb.Molecule(m).clone # a copy
mu.addh()
m2 = mu.OBMol
return m.NumAtoms() == m2.NumAtoms()
def obconv(s,fmt='sdf'):
""" convert string(s) to molecule given a format
e.g, 'CCO','smi'
or sdf_file_content,'sdf' """
conv = ob.OBConversion()
m = ob.OBMol()
#assert type(s) is str
conv.SetInFormat(fmt)
conv.ReadString(m,s)
return m
def get_bos(bom):
na = bom.shape[0]
bosr = []
for i in range(na):
bosi = bom[i]
t = bosi[ bosi > 0 ]; t.sort()
n = len(t)
v = 0
for j in range(n):
v += t[j]*10**j
bosr.append( v )
return np.array(bosr,np.int)
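# Example reading of the encoding above: an atom with bonds of order 1 and 2
# gives t = [1, 2] after sorting, so v = 1*10**0 + 2*10**1 = 21, i.e. the
# sorted bond orders are packed into the decimal digits of a single integer.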
class mol(object):
def __init__(self, m0):
na = m0.NumAtoms()
m1 = clone(m0); m1.DeleteHydrogens()
self.m0 = m0
#print 'self.m = ', m1
self.m = m1
chgs = []; zs = []
for i in range(na):
ai = m0.GetAtomById(i)
zi = ai.GetAtomicNum(); zs.append( zi )
chgi = ai.GetFormalCharge(); chgs.append( chgi )
self.zs = np.array(zs)
self.bom = get_bom(m0)
self.nheav = (self.zs > 1).sum()
self.ias = np.arange( len(self.zs) )
self.ias_heav = self.ias[ self.zs > 1 ]
try:
self.coords = get_coords(m0)
except:
self.coords = np.zeros((na,3))
self.chgs = np.array(chgs, np.int)
#if 1 in zs:
# idxh = zs.index( 1 )
# if np.any(self.zs[idxh+1:] != 1):
        # # not all H atoms appear at the end, you have to sort it
# self.sort()
# check if there is any XH bond appear before XY bond
ihsmi = False
obsolete = """nb = m0.NumBonds(); ibs = []
for ib in range(nb):
bi= m0.GetBondById(ib)
j,k = [ bi.GetBeginAtomIdx(), bi.GetEndAtomIdx() ] # starts from 1
if j == 1 or k == 1:
ibs.append(ib) #[zs[j-1],zs[k-1]])
ibs = np.array(ibs,np.int)
if not np.all( ibs[1:]-ibs[:-1] == 1 ): ihsmi = True"""
        # an even simpler way to tell if an H atom/bond appears before X
nb = m1.NumBonds()
for ib in range(nb):
bi = m1.GetBondById(ib)
if bi == None:
ihsmi = True; break
# sort atoms & bonds so that H atom or HX bond always appear at the end
if ihsmi: self.sort()
vs = self.bom.sum(axis=0)
#print ' * vs = ', vs
self.vs = vs
if np.any(self.chgs != 0):
#print ' ** update bom due to charges'
self.eliminate_charges()
else:
# figure out charges for some special cases like
# R-N(=O)=O, O=N(=C)C=C, R-C=N#N, etc as Openbabel
# is not intelligent enough; for packages like
# RDKit or OEChem, you don't have to do this
self.recover_charges()
# print ' -- chgs = ', self.chgs
#print ' ** vs = ', self.vs
bom_heav = self.bom[ self.ias_heav, : ][ :, self.ias_heav ]
# print 'bom_heav = ', bom_heav
self.vs_heav = bom_heav.sum(axis=0)
self.cns_heav = ( bom_heav > 0 ).sum(axis=0)
# get formal charges
self.cns = ( self.bom > 0).sum(axis=0)
self.nhs = self.vs[:self.nheav] - self.vs_heav #- self.chgs[:self.nheav]
self.dvs = self.vs_heav - self.cns_heav
# get bosr, i.e., bond order (reference data) array
# concatenated into a integer
self.bosr = get_bos(self.bom)
self.dbnsr = (self.bom==2).sum(axis=0)
#print ' -- bosr = ', self.bosr
self.na = na
def sort(self):
""" sort atoms so that H's appear at the end
"""
nheav = self.nheav
ias_heav = list(self.ias_heav)
g = np.zeros((nheav, nheav))
xhs = [] # X-H bonds
ih = nheav
coords = []; coords_H = []
chgs = []; chgs_H = []
dic = dict( zip(ias_heav, range(nheav)) )
# print ' *** dic = ', dic
for i, ia in enumerate( ias_heav ):
coords.append( self.coords[ia] )
chgs.append( self.chgs[ia] )
jas = self.ias[ self.bom[ia,:] > 0 ]
for ja in jas:
if self.zs[ja] == 1:
coords_H.append( self.coords[ja] )
chgs_H.append( self.chgs[ja] )
xhs.append([i,ih]); ih += 1
else:
g[i,dic[ja]] = g[dic[ja],i] = self.bom[ia,ja]
coords_U = np.concatenate( (coords, coords_H) )
self.coords = coords_U
chgs_U = np.concatenate( (chgs, chgs_H) )
self.chgs = chgs_U
g2 = np.zeros((ih,ih))
g2[:nheav, :nheav] = g
for xh in xhs:
i,j = xh
g2[i,j] = g2[j,i] = 1
self.bom = g2
nh = ih - nheav
zsU = np.array( list(self.zs[ias_heav]) + [1,]*nh )
self.zs = zsU
self.ias_heav = self.ias[ self.zs > 1 ]
blk = write_ctab(zsU, chgs_U, g2, coords_U, sdf=None)
m0 = obconv(blk)
m1 = clone(m0)
# print ' *** ', Chem.MolToSmiles(m1)
m1.DeleteHydrogens()
self.m0 = m0; self.m = m1
def eliminate_charges(self):
"""update bom based on `chgs
e.g., bom of C=[N+]=[N-] will be converted to bom of C=N#N
based on `chgs = [0,+1,-1]
Note that only bom and the resulting `vs will be updated, no
changes regarding the SMILES string (i.e., we still prefer
a SMILES string like C=[N+]=[N-] instead of C=N#N"""
bom2 = copy.copy(self.bom)
vs2 = self.vs
ias1 = self.ias[self.chgs == 1]
for i in ias1:
iasc = self.ias[ np.logical_and(self.chgs==-1, self.bom[i]>0) ]
nac = len(iasc)
if nac > 0:
#print ' __ yeah'
#assert nac == 1
j = iasc[0]
bij = self.bom[i,j] + 1
bom2[i,j] = bij
bom2[j,i] = bij
vs2[i] = vs2[i]+1; vs2[j] = vs2[j]+1
self.bom = bom2
#print ' __ bom2 = ', bom2
self.vs = vs2 #bom2.sum(axis=0) #vs2
def recover_charges(self):
"""figure out the charges of N atoms contraining that
all have a valence of 3. E.g., for "CC=CC=N#N", the final
charges of atoms is [0,0,0,0,1,-1], corresponding to the
SMILES string of "CC=CC=[N+]=[N-]". It's similar for "CCN(=O)=O".
"""
bom2 = copy.copy(self.bom)
vs2 = self.vs
ias1 = self.ias[ np.logical_and(vs2 == 5, self.zs == 7) ]
chgs = self.chgs
for ia in ias1:
bom_ia = bom2[ia]
jas = self.ias[ bom_ia >=2 ]
bosj = bom_ia[ bom_ia >= 2 ]
if len(jas) == 2:
zsj = self.zs[ jas ]
if set(bosj) == set([2]) or set(bosj) == set([2,3]):
# e.g., O=N(=C)C=C, O=N(=O)C CC=CC=N#N
for ja in jas:
if (bom2[ja] > 0).sum() == 1:
chgs[ia] = 1; chgs[ja] = -1
break
else:
                    raise Exception('#ERROR: weird case!')
self.chgs = chgs
def get_ab(self):
"""
For heav atoms only
get atoms and bonds info
a2b: bond idxs associated to each atom
b2a: atom idxs associated to each bond
"""
        # it's not necessary to exclude H's here as H's appear at the end
b2a = [] #np.zeros((self.nb,2), np.int)
ibs = []
nb = self.m.NumBonds()
for ib in range(nb):
bi = self.m.GetBondById(ib)
i, j = bi.GetBeginAtomIdx()-1, bi.GetEndAtomIdx()-1
if self.zs[i] > 1 and self.zs[j] > 1:
ib_heav = bi.GetIdx()
b2a.append( [i,j] )
#assert len(b2a) == ib_heav+1, '#ERROR: not all H apprear at the end?'
b2a = np.array(b2a, np.int)
# assume at most 7 bonds for an atom (i.e., IF7 molecule)
a2b = -np.ones((self.nheav, 7), np.int) # -1 means no corresponding bond
for ia in self.ias_heav:
ai = self.m.GetAtomById(ia)
icnt = 0
for bi in ob.OBAtomBondIter(ai):
ib = bi.GetId()
if ib <= ib_heav: #np.all( self.zs[b2a[ib]] > 1 ):
a2b[ia, icnt] = ib
icnt += 1
return a2b, b2a
def remove_charge(m):
# obabel molecule as input
dic = {}
for ai in ob.OBMolAtomIter(m):
idx = ai.GetId()
vi = ai.GetImplicitValence()
chgi = ai.GetFormalCharge()
assert abs(chgi) <= 1
dic[ idx ] = chgi
if chgi in [1,-1]:
chgs = []
for aj in ob.OBAtomAtomIter(ai):
jdx = aj.GetId()
chgj = aj.GetFormalCharge()
dic[ jdx ] = chgj
chgs.append( chgj )
if len(chgs) > 0 and np.all(np.array(chgs,np.int) == 0):
ai.SetFormalCharge( 0 )
# reset valence for positively charged atom
ai.SetImplicitValence( vi-chgi )
# to continue, you need to remove one H atom
# and reassign the values of atom indices;
# Alternatively, simply return an updated SMILES
#if chgi == 1:
# # remove one hydrogen atom from, say [NH3+]
pym = pb.Molecule(m)
su = pym.write('can')
#print ' ++ ', su
return su
def check_elements(zs):
# metals are all excluded, including
# Li,Ba,Mg,K,Ca,Rb,Sr,Cs,Ra and
# Sc-Zn
# Y-Cd
# La-Lu, Hf-Hg
zsa = [3,11,12,19,20,37,38,55,56] + \
range(21,31) + \
range(39,49) + \
range(57,81) + \
range(89,113) # Ac-Lr, Rf-Cn
return np.all([ zi not in zsa for zi in zs ])
class amon(object):
"""
use openbabel only
"""
def __init__(self, s, k, k2=None, wg=False, ligand=None, \
fixGeom=False, ikeepRing=True, \
allow_isotope=False, allow_charge=False, \
allow_radical=False):
"""
ligand -- defaulted to None; otherwise a canonical SMILES
has to be specified
vars
===============
s -- input string, be it either a SMILES string or sdf file
k -- limitation imposed on the number of heav atoms in amon
"""
if k2 is None: k2 = k
self.k = k
self.k2 = k2
self.wg = wg
self.fixGeom = fixGeom
self.ikeepRing = ikeepRing
iok = True # shall we proceed?
if os.path.exists(s):
m0 = obconv(s,s[-3:])
# set isotope to 0
# otherwise, we'll encounter SMILES like 'C[2H]',
# and error correspondently.
# In deciding which atoms should be have spin multiplicity
# assigned, hydrogen atoms which have an isotope specification
# (D,T or even 1H) do not count. So SMILES N[2H] is NH2D (spin
# multiplicity left at 0, so with a full content of implicit
# hydrogens), whereas N[H] is NH (spin multiplicity=3). A
# deuterated radical like NHD is represented by [NH][2H].
na = m0.NumAtoms()
if not allow_isotope:
for i in range(na):
ai = m0.GetAtomById(i); ai.SetIsotope(0)
# add lines below to tell if HX bond appears before some heav atom bonds
# _____________
#
#
assert check_hydrogens(m0), '#ERROR: some hydrogens are missing'
coords0 = get_coords(m0)
pym = pb.Molecule(m0).clone
# check consistency
if pym.charge != 0 and (not allow_charge): iok = False
if pym.spin > 1 and (not allow_radical): iok = False
m = pym.OBMol; m.DeleteHydrogens()
else:
if not allow_isotope:
# remove isotopes
patts = [r"\[[1-3]H\]", r"\[[1-9]*[1-9]+"]
# e.g., C1=C(C(=O)NC(=O)N1[C@H]2[C@H]([C@@H]([C@H](O2)CO)O)F)[124I]
# [3H]C
# CN([11CH3])CC1=CC=CC=C1SC2=C(C=C(C=C2)C#N)N
subs = ["", "["]
for ir in range(2):
sp = patts[ir]
sr = subs[ir]
_atom_name_pat = re.compile(sp)
s = _atom_name_pat.sub(sr,s)
            # There exists one annoying bug of `openbabel, i.e.,
            # for some SMILES strings, the program halts when trying to convert
# from SMILES to Mol. E.g., "CCCC[C@@H](C(=O)N[C@@H](C(C)CC)C(=O)N[C@@H](CCC(=O)O)C(=O)N[C@@H](C(C)CC)C(=O)N[C@@H](CCC(=O)O)C(=O)N[C@@H](CCCCN)C(=O)N[C@@H](CCC(=O)N)C(=O)N[C@@H](CCC(=O)O)C(=O)N[C@@H](CCCCN)C(=O)N[C@@H](CCC(=O)O)C(=O)N[C@@H](CCCCN)C(=O)N[C@@H](CCC(=O)O)C(=O)N[C@@H](CCC(=O)O)C(=O)N[C@@H](C)C(=O)N[C@@H](C)C(=O)NC1CCC(=O)NCCCC[C@@H](NC(=O)[C@H](NC(=O)[C@@H](NC1=O)CC(=O)N)CCCN=C(N)N)C(=O)N[C@@H](CC(C)C)C(=O)N[C@@H](CC(C)C)C(=O)NC(CC(=O)O)C(=O)N[C@](C)(CC(C)C)C(=O)N[C@H](C(C)CC)C(=O)N)NC(=O)[C@H](CCCCN)NC(=O)[C@H](CCCN=C(N)N)NC(=O)[C@H](CC(C)C)NC(=O)[C@@](C)(CC(C)C)NC(=O)[C@H](CC2=CNC=N2)NC(=O)[C@@H](CC3=CC=CC=C3)NC(=O)[C@H](CO)NC(=O)[C@H](CC(C)C)NC(=O)[C@H](CC(=O)O)NC(=O)C"
# To circumvent this, we have to remove all stereochemistry first
pat = re.compile(r"\[(\w+?)@@?\w?\]")
matches = list( set( pat.findall(s) ) )
for match in matches:
_pat = re.compile(r"\[%s@@?\w?\]"%match)
s = _pat.sub(match, s)
m = obconv(s,'smi')
pym = pb.Molecule(m).clone
if not allow_radical:
if pym.spin > 1: iok = False
# print ' ++ 3'
if not allow_charge:
# now remove charge
su = remove_charge(m)
m = obconv(su,'smi')
m0 = clone(m)
m0.AddHydrogens()
# print ' ++ 5'
if iok:
zs = [ ai.atomicnum for ai in pym.atoms ]
if not check_elements(zs):
iok = False
self.iok = iok
if iok: self.objQ = mol(m0)
self.m0 = m0
self.m = m
def get_subm(self, las, lbs, sg):
"""
add hydrogens & retrieve coords
"""
#sets = [ set(self.objQ.bs[ib]) for ib in lbs ] # bond sets for this frag
nheav = len(las)
dic = dict( zip(las, range(nheav)) )
ih = nheav;
xhs = [] # X-H bonds
if self.wg:
coords = []; coords_H = []
for i,ia in enumerate(las):
coords.append( self.objQ.coords[ia] )
jas = self.objQ.ias[ self.objQ.bom[ia,:] > 0 ]
for ja in jas:
if self.objQ.zs[ja] == 1:
coords_H.append( self.objQ.coords[ja] )
xhs.append([i,ih]); ih += 1
else:
#if (ja not in las) or ( (ja in las) and (set(ia,ja) not in sets) ):
if (ja not in las) or ( (ja in las) and (sg[i,dic[ja]] > 0) ):
                            v = self.objQ.coords[ja] - self.objQ.coords[ia]
                            # NOTE: `dsHX (a table of H-X bond lengths keyed by
                            # atomic number) is defined elsewhere in aqml
                            coords_H.append( self.objQ.coords[ia] + dsHX[self.objQ.zs[ia]] * v/np.linalg.norm(v) )
xhs.append([i,ih]); ih += 1
coords_U = np.concatenate( (coords, coords_H) )
else:
for i,ia in enumerate(las):
jas = self.objQ.ias[ self.objQ.bom[ia,:] > 0 ]
for ja in jas:
if self.objQ.zs[ja] == 1:
xhs.append([i,ih]); ih += 1
else:
if (ja not in las) or ( (ja in las) and (sg[i,dic[ja]] == 0) ):
xhs.append([i,ih]); ih += 1
coords_U = np.zeros((ih,3))
sg_U = np.zeros((ih,ih))
sg_U[:nheav, :nheav] = sg
for xh in xhs:
i,j = xh
sg_U[i,j] = sg_U[j,i] = 1
nh = ih - nheav
bosr1 = self.objQ.bosr[las] # for heav atoms only
zs1 = np.array( list(self.objQ.zs[las]) + [1,]*nh )
chgs1 = np.array( list(self.objQ.chgs[las]) + [0,]*nh )
tvs1 = np.array( list(self.objQ.vs[las]) + [1,]*nh )
vars1 = vars(bosr1, zs1, chgs1, tvs1, sg_U, coords_U)
self.vars = vars1
def get_amons(self):
"""
tell if a given frag is a valid amon
"""
objQ = self.objQ
amons = []
smiles = []
# get amon-2 to amon-k
g0 = ( objQ.bom > 0 ).astype(np.int)
amons = []
cans = []; ms = []
a2b, b2a = objQ.get_ab()
bs = [ set(jas) for jas in b2a ]
for seed in generate_subgraphs(b2a, a2b, self.k):
# lasi (lbsi) -- the i-th list of atoms (bonds)
lasi, lbsi = list(seed.atoms), list(seed.bonds)
_lasi = np.array(lasi).astype(np.int)
#lasi.sort()
#can = Chem.MolFragmentToSmiles(objQ.m, atomsToUse=lasi, kekuleSmiles=False, \
# bondsToUse=lbsi, canonical=True)
#print ''
#print ' zs = ', objQ.zs[lasi]
#print 'tvs = ', objQ.vs[lasi]
# bondsToUse=lbsi, canonical=True)
iprt = False
bs = []
for ibx in lbsi:
bs.append( set(b2a[ibx]) )
#if iprt:
# print ' -- ibx, ias2 = ', ibx, tuple(b2a[ibx])
na = len(lasi)
if na == 1:
ia = lasi[0]; zi = objQ.zs[ ia ]
iok1 = (zi in [9, 17, 35, 53])
iok2 = ( np.any(objQ.bom[ia] >= 2) ) # -S(=O)-, -P(=O)(O)-, -S(=O)(=O)- and #N
if np.any([iok1, iok2]):
continue
can = chemical_symbols[ zi ]
if can not in cans:
cans.append( can )
# if wg:
# if not self.fixGeom:
# ms.append( ms0[can] )
# else:
# raise '#ERROR: not implemented yet'
#else:
# if wg and self.fixGeom:
continue
sg0_heav = g0[lasi,:][:,lasi]
nr0 = cg.get_number_of_rings(sg0_heav)
# property of atom in the query mol
nhs_sg0 = objQ.nhs[lasi]
# print ' cns_heav = ', objQ.cns_heav
cns_sg0_heav = objQ.cns_heav[lasi]
zs_sg = objQ.zs[ lasi ]
sg_heav = np.zeros((na,na))
for i in range(na-1):
for j in range(i+1,na):
bij = set([ lasi[i], lasi[j] ])
if bij in bs:
sg_heav[i,j] = sg_heav[j,i] = 1
nr = cg.get_number_of_rings(sg_heav)
ir = True
if self.ikeepRing:
if nr != nr0:
ir = False
cns_sg_heav = sg_heav.sum(axis=0)
# if iprt:
# print ' -- cns_sg0_heav, cns_sg_heav = ', cns_sg0_heav, cns_sg_heav
# print ' -- dvs_sg_heavy = ', objQ.dvs[lasi]
# print ' -- nhs = ', objQ.nhs[lasi]
# print zs_sg, cns_sg0_heav, cns_sg_heav #
dcns = cns_sg0_heav - cns_sg_heav # difference in coordination numbers
assert np.all( dcns >= 0 )
num_h_add = dcns.sum()
# if iprt: print ' -- dcns = ', dcns, ' nhs_sg0 = ', nhs_sg0
ztot = num_h_add + nhs_sg0.sum() + zs_sg.sum()
# if iprt: print ' -- ztot = ', ztot
chg0 = objQ.chgs[lasi].sum()
# test
#_cns2 = list(objQ.cns[lasi]); _cns2.sort()
icon = False
#if na == 7 and np.all(np.unique(zs_sg)==np.array([6,16])) and np.all(np.array(_cns2) == np.array([2,2,2,2,3,3,4])):
# icon = True; print ' ***** '
if ir and ztot%2 == 0 and chg0 == 0:
# ztot%2 == 1 implies a radical, not a valid amon for neutral query
# this requirement kills a lot of fragments
# e.g., CH3[N+](=O)[O-] --> CH3[N+](=O)H & CH3[N+](H)[O-] are not valid
# CH3C(=O)O (CCC#N) --> CH3C(H)O (CCC(H)) won't survive either
# while for C=C[N+](=O)[O-], with ztot%2 == 0, [CH2][N+](=O) may survive,
                # imposing chg0 == 0 solves the problem!
tvsi0 = objQ.vs[lasi] # for N in '-[N+](=O)[O-]', tvi=4 (rdkit)
bom0_heav = objQ.bom[lasi,:][:,lasi]
dbnsi = (bom0_heav==2).sum(axis=0) #np.array([ (bom0_heav[i]==2).sum() for i in range(na) ], np.int)
zsi = zs_sg
ias = np.arange(na)
## 0) check if envs like '>S=O', '-S(=O)(=O)-', '-P(=O)<',
## '-[N+](=O)[O-]' (it's already converted to '-N(=O)(=O)', so `ndb=2)
## 'R-S(=S(=O)=O)(=S(=O)(=O))-R', '-C=[N+]=[N-]' or '-N=[N+]=[N-]'
## ( however, '-Cl(=O)(=O)(=O)' cannot be
## recognized by rdkit )
## are retained if they are part of the query molecule
##### lines below are not necessary as `bosr will be used to assess
##### if the local envs have been kept!
## actually, the role of the few lines below is indispensible.
## E.g., for a mol c1ccccc1-S(=O)(=O)C, an amon like C=[SH2]=O
## has bos='2211', exactly the same as the S atom in query. But
## it's not a valid amon here as it's very different compared
## to O=[SH2]=O...
## Another example is C=CS(=O)(=O)S(=O)(=O)C=C, an amon like
## [SH2](=O)=[SH2](=O) has bos='2211' for both S atoms, but are
## not valid amons
tvs1 = [ 4, 6, 5, 5 ]
zs1 = [ 16, 16, 15, 7]
_dbns = [ [1], [2, 3], [1], [2] ] # number of double bonds
# | |
# | |___ 'R-S(=S(=O)=O)(=S(=O)(=O))-R',
# |
# |___ "R-S(=O)(=O)-R"
#_zsi = [ _zi for _zi in zsi ]
#_zsi.sort()
#if np.all(_zsi == np.array([8,8,8,8,16,16,16]) ):
# print '##'
# icon=True
#print ' __ zsi = ', zsi
istop = False
# now gather all atomic indices need to be compared
jas = np.array([], np.int)
for j,tvj in enumerate(tvs1):
filt = np.logical_and(tvsi0 == tvj, zsi == zs1[j])
_jas = ias[filt].astype(np.int)
jas = np.concatenate( (jas,_jas) )
# now compare the num_double_bonds
if len(jas) > 0:
dbnsj = dbnsi[jas]
dbnsrj = objQ.dbnsr[ _lasi[jas] ]
if np.any(dbnsj != dbnsrj):
istop = True; continue #break
#print 'tvj, zs1[j], dbnsj, dbns1[j] = ', tvj, zs1[j], dbnsj, dbns1[j]
#print ' __ zsi = ', zsi, ', istop = ', istop
#if istop: continue #"""
#print ' __ zsi = ', zsi
self.get_subm(lasi, lbsi, sg_heav)
vr = self.vars
## added on Aug 13, 2018
# # constraint that coordination numbers being the same
# cnsi = (vr.g > 0).sum(axis=0)[:na]
# cnsri = self.objQ.cns[lasi]
# if np.any( cnsi - cnsri != 0 ):
# continue
# else:
# print '## CN ok! ', cnsi
# added on Aug 13, 2018
so = ''
for i in range(na):
for j in range(i+1,na):
if vr.g[i,j] > 0: so += '[%d,%d],'%(i+1,j+1)
#print so
cmg = MG( vr.bosr, vr.zs, vr.chgs, vr.tvs, vr.g, vr.coords )
# test
#if icon: print ' ************* '
# for diagnosis
gr = []
nat = len(vr.zs); ic = 0
for i in range(nat-1):
for j in range(i+1,nat):
gr.append( vr.g[i,j] ); ic += 1
test = """
s = ' ########## %d'%nat
for i in range(nat): s += ' %d'%vr.zs[i]
for i in range(nat): s += ' %d'%vr.tvs[i]
for i in range(ic): s += ' %d'%gr[i]
print s
#"""
#if so == '[1,2],[1,6],[2,3],[3,4],[4,5],[4,7],[5,6],':
# icon = True
#if len(objQ.zs[lasi])==3:
# if np.all(objQ.zs[lasi] == np.array([7,7,7])): print '## we r here'
cans_i = []
cans_i, ms_i = cmg.update_m(debug=True,icon=icon)
#if icon: print ' -- cans = ', cans_i
for can_i in cans_i:
if can_i not in cans:
cans.append( can_i )
#if icon: print ''
if icon:
print '###############\n', cans_i, '############\n'
return cans
class ParentMols(object):
def __init__(self, strings, fixGeom, iat=None, wg=True, k=7,\
nmaxcomb=3,icc=None, substring=None, rc=6.4, \
isort=False, k2=7, opr='.le.', wsmi=True, irc=True, \
iters=[30,90], dminVDW= 1.2, \
idiff=0, thresh=0.2, \
keepHalogen=False, debug=False, ncore=1, \
forcefield='mmff94', do_ob_ff=True, \
ivdw=False, covPLmin=5, prefix=''):
"""
prefix -- a string added to the beginning of the name of a
                  folder, where all sdf files will be written to.
                  It should end with '_' if it's not empty
irc -- T/F: relax w/wo dihedral constraints
substring -- SMILES of a ligand.
Typically in a protein-ligand complex, we need
to identify the ligand first and then retrieve
all the local atoms that bind to the ligand via
vdW interaction as amons for training in ML. The
thus obtained fragment is dubbed `centre.
If `substring is assigned a string,
                     we will generate only amons that are
a) molecular complex; b) any atom in the centre
must be involved.
rc -- cutoff radius centered on each atom of the central
component. It's used when `icc is not None.
"""
def check_ncbs(a, b, c):
iok = False
for si in itl.product(a,b):
if set(si) in c:
iok = True; break
return iok
param = Parameters(wg, fixGeom, k, k2, ivdw, dminVDW, \
forcefield, thresh, do_ob_ff, idiff, iters)
ncpu = multiprocessing.cpu_count()
if ncore > ncpu:
ncore = ncpu
        # temporary folder
#tdirs = ['/scratch', '/tmp']
#for tdir in tdirs:
# if os.path.exists(tdir):
# break
# num_molecule_total
assert type(strings) is list, '#ERROR: `strings must be a list'
nmt = len(strings)
if iat != None:
assert nmt == 1, '#ERROR: if u wanna specify the atomic idx, 1 input molecule at most is allowed'
cans = []; nhas = []; es = []; maps = []
ms = []; ms0 = []
# initialize `Sets
seta = Sets(param)
for ir in range(nmt):
print ' -- Mid %d'%(ir+1)
string = strings[ir]
obj = ParentMol(string, isort=isort, iat=iat, wg=wg, k=k, k2=k2, \
opr=opr, fixGeom=fixGeom, covPLmin=covPLmin, \
ivdw=ivdw, dminVDW=dminVDW, \
keepHalogen=keepHalogen, debug=debug)
ncbs = obj.ncbs
Mlis, iass, cans = [], [], []
            # we need all fragments in the first place; later we'll
            # remove redundancies when merging molecules to obtain
# valid vdw complexes
nas = []; nasv = []; pss = []
iass = []; iassU = []
for Mli, ias, can in obj.generate_amons():
iasU = ias + [-1,]*(k-len(ias)); nasv.append( len(ias) )
Mlis.append( Mli ); iass.append( ias ); cans.append( can )
iassU.append( iasU ); pss += list(Mli[1])
nas.append( len(Mli[0]) )
nmi = len(cans)
print ' -- nmi = ', nmi
nas = np.array(nas, np.int)
nasv = np.array(nasv, np.int)
pss = np.array(pss)
iassU = np.array(iassU, np.int)
ncbsU = np.array(ncbs, np.int)
# now combine amons to get amons complex to account for
# long-ranged interaction
if wg and ivdw:
if substring != None:
cliques_c = set( oe.is_subg(obj.oem, substring, iop=1)[1][0] )
#print ' -- cliques_c = ', cliques_c
cliques = oe.find_cliques(obj.g0)
Mlis_centre = []; iass_centre = []; cans_centre = []
Mlis_others = []; iass_others = []; cans_others = []
for i in range(nmi):
#print ' %d/%d done'%(i+1, nmi)
if set(iass[i]) <= cliques_c:
Mlis_centre.append( Mlis[i] )
iass_centre.append( iass[i] )
cans_centre.append( cans[i] )
else:
Mlis_others.append( Mlis[i] )
iass_others.append( iass[i] )
cans_others.append( cans[i] )
nmi_c = len(Mlis_centre)
nmi_o = nmi - nmi_c
print ' -- nmi_centre, nmi_others = ', nmi_c, nmi_o
Mlis_U = []; cans_U = []
for i0 in range(nmi_c):
ias1 = iass_centre[i0]
t1 = Mlis_centre[i0]; nha1 = (np.array(t1[0]) > 1).sum()
for j0 in range(nmi_o):
ias2 = iass_others[j0]
                            t2 = Mlis_others[j0]; nha2 = (np.array(t2[0]) > 1).sum()
if nha1 + nha2 <= k2 and check_ncbs(ias1, ias2, ncbs):
dmin = ssd.cdist(t1[1], t2[1]).min()
if dmin >= dminVDW:
cansij = [cans_centre[i0], cans_others[j0]]
cansij.sort()
cans_U.append( '.'.join(cansij) )
Mlis_U.append( merge(t1, t2) )
Mlis = Mlis_U; cans = cans_U
print ' -- nmi_U = ', len(Mlis)
else:
print 'dminVDW = ', dminVDW
gv,gc = fa.get_amon_adjacency(k2,nas,nasv,iassU.T,pss.T,ncbsU.T,dminVDW)
print 'amon connectivity done'
#print 'gv=',gv # 'np.any(gv > 0) = ', np.any(gv > 0)
ims = np.arange(nmi)
combs = []
for im in range(nmi):
nv1 = nasv[im]
jms = ims[ gv[im] > 0 ]
nj = len(jms)
if nj == 1:
# in this case, nmaxcomb = 2
jm = jms[0]
if nmaxcomb == 2:
# setting `nmaxcomb = 2 means to include
# all possible combinations consisting of
# two standalone molecules
comb = [im,jms[0]]; comb.sort()
if comb not in combs:
combs += [comb]
else:
# if we are not imposed with `nmaxcomb = 2,
# we remove any complex corresponding to 2) below
#
# 1) 1 --- 2 (no other frag is connected to `1 or `2)
#
# 2) 1 --- 2
# \
# \
# 3
if len(gv[jm]) == 1:
comb = [im,jm]; comb.sort()
if comb not in combs:
combs += [comb]
else:
if nmaxcomb == 2:
for jm in jms:
comb = [im,jm]; comb.sort()
if comb not in combs:
combs += [comb]
elif nmaxcomb == 3:
#for jm in jms:
# comb = [im,jm]; comb.sort()
# if comb not in combs:
# combs += [comb]
# this is the default choice and is more reasonable
# as only the most relevant local frags are included.
# Here we don't consider frags like [im,p],[im,q] as
# 1) the local envs are covered by [im,p,q]; 2) it's less
# relevant to [im,p,q]
for (p,q) in itl.combinations(jms,2):
nv2 = nasv[p]; nv3 = nasv[q]
if nv1+nv2+nv3 <= k2 and gc[p,q] == 0:
comb = [im,p,q]; comb.sort()
if comb not in combs:
combs += [comb]
print 'atom indices of all amons done'
for comb in combs:
#print comb
cans_i = [ cans[ic] for ic in comb ]; cans_i.sort()
cans.append('.'.join(cans_i))
ts_i = [ Mlis[ic] for ic in comb ]
Mlis.append( merge(ts_i) )
print 'amons now ready for filtering'
#else:
# #
ncan = len(cans)
# now remove redundancy
if wg:
#print ' cans = ', cans
for i in range(ncan):
#print '** ', cans[i], (np.array(Mlis[i][0]) > 1).sum(),\
# len(Mlis[i][0]), Mlis[i][0]
seta.update(ir, cans[i], Mlis[i])
seta._sort()
else:
for i in range(ncan):
#print ' ++ i, cans[i] = ', i,cans[i]
seta.update2(ir, cans[i], Mlis[i])
seta._sort2()
print 'amons are sorted and regrouped'
cans = seta.cans; ncs = seta.ncs; nhas = seta.nhas
ncan = len(cans)
self.cans = cans
if not wsmi: return
nd = len(str(ncan))
s1 = 'EQ' if opr == '.eq.' else ''
svdw = '_vdw%d'%k2 if ivdw else ''
scomb = '_comb2' if nmaxcomb == 2 else ''
sthresh = '_dE%.2f'%thresh if thresh > 0 else ''
if prefix == '':
fdn = 'g%s%d%s%s_covL%d%s'%(s1,k,svdw,sthresh,covPLmin,scomb)
else:
fdn = prefix
if not os.path.exists(fdn): os.system('mkdir -p %s'%fdn)
self.fd = fdn
if iat is not None:
fdn += '_iat%d'%iat # absolute idx
if wg and (not os.path.exists(fdn+'/raw')): os.system('mkdir -p %s/raw'%fdn)
with open(fdn + '/' + fdn+'.smi', 'w') as fid:
fid.write('\n'.join( [ '%s %d'%(cans[i],ncs[i]) for i in range(ncan) ] ) )
dd.io.save('%s/maps.pkl'%fdn, {'maps': maps} )
if wg:
ms = seta.ms; ms0 = seta.ms0;
for i in range(ncan):
ms_i = ms[i]; ms0_i = ms0[i]
nci = ncs[i]
labi = '0'*(nd - len(str(i+1))) + str(i+1)
print ' ++ %d %06d/%06d %60s %3d'%(nhas[i], i+1, ncan, cans[i], nci)
for j in range(nci):
f_j = fdn + '/frag_%s_c%05d'%(labi, j+1) + '.sdf'
f0_j = fdn + '/raw/frag_%s_c%05d_raw'%(labi, j+1) + '.sdf'
m_j = ms_i[j]; m0_j = ms0_i[j]
Chem.MolToMolFile(m_j, f_j)
Chem.MolToMolFile(m0_j, f0_j)
print ' -- nmi_u = ', sum(ncs)
print ' -- ncan = ', len(np.unique(cans))
else:
if wsmi:
with open(fdn + '/' + fdn+'.smi', 'w') as fid:
fid.write('\n'.join( [ '%s'%(cans[i]) for i in range(ncan) ] ) )
"""
Code below was borrowed from <NAME> and some changes were made to
be independent of any cheminformatics software!
For an explanation of the algorithm see
http://dalkescientific.com/writings/diary/archive/2011/01/10/subgraph_enumeration.html
"""
#=========================================================================
class Subgraph(object):
def __init__(self, atoms, bonds):
self.atoms = atoms
self.bonds = bonds
def get_nbr(ia, b):
ia1, ia2 = b
if ia == ia1:
return ia2
else:
return ia1
def find_extensions(considered, new_atoms, b2a, a2b):
# Find the extensions from the atoms in 'new_atoms'.
# There are two types of extensions:
#
# 1. an "internal extension" is a bond which is not in 'considered'
# which links two atoms in 'new_atoms'.
#
# 2. an "external extension" is a (bond, to_atom) pair where the
# bond is not in 'considered' and it connects one of the atoms in
# 'new_atoms' to the atom 'to_atom'.
#
# Return the internal extensions as a list of bonds and
# return the external extensions as a list of (bond, to_atom) 2-ples.
internal_extensions = set()
external_extensions = []
#print 'type, val = ', type(new_atoms), new_atoms
for atom in new_atoms: # atom is atom_idx
ibsc = a2b[atom] # idxs of bond candidates
for outgoing_bond in ibsc[ ibsc >= 0 ]: #atom.GetBonds():
if outgoing_bond in considered:
continue
other_atom = get_nbr(atom, b2a[outgoing_bond]) #outgoing_bond.GetNbr(atom)
if other_atom in new_atoms:
# This this is an unconsidered bond going to
# another atom in the same subgraph. This will
# come up twice, so prevent duplicates.
internal_extensions.add(outgoing_bond)
else:
external_extensions.append( (outgoing_bond, other_atom) )
return list(internal_extensions), external_extensions
def all_combinations(container):
"Generate all 2**len(container) combinations of elements in the container"
# This just sets up the underlying call
return _all_combinations(container, len(container)-1, 0)
def _all_combinations(container, last, i):
# This does the hard work recursively
if i == last:
yield []
yield [container[i]]
else:
for subcombinations in _all_combinations(container, last, i+1):
yield subcombinations
yield [container[i]] + subcombinations
## I had an optimization that if limit >= len(external_extensions) then
## use this instead of the limited_external_combinations, but my timings
## suggest the result was slower, so I went for the simpler code.
#def all_external_combinations(container):
# "Generate all 2**len(container) combinations of external extensions"
# for external_combination in all_combinations(container):
# # For each combination yield 2-ples containing
# # {the set of atoms in the combination}, [list of external extensions]
# yield set((ext[1] for ext in external_combination)), external_combination
def limited_external_combinations(container, limit):
"Generate all 2**len(container) combinations which do not have more than 'limit' atoms"
return _limited_combinations(container, len(container)-1, 0, limit)
def _limited_combinations(container, last, i, limit):
# Keep track of the set of current atoms as well as the list of extensions.
# (An external extension doesn't always add an atom. Think of
# C1CC1 where the first "CC" adds two edges, both to the same atom.)
if i == last:
yield set(), []
if limit >= 1:
ext = container[i]
yield set([ext[1]]), [ext]
else:
for subatoms, subcombinations in _limited_combinations(container, last, i+1, limit):
assert len(subatoms) <= limit
yield subatoms, subcombinations
new_subatoms = subatoms.copy()
ext = container[i]
new_subatoms.add(ext[1])
if len(new_subatoms) <= limit:
yield new_subatoms, [ext] + subcombinations
def all_subgraph_extensions(subgraph, internal_extensions, external_extensions, k):
# Generate the set of all subgraphs which can extend the input subgraph and
# which have no more than 'k' atoms.
assert len(subgraph.atoms) <= k
if not external_extensions:
# Only internal extensions (test case: "C1C2CCC2C1")
it = all_combinations(internal_extensions)
it.next()
for internal_ext in it:
# Make the new subgraphs
bonds = frozenset(chain(subgraph.bonds, internal_ext))
yield set(), Subgraph(subgraph.atoms, bonds)
return
limit = k - len(subgraph.atoms)
if not internal_extensions:
# Only external extensions
# If we're at the limit then it's not possible to extend
if limit == 0:
return
# We can extend by at least one atom.
it = limited_external_combinations(external_extensions, limit)
it.next()
for new_atoms, external_ext in it:
# Make the new subgraphs
atoms = frozenset(chain(subgraph.atoms, new_atoms))
bonds = frozenset(chain(subgraph.bonds, (ext[0] for ext in external_ext)))
yield new_atoms, Subgraph(atoms, bonds)
return
# Mixture of internal and external (test case: "C1C2CCC2C1")
external_it = limited_external_combinations(external_extensions, limit)
it = product(all_combinations(internal_extensions), external_it)
it.next()
for (internal_ext, external) in it:
# Make the new subgraphs
new_atoms = external[0]
atoms = frozenset(chain(subgraph.atoms, new_atoms))
bonds = frozenset(chain(subgraph.bonds, internal_ext,
(ext[0] for ext in external[1])))
yield new_atoms, Subgraph(atoms, bonds)
return
def generate_subgraphs(b2a, a2b, k=5):
if k < 0:
raise ValueError("k must be non-negative")
# If you want nothing, you'll get nothing
if k < 1:
return
# Generate all the subgraphs of size 1
na = len(a2b)
for atom in range(na): #mol.GetAtoms():
yield Subgraph(frozenset([atom]), frozenset())
# If that's all you want then that's all you'll get
if k == 1:
return
    # Generate the initial seeds. Seed_i starts with bond_i and knows
    # that bond_0 .. bond_i will not need to be considered during any
    # growth of the seed.
# For each seed I also keep track of the possible ways to extend the seed.
seeds = []
considered = set()
nb = len(b2a)
for bond in range(nb): #mol.GetBonds():
considered.add(bond)
subgraph = Subgraph(frozenset(b2a[bond]), #[bond.GetBgn(), bond.GetEnd()]),
frozenset([bond]))
yield subgraph
internal_extensions, external_extensions = find_extensions(considered,
subgraph.atoms, b2a, a2b)
# If it can't be extended then there's no reason to keep track of it
if internal_extensions or external_extensions:
seeds.append( (considered.copy(), subgraph,
internal_extensions, external_extensions) )
# No need to search any further
if k == 2:
return
# seeds = [(considered, subgraph, internal, external), ...]
while seeds:
considered, subgraph, internal_extensions, external_extensions = seeds.pop()
# I'm going to handle all 2**n-1 ways to expand using these
# sets of bonds, so there's no need to consider them during
# any of the future expansions.
new_considered = considered.copy()
new_considered.update(internal_extensions)
new_considered.update(ext[0] for ext in external_extensions)
for new_atoms, new_subgraph in all_subgraph_extensions(
subgraph, internal_extensions, external_extensions, k):
assert len(new_subgraph.atoms) <= k
yield new_subgraph
# If no new atoms were added, and I've already examined
# all of the ways to expand from the old atoms, then
# there's no other way to expand and I'm done.
if not new_atoms:
continue
# Start from the new atoms to find possible extensions
# for the next iteration.
new_internal, new_external = find_extensions(new_considered, new_atoms, b2a, a2b)
if new_internal or new_external:
seeds.append( (new_considered, new_subgraph, new_internal, new_external) )
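# Expected input layout (a sketch inferred from the indexing above): for
# propane C-C-C, b2a = np.array([[0, 1], [1, 2]]) maps each bond index to its
# two atom indices, while a2b = np.array([[0, -1], [0, 1], [1, -1]]) maps each
# atom index to its incident bond indices, padded with -1 (the `ibsc >= 0`
# mask in find_extensions skips the padding).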
## test!
if __name__ == "__main__":
    import time, sys, os, gzip
args = sys.argv[1:]
nargs = len(args)
if nargs == 0:
ss = ["[NH3+]CC(=O)[O-]", "CC[N+]([O-])=O", \
"C=C=C=CC=[N+]=[N-]", "CCS(=O)(=O)[O-]", \
"C#CS(C)(=C=C)=C=C", "C1=CS(=S(=O)=O)(=S(=O)=O)C=C1", \
"C#P=PP(#P)P(#P)P=P#P", \
"c1ccccc1S(=O)(=O)S(=O)(=N)S(=O)(=O)c2ccccc2"] # test molecules
k = 7
elif nargs == 1:
ss = args[0:1]
k = 7
elif nargs == 2:
ss = args[1:2]
k = int(args[0])
else:
raise SystemExit("""Usage: dfa_subgraph_enumeration.py <smiles> [<k>]
List all subgraphs of the given SMILES up to size k atoms (default k=5)
""")
for s in ss:
print '\n ## %s'%s
if not os.path.exists(s):
if s in ["C#P=PP(#P)P(#P)P=P#P",]:
print ' ** Problematic!! Openbabel cannot obtain the correct valence for atom like P in C#P=PP(#P)C'
t1 = time.time()
obj = amon(s, k)
cans = obj.get_amons()
for can in cans:
print can
t2 = time.time()
print ' time elapsed: ', t2-t1
else:
assert s[-3:] == 'smi'
fn = s[:-4]
ts = file(s).readlines()
            icnt = 0
            ids = []
            # cs, maps, idxc and nmaxc below are assumed initial values
            # (nmaxc: an assumed cap on the number of amons per molecule,
            # used to pad every map row to the same width)
            cs = []
            maps = []
            idxc = 0
            nmaxc = 128
for i,t in enumerate(ts):
si = t.strip()
print i+1, icnt+1, si
if '.' in si: continue
                obj = amon(si, k)  # 'amon', matching the constructor used above
if not obj.iok: print ' ** radical **'; continue
print ' ++ '
cansi = obj.get_amons()
print ' +++ '
nci = len(cansi)
map_i = []
for ci in cansi:
if ci not in cs:
cs.append(ci); map_i += [idxc]; idxc += 1
else:
jdxc = cs.index(ci)
if jdxc not in map_i: map_i += [jdxc]
print 'nci = ', nci
map_i += [-1,]*(nmaxc-nci)
maps.append( map_i )
#nmaxc = max(nmaxc, nci)
ids.append( i+1 )
icnt += 1
with open(fn+'_all.smi','w') as fo: fo.write('\n'.join(cs))
cs = np.array(cs)
maps = np.array(maps,np.int)
ids = np.array(ids,np.int)
dd.io.save(fn+'.pkl', {'ids':ids, 'cans':cs, 'maps':maps})
| 2.21875
| 2
|
section11/section_11_175_randomgame.py
|
anadebarros/ZTM_Complete_Python_Developer
| 0
|
12777461
|
<reponame>anadebarros/ZTM_Complete_Python_Developer<gh_stars>0
import sys
from random import randint
random_number = randint(int(sys.argv[1]), int(sys.argv[2]))
while True:
try:
number = int(
input('Please choose a number that falls between those two you just chose: '))
if number >= int(sys.argv[1]) and number <= int(sys.argv[2]):
if number == random_number:
print("You're a genius!")
break
except ValueError:
print("Please enter a number")
continue
| 4.0625
| 4
|
tsm/tsdb/helper.py
|
espang/projects
| 0
|
12777462
|
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 28 20:01:48 2015
The redis script_load method is inspired by 'Redis in Action' from Dr. <NAME>
--see https://github.com/josiahcarlson/redis-in-action
@author: Eike
"""
import redis
def script_load(script):
sha = [None]
def call(conn, keys=[], args=[], force_eval=False):
if force_eval:
return conn.execute_command(
"EVAL", script, len(keys), *(keys+args))
if not sha[0]:
sha[0] = conn.execute_command(
"SCRIPT", "LOAD", script, parse="LOAD")
try:
return conn.execute_command(
"EVALSHA", sha[0], len(keys), *(keys+args))
except redis.exceptions.ResponseError as msg:
if not msg.args[0].startswith("NOSCRIPT"):
raise
return call
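# A minimal usage sketch (hypothetical key name; assumes a reachable Redis
# server on localhost):
#
#   conn = redis.Redis()
#   incr_by = script_load("return redis.call('INCRBY', KEYS[1], ARGV[1])")
#   incr_by(conn, keys=['counter'], args=[5])  # first call loads the script
#   incr_by(conn, keys=['counter'], args=[5])  # later calls reuse the cached SHA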
| 3.0625
| 3
|
feder/letters/migrations/0018_auto_20180227_1926.py
|
dzemeuksis/feder
| 16
|
12777463
|
# Generated by Django 1.11.10 on 2018-02-27 19:26
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [("letters", "0017_auto_20180227_1908")]
operations = [
migrations.AlterField(
model_name="letter",
name="record",
field=models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE,
related_name="letters_letter_related",
related_query_name="letters_letters",
to="records.Record",
),
)
]
| 1.585938
| 2
|
python_codes/twoSum.py
|
the-moonLight0/Hactober-fest-2021
| 11
|
12777464
|
<gh_stars>10-100
class Solution(object):
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
        seen = {}  # to store each element and its index
        result = []
        for i in range(len(nums)):
            diff = target - nums[i]
            if diff in seen:  # nums[j] + nums[i] == target, so add both indexes
                result.append(seen[diff])
                result.append(i)
            seen[nums[i]] = i
        return result
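# A quick sanity check (illustrative input):
#   Solution().twoSum([2, 7, 11, 15], 9)  # -> [0, 1], since nums[0] + nums[1] == 9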
| 3.25
| 3
|
models/layers.py
|
KazukiChiyo/Vogel
| 1
|
12777465
|
# Author: <NAME>
# Date: Nov 13, 2018; revision: Mar 13, 2019
# License: MIT
import torch.nn as nn
import torch.nn.init as init
from torch.nn.init import kaiming_normal_, constant_
activation_functions = {
'relu': nn.ReLU,
'leaky_relu': nn.LeakyReLU,
'elu': nn.ELU,
'sigmoid': nn.Sigmoid,
'tanh': nn.Tanh,
'softplus': nn.Softplus,
'softmax': nn.Softmax
}
init_gain = {
'relu': 1.41414,
'leaky_relu': 1.41414,
'elu': 1.41414,
'sigmoid': 1,
'tanh': 1.66667,
'softplus': 1,
'softmax': 1
}
class _LayerNd(nn.Module):
def __init__(self, kernel_initializer, activation):
super(_LayerNd, self).__init__()
if isinstance(activation, str):
self.activation = activation_functions[activation]()
else:
self.activation = activation
if isinstance(kernel_initializer, str):
if kernel_initializer == 'normal':
self.kernel_initializer = init.normal_
elif kernel_initializer == 'kaiming':
self.kernel_initializer = init.kaiming_normal_
elif kernel_initializer == 'xavier':
self.kernel_initializer = init.xavier_normal_
self.gain = init_gain.setdefault(activation, 1)
elif kernel_initializer == 'orthogonal':
self.kernel_initializer = init.orthogonal_
self.gain = init_gain.setdefault(activation, 1)
else:
self.kernel_initializer = kernel_initializer
class Conv2DNorm(nn.Module):
"""Applies 2D convolution over an input signal with batch normalization and activation.
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, kernel_initializer='normal', batch_norm=False, activation=None):
super(Conv2DNorm, self).__init__()
conv_base = nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias)
# if hasattr(self, 'gain'):
# self.kernel_initializer(conv_base.weight, gain=self.gain)
# else:
# self.kernel_initializer(conv_base.weight)
if batch_norm:
if activation:
self.conv = nn.Sequential(
conv_base,
nn.BatchNorm2d(num_features=out_channels),
nn.LeakyReLU(0.1,inplace=True))
else:
self.conv = nn.Sequential(
conv_base,
nn.BatchNorm2d(num_features=out_channels))
else:
if activation:
self.conv = nn.Sequential(
conv_base,
nn.LeakyReLU(0.1,inplace=True))
else:
self.conv = nn.Sequential(
conv_base)
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
kaiming_normal_(m.weight, 0.1)
if m.bias is not None:
constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
constant_(m.weight, 1)
constant_(m.bias, 0)
def forward(self, x):
x = self.conv(x)
return x
# reference: https://github.com/Cadene/pretrained-models.pytorch/blob/master/pretrainedmodels/models/xception.py
class SeparableConv2D(nn.Module):
"""Applies depthwise separable 2D convolution over an input signal with batch normalization and activation.
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, bias=True, kernel_initializer='normal', batch_norm=False, activation=None):
super(SeparableConv2D, self).__init__()
conv_depthwise = nn.Conv2d(
in_channels=in_channels,
out_channels=in_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=in_channels,
bias=bias)
conv_pointwise = nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=bias)
# init.xavier_normal_(conv_depthwise.weight)
# if hasattr(self, 'gain'):
# self.kernel_initializer(conv_pointwise.weight, gain=self.gain)
# else:
# self.kernel_initializer(conv_pointwise.weight)
if batch_norm:
if activation:
self.conv = nn.Sequential(
conv_depthwise,
conv_pointwise,
nn.BatchNorm2d(num_features=out_channels),
nn.LeakyReLU(0.1,inplace=True))
else:
self.conv = nn.Sequential(
conv_depthwise,
conv_pointwise,
nn.BatchNorm2d(num_features=out_channels))
else:
if activation:
self.conv = nn.Sequential(
conv_depthwise,
conv_pointwise,
nn.LeakyReLU(0.1,inplace=True))
else:
self.conv = nn.Sequential(
conv_depthwise,
conv_pointwise)
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
kaiming_normal_(m.weight, 0.1)
if m.bias is not None:
constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
constant_(m.weight, 1)
constant_(m.bias, 0)
def forward(self, x):
x = self.conv(x)
return x
class ConvResidual2D(Conv2DNorm):
"""Convolutional 2D residual block with batch normalization and activation."""
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, bias=True, kernel_initializer='normal', batch_norm=False, activation=None):
super(ConvResidual2D, self).__init__(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias, kernel_initializer=kernel_initializer, batch_norm=batch_norm, activation=activation)
def forward(self, x):
out = self.conv(x)
return x + out
class Deconv2DNorm(nn.Module):
"""Applies 2D transposed convolution over an input signal with batch normalization and activation.
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=1, output_padding=0, groups=1, bias=True, kernel_initializer='normal', dilation=1, batch_norm=False, activation=None):
super(Deconv2DNorm, self).__init__()
deconv_base = nn.ConvTranspose2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
output_padding=output_padding,
groups=groups,
bias=bias,
dilation=dilation)
# if hasattr(self, 'gain'):
# self.kernel_initializer(deconv_base.weight, gain=self.gain)
# else:
# self.kernel_initializer(deconv_base.weight)
if batch_norm:
if activation:
self.deconv = nn.Sequential(
deconv_base,
nn.BatchNorm2d(num_features=out_channels),
nn.LeakyReLU(0.1,inplace=True))
else:
self.deconv = nn.Sequential(
deconv_base,
nn.BatchNorm2d(num_features=out_channels))
else:
if activation:
self.deconv = nn.Sequential(
deconv_base,
nn.LeakyReLU(0.1,inplace=True))
else:
self.deconv = nn.Sequential(
deconv_base)
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
kaiming_normal_(m.weight, 0.1)
if m.bias is not None:
constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
constant_(m.weight, 1)
constant_(m.bias, 0)
def forward(self, x):
x = self.deconv(x)
return x
def crop_like(input, target):
"""Crop input hieght and width to match target."""
if input.size()[2:] == target.size()[2:]:
return input
else:
return input[:, :, :target.size(2), :target.size(3)]
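# A minimal usage sketch (hypothetical shapes). Note that any truthy
# `activation` value selects the hard-coded LeakyReLU(0.1) above:
#
#   import torch
#   block = Conv2DNorm(3, 16, kernel_size=3, padding=1,
#                      batch_norm=True, activation='relu')
#   y = block(torch.randn(1, 3, 32, 32))  # -> shape [1, 16, 32, 32]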
| 2.578125
| 3
|
ax/metrics/tests/test_chemistry.py
|
mpolson64/Ax-1
| 1
|
12777466
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from enum import Enum
from unittest import mock
import numpy as np
import pandas as pd
from ax.core.arm import Arm
from ax.core.generator_run import GeneratorRun
from ax.metrics.chemistry import ChemistryMetric, ChemistryProblemType
from ax.utils.common.testutils import TestCase
from ax.utils.testing.core_stubs import get_trial
class DummyEnum(Enum):
DUMMY: str = "dummy"
class ChemistryMetricTest(TestCase):
def testChemistryMetric(self):
# basic test
read_csv = pd.read_csv
for problem_type in (
ChemistryProblemType.DIRECT_ARYLATION,
ChemistryProblemType.SUZUKI,
):
with mock.patch(
"ax.metrics.chemistry.pd.read_csv",
wraps=lambda filename, index_col: read_csv(
filename, index_col=index_col, nrows=1
),
) as mock_read_csv:
metric = ChemistryMetric(name="test_metric", problem_type=problem_type)
self.assertFalse(metric.noiseless)
self.assertIs(metric.problem_type, problem_type)
self.assertFalse(metric.lower_is_better)
if problem_type is ChemistryProblemType.DIRECT_ARYLATION:
param_names = [
"Base_SMILES",
"Concentration",
"Ligand_SMILES",
"Solvent_SMILES",
"Temp_C",
]
param_values = (
"O=C([O-])C.[K+]",
0.1,
(
"CC(C)C1=CC(C(C)C)=C(C(C(C)C)=C1)C2=C(P(C3CCCCC3)"
"C4CCCCC4)C(OC)=CC=C2OC"
),
"CC(N(C)C)=O",
105,
)
obj = 5.47
else:
param_names = [
"Base_SMILES",
"Electrophile_SMILES",
"Ligand_SMILES",
"Nucleophile_SMILES",
"Solvent_SMILES",
]
param_values = (
"[Na+].[OH-]",
"ClC1=CC=C(N=CC=C2)C2=C1",
"CC(P(C(C)(C)C)C(C)(C)C)(C)C",
"CC1=CC=C(N(C2CCCCO2)N=C3)C3=C1B(O)O",
"N#CC",
)
obj = 4.76
params = dict(zip(param_names, param_values))
trial = get_trial()
trial._generator_run = GeneratorRun(
arms=[Arm(name="0_0", parameters=params)]
)
df = metric.fetch_trial_data(trial).df
self.assertEqual(mock_read_csv.call_count, 1)
self.assertEqual(df["mean"].values[0], obj)
self.assertTrue(np.isnan(df["sem"].values[0]))
# test caching
metric.fetch_trial_data(trial)
self.assertEqual(mock_read_csv.call_count, 1)
# test noiseless
metric = ChemistryMetric(
name="test_metric", problem_type=problem_type, noiseless=True
)
df = metric.fetch_trial_data(trial).df
self.assertEqual(df["sem"].values[0], 0.0)
| 2.390625
| 2
|
core/src/zeit/edit/tests/test_block.py
|
rickdg/vivi
| 5
|
12777467
|
from unittest import mock
from zeit.cms.testcontenttype.testcontenttype import ExampleContentType
import lxml.objectify
import persistent.interfaces
import zeit.cms.interfaces
import zeit.edit.interfaces
import zeit.edit.testing
import zeit.edit.tests.fixture
import zope.component
import zope.interface
class ElementUniqueIdTest(zeit.edit.testing.FunctionalTestCase):
def setUp(self):
super(ElementUniqueIdTest, self).setUp()
xml = lxml.objectify.fromstring("""
<container
xmlns:cp="http://namespaces.zeit.de/CMS/cp"
cp:__name__="body">
<block cp:type="block" cp:__name__="foo"/>
</container>""")
content = self.repository['testcontent']
self.container = zeit.edit.tests.fixture.Container(content, xml)
self.block = zeit.edit.tests.fixture.Block(
self.container, xml.block)
# Fake traversal ability.
ExampleContentType.__getitem__ = lambda s, key: self.container
def tearDown(self):
del ExampleContentType.__getitem__
super(ElementUniqueIdTest, self).tearDown()
def test_block_ids_are_composed_of_parent_ids(self):
self.assertEqual(
'http://block.vivi.zeit.de/http://xml.zeit.de/testcontent#body',
self.container.uniqueId)
self.assertEqual(
'http://block.vivi.zeit.de/http://xml.zeit.de/testcontent#body/'
'foo',
self.block.uniqueId)
def test_resolving_block_ids_uses_traversal(self):
block = zeit.cms.interfaces.ICMSContent(self.block.uniqueId)
self.assertEqual(block, self.block)
def test_block_without_name_uses_index(self):
del self.block.xml.attrib['{http://namespaces.zeit.de/CMS/cp}__name__']
with mock.patch('zeit.edit.tests.fixture.Container.index') as index:
index.return_value = 0
self.assertEqual(
'http://block.vivi.zeit.de/http://xml.zeit.de'
'/testcontent#body/0', self.block.uniqueId)
def test_block_equality_compares_xml(self):
xml = """
<container xmlns:cp="http://namespaces.zeit.de/CMS/cp">
<block cp:type="block" cp:__name__="foo"/>
</container>"""
xml1 = lxml.objectify.fromstring(xml)
xml2 = lxml.objectify.fromstring(xml)
# CAUTION: xml1 == xml2 does not do what one might think it does,
# thus block equality uses a proper in-depth xml comparison:
self.assertNotEqual(xml1, xml2)
block1 = zeit.edit.tests.fixture.Block(None, xml1)
block2 = zeit.edit.tests.fixture.Block(None, xml2)
self.assertEqual(block1, block2)
def test_blocks_are_unequal_when_text_nodes_differ(self):
# Upstream xmldiff wants to write to (a copy of) text nodes, which is
# not possible with lxml.objectify.
xml1 = lxml.objectify.fromstring("""
<container>
<foo>bar</foo>
</container>""")
xml2 = lxml.objectify.fromstring("""
<container>
<foo>qux</foo>
</container>""")
block1 = zeit.edit.tests.fixture.Block(None, xml1)
block2 = zeit.edit.tests.fixture.Block(None, xml2)
self.assertNotEqual(block1, block2)
def test_blocks_are_unequal_when_tag_counts_differ(self):
xml1 = lxml.objectify.fromstring("""
<foo><one/></foo>
""")
xml2 = lxml.objectify.fromstring("""
<foo><one/><two/><three/></foo>
""")
block1 = zeit.edit.tests.fixture.Block(None, xml1)
block2 = zeit.edit.tests.fixture.Block(None, xml2)
self.assertNotEqual(block1, block2)
class ElementFactoryTest(zeit.edit.testing.FunctionalTestCase):
def test_factory_returns_interface_implemented_by_element(self):
context = mock.Mock()
zope.interface.alsoProvides(context, persistent.interfaces.IPersistent)
container = zeit.edit.tests.fixture.Container(
context, lxml.objectify.fromstring('<container/>'))
block_factory = zope.component.getAdapter(
container, zeit.edit.interfaces.IElementFactory, 'block')
self.assertEqual(
zeit.edit.tests.fixture.IBlock, block_factory.provided_interface)
| 2
| 2
|
LeetCode/257. Binary Tree Paths.py
|
QinganZhao/LXXtCode
| 3
|
12777468
|
<reponame>QinganZhao/LXXtCode<filename>LeetCode/257. Binary Tree Paths.py
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
### path outside the stack
class Solution:
def binaryTreePaths(self, root: TreeNode) -> List[str]:
if not root:
return []
path, pathsNode, paths = [], [], []
stack = [root]
current = root
while stack:
current = stack.pop()
while True:
if not path or current in [path[-1].left,
path[-1].right]:
path.append(current)
break
else:
path.pop()
if not current.left and not current.right:
pathsNode.append(path[:])
elif current.left and current.right:
stack.extend([current.right, current.left])
else:
stack.append(current.left or current.right)
for pathNode in pathsNode:
paths.append('->'.join(map(lambda x: str(x.val), pathNode)))
return paths
### path inside the stack
class Solution:
def binaryTreePaths(self, root: TreeNode) -> List[str]:
if not root:
return []
stack = [(root, str(root.val))]
paths = []
while stack:
current = stack.pop()
if not current[0].left and not current[0].right:
paths.append(current[1])
if current[0].right:
stack.append((current[0].right, current[1] + '->'
+ str(current[0].right.val)))
if current[0].left:
stack.append((current[0].left, current[1] + '->'
+ str(current[0].left.val)))
return paths
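# A quick sanity check (TreeNode as defined in the header comment):
#   root = TreeNode(1); root.left = TreeNode(2); root.right = TreeNode(3)
#   Solution().binaryTreePaths(root)  # -> ['1->2', '1->3']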
| 3.859375
| 4
|
aquests/protocols/http/localstorage.py
|
hansroh/aquests
| 8
|
12777469
|
import base64
import random
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
from . import util
g = None
def create (logger):
global g
g = LocalStorage (logger)
class LocalStorage:
def __init__ (self, logger):
self.logger = logger
self.cookie = {}
self.data = {}
def get_host (self, url):
return urlparse (url) [1].split (":") [0].lower ()
	def set_item (self, url, key, val):
		host = self.get_host (url)
		if host not in self.data:
			self.data [host] = {}
		self.data [host][key] = val
def get_item (self, url, key):
host = self.get_host (url)
try:
return self.data [host][key]
except KeyError:
return
def get_cookie_as_list (self, url):
cookie = []
for domain in self.cookie:
netloc, script = urlparse (url) [1:3]
netloc = netloc.lower ()
if ("." + netloc).find (domain) > -1:
for path in self.cookie [domain]:
if script.find (path) > -1:
cookie += list(self.cookie [domain][path].items ())
return cookie
	def get_cookie_as_dict (self, url):
		cookie = self.get_cookie_as_list (url)
		d = {}
		if cookie:
			for k, v in cookie:
				d [k] = v
		return d
	def get_cookie (self, url, key):
		d = self.get_cookie_as_dict (url)
		try:
			return d [key]
		except KeyError:
			return None
def get_cookie_as_string (self, url):
cookie = self.get_cookie_as_list (url)
if cookie:
return "; ".join (["%s=%s" % (x, y) for x, y in cookie])
return ""
def set_cookie_from_data (self, url, cookie):
host = self.get_host (url)
self.cookie [host] = {}
self.cookie [host]["/"] = {}
if type (cookie) != type ([]):
cookie = util.strdecode (cookie, 1)
for k, v in cookie:
self.cookie [host]["/"][k] = v
def clear_cookie (self, url):
url = url.lower ()
for domain in list(self.cookie.keys ()):
if url.find (domain) > -1:
del self.cookie [domain]
def set_cookie (self, url, key, val, domain = None, path = "/"):
if domain is None:
domain = self.get_host (url)
try: self.cookie [domain]
except KeyError: self.cookie [domain] = {}
try: self.cookie [domain][path]
except KeyError: self.cookie [domain][path] = {}
self.cookie [domain][path][key] = val
	def del_cookie (self, url, key, domain = None, path = "/"):
		if domain is None:
			domain = self.get_host (url)
try: self.cookie [domain]
except KeyError: return
try: self.cookie [domain][path]
except KeyError: return
for path in self.cookie [domain]:
del self.cookie [domain][path][key]
def set_cookie_from_string (self, url, cookiestr):
host = self.get_host (url)
ckey, cval = '', ''
s = {}
count = 0
for element in cookiestr.split (";"):
try:
k, v = element.split ("=", 1)
except:
k, v = element, ''
			if count == 0:
				ckey, cval = k.strip (), v.strip ()
else:
s [k.strip ().lower ()] = v.strip ().lower ()
count += 1
try: domain = s ['domain']
except KeyError: domain = host
try: path = s ['path']
except KeyError: path = '/'
try: self.cookie [domain]
except KeyError: self.cookie [domain] = {}
try: self.cookie [domain][path]
except KeyError: self.cookie [domain][path] = {}
self.cookie [domain][path][ckey] = cval
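# A minimal usage sketch (hypothetical URL and cookie string):
#
#   store = LocalStorage (logger=None)
#   store.set_cookie_from_string ("http://example.com/", "sid=abc123; path=/")
#   store.get_cookie_as_string ("http://example.com/")  # -> "sid=abc123"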
| 2.671875
| 3
|
HPCscripts/grid_response.py
|
vetlewi/AFRODITE
| 0
|
12777470
|
<filename>HPCscripts/grid_response.py<gh_stars>0
import numpy as np
from typing import Dict, List, Optional
from pathlib import Path
class MacroGenResponse:
def __init__(self, energy: Optional[float] = None,
nevent: Optional[int] = None):
self.energy = energy
self.nevent = nevent
self.outdir = "../data/"
self.outname_base = "grid_" # optionally: add eg sim_001_
self._base_geometry_cmd = "/control/execute setup_normal_run.mac"
    def compose(self):
        return '\n'.join(self.geometry() + self.run())
def save(self, fname):
fn = Path(fname)
fn.write_text(self.compose())
def geometry(self,
unit: str = "cm") -> List[str]:
string = [
self._base_geometry_cmd,
"/run/initialize",
""]
return string
def run(self) -> List[str]:
assert np.issubdtype(type(self.nevent), np.integer)
outname_base = Path(self.outdir)
fnout = Path(f"{self.outname_base}{self.energy}keV_n{self.nevent}")
fnout = outname_base / fnout.with_suffix(".root")
def basestring(energykeV, fnout, nevent):
res = ["# Particle type, position, energy...",
"/gps/particle gamma",
"/gps/number 1",
"",
"# Particle source distribution",
"/gps/pos/type Plane",
"/gps/pos/shape Ellipse",
"/gps/pos/centre 0. 0. 0. mm",
"/gps/pos/halfx 0.75 mm",
"/gps/pos/halfy 1.25 mm",
"/gps/ang/type iso",
"",
f"/gps/energy {energykeV} keV",
f"/OCL/setOutName {fnout}",
"",
"# Number of events to run",
f"/run/beamOn {nevent}",
]
return res
string = basestring(self.energy, fnout, self.nevent)
# flat_list = [item for sublist in string for item in sublist]
return string
if __name__ == "__main__":
energy_grid = np.arange(50, 1e4, 10, dtype=int)
    nevents = np.linspace(6e5, 3e6, len(energy_grid), dtype=int)
energy_grid = np.append(energy_grid, [int(1.2e4), int(1.5e4), int(2e4)])
nevents = np.append(nevents, [int(3e6), int(3e6), int(3e6)])
fnbase = Path("response_grid_macs")
fnbase.mkdir(exist_ok=True)
for i, (energy, nevent) in enumerate(zip(energy_grid, nevents)):
# print(f"Simulating gridpoint {i}")
macgen = MacroGenResponse(energy=energy, nevent=nevent)
macro = macgen.save(fnbase / f"grid_{i}.mac")
# create summary file with commands to run
# sorted by decreasing computation time (highest energies first)
indices = np.arange(len(energy_grid))
# np.random.shuffle(indices)
cmds = [f"./OCL {fnbase}/grid_{i}.mac > $LOGDIR/out.o$LAUNCHER_JID"
for i in indices[::-1]]
cmd_string = "\n".join(*[cmds])
fn_sum = Path("response_grid_cmds.txt")
fn_sum.write_text(cmd_string)
| 2.46875
| 2
|
pyjfuzz/core/pjf_encoder.py
|
zyLiu6707/PyJFuzz
| 0
|
12777471
|
<reponame>zyLiu6707/PyJFuzz<filename>pyjfuzz/core/pjf_encoder.py
"""
The MIT License (MIT)
Copyright (c) 2016 <NAME> <<EMAIL>>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NON INFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from string import printable as p
import json
import sys
import re
class PJFEncoder(object):
"""
Class that represent a JSON encoder / decoder
"""
@staticmethod
def json_encode(func):
"""
Decorator used to change the return value from PJFFactory.fuzzed, it makes the structure printable
"""
def func_wrapper(self, indent, utf8):
if utf8:
encoding = "\\x%02x"
else:
encoding = "\\u%04x"
hex_regex = re.compile(r"(\\\\x[a-fA-F0-9]{2})")
unicode_regex = re.compile(r"(\\u[a-fA-F0-9]{4})")
def encode_decode_all(d, _decode=True):
if type(d) == dict:
for k in d:
if type(d[k]) in [dict, list]:
if _decode:
d[k] = encode_decode_all(d[k])
else:
d[k] = encode_decode_all(d[k], _decode=False)
elif type(d[k]) == str:
if _decode:
d[k] = decode(d[k])
else:
d[k] = encode(d[k])
elif type(d) == list:
arr = []
for e in d:
if type(e) == str:
if _decode:
arr.append(decode(e))
else:
arr.append(encode(e))
elif type(e) in [dict, list]:
if _decode:
arr.append(encode_decode_all(e))
else:
arr.append(encode_decode_all(e, _decode=False))
else:
arr.append(e)
return arr
else:
if _decode:
return decode(d)
else:
return encode(d)
return d
def decode(x):
tmp = "".join(encoding % ord(c) if c not in p else c for c in x)
if sys.version_info >= (3, 0):
return str(tmp)
else:
for encoded in unicode_regex.findall(tmp):
tmp = tmp.replace(encoded, encoded.decode("unicode_escape"))
return unicode(tmp)
def encode(x):
for encoded in hex_regex.findall(x):
if sys.version_info >= (3, 0):
x = x.replace(encoded,
bytes(str(encoded).replace("\\\\x", "\\x"), "utf-8").decode("unicode_escape"))
else:
x = x.replace(encoded, str(encoded).replace("\\\\x", "\\x").decode("string_escape"))
return x
if indent:
return encode_decode_all("{0}".format(json.dumps(encode_decode_all(func(self)), indent=5)),
_decode=False)
else:
return encode_decode_all("{0}".format(json.dumps(encode_decode_all(func(self)))), _decode=False)
return func_wrapper
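# A minimal usage sketch (hypothetical class; in PyJFuzz the decorator is
# applied to PJFFactory.fuzzed):
#
#   class Dummy(object):
#       @PJFEncoder.json_encode
#       def fuzzed(self):
#           return {"key": "value"}
#
#   Dummy().fuzzed(indent=False, utf8=False)  # -> '{"key": "value"}'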
| 2.234375
| 2
|
code/functions.py
|
Grupo-de-Oceanografia-Costeira/TCC_Vini_Public
| 0
|
12777472
|
<filename>code/functions.py
import pandas as pd
import numpy as np
import collections
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
import matplotlib.mlab as mlab  # mlab.griddata requires matplotlib < 3.1
import geopandas as gp
def load(cnv):
"""
This function opens our .cnv file and reads it. It then creates a list
with five elements: two lists containing the file headers (that start with
* and with #), a list with variables, and a list of lists with the data itself.
Run like the following:
hd1, hd2, variables, datapoints = load('file')
"""
o = open(cnv)
r = o.readlines()
o.close()
hd1, hd2, variables, datapoints = [], [], [], []
for line in r:
if not line:
pass
elif line.startswith("*"):
hd1.append(line)
elif line.startswith("#"):
hd2.append(line)
if line.startswith("# name"):
line = line.split()
variables.append(line[4])
else:
float_list = []
line = line.split()
for item in line:
float_list.append(float(item))
datapoints.append(float_list)
    datapoints = list(filter(None, datapoints))
    df = pd.DataFrame(datapoints, columns=variables)
    return hd1, hd2, variables, datapoints
def split_stations(arg1, arg2, arg3=None, arg4=None, arg5=None):
"""
    arg1 is a list of lists, each list being a row of data, like the
    'datapoints' variable generated in the load() function. arg2 is a list of
    strings with the station names IN THE ORDER THEY WERE SAMPLED. This can
    be loaded from a .csv file. arg3 is a list of the variables that will be
    the columns for the resulting dataframes, as generated by the load()
    function. arg4 and arg5 are lists of the station latitudes and
    longitudes, in the same order as arg2.
"""
d = collections.OrderedDict()
for st in arg2:
d[st] = []
ix = 0
st_values = []
for line in arg1:
if line[1] >= 0.1:
line.append(arg2[ix]) # station names
line.append(arg4[ix]) # station lat
line.append(arg5[ix]) # station lon
st_values.append(line)
elif line[1] < 0.1:
if len(st_values) < 4:
st_values = []
elif len(st_values) >= 4:
for line in st_values:
d[arg2[ix]].append(line)
st_values = []
ix += 1
arg3.append("STATION")
arg3.append("LAT")
arg3.append("LONG")
for st in d:
d[st] = pd.DataFrame(d[st], columns=arg3)
return d
def remove_upcast(station):
depth = station["depSM:"]
up = depth.idxmax() + 1
station = station.loc[:up]
return station
def plot(arg1, arg2=None):
"""
Easy temperature, salinity and density multiplot
"""
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=True)
tem, dep, tim, sal, den = (
arg1["t090"],
arg1["depSM"],
arg1["timeJ"],
arg1["sal00"],
arg1["sigma-t00"],
)
i1 = interp1d(list(tem), (dep), kind="cubic")
i2 = interp1d(sal, dep, kind="cubic")
i3 = interp1d(den, dep, kind="cubic")
ax1.plot(tem, dep, "o", tem, i1(tem), "--", color="red")
ax1.set_ylabel("Depth [m]")
ax1.set_title("Temperature [deg C]")
ax2.plot(sal, dep, "o", sal, i2(sal), "--", color="blue")
ax2.set_title("Salinity [PSU]")
ax3.plot(den, dep, "o", den, i3(den), "--", color="green")
ax3.set_title("Density [kg/m^3]")
plt.ylim((-0.5, 8.0))
plt.gca().invert_yaxis()
title = str(arg1)
if arg2 is None:
plt.show()
else:
fname = arg2 + "/" + arg1["Station ID"][0] + ".png"
plt.savefig(fname)
def sectionplot_sal(arg, arg2=None, arg3=None):
# Arrays storing salinity data in sals list
sals = []
for i in arg:
sals.append(np.array(i["sal00:"]))
# Setting salinity range for colorbar
sal_range = np.arange(1, 40.1, 1)
# Arrays storing depth data in deps list
deps = []
for i in arg:
deps.append(np.array(i["depSM:"]))
# Setting the x axis values for salinity
x = np.array([])
ix = 0
for i in sals:
x = np.append(x, [ix] * len(i))
ix += 1
# Setting the y axis values for depth
y = np.array([])
ix = 0
for i in deps:
y = np.append(y, i)
ix += 1
# Setting the color values
z = np.concatenate(tuple(sals))
# Generating the gridded data
xi = np.linspace(np.min(x), np.max(x), 200)
yi = np.linspace(np.min(y), np.max(y), 200)
xi, yi = np.meshgrid(xi, yi)
    zi = mlab.griddata(
        x, y, z, xi, yi, interp="linear"
    )  # use interp="linear" if Natgrid raises an error
# Plotting the gridded data
plt.figure() # Starting the figure object
plt.pcolormesh(xi, yi, zi, vmin=z.min(), vmax=z.max()) # Adding the colour mesh
plt.contour(xi, yi, zi, colors="k") # Contour lines
plt.scatter(x, y, c=z, vmin=z.min(), vmax=z.max()) # Adding the scatter points
plt.xticks(range(0, len(arg) + 1), ["Estacao " + i["STATION"][0][2:] for i in arg])
plt.colorbar().set_label("Salinidade")
plt.axis([np.min(x), np.max(x), np.min(y), np.max(y)])
plt.gca().invert_yaxis()
plt.ylabel("Profundidade (m)")
if arg2:
plt.title(arg2)
if arg3:
plt.savefig(arg3 + arg2.split()[0].strip() + "_section_", transparent=True)
else:
plt.show()
| 3.25
| 3
|
Python3/0099-Recover-Binary-Search-Tree/soln.py
|
wyaadarsh/LeetCode-Solutions
| 5
|
12777473
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def recoverTree(self, root):
"""
:type root: TreeNode
:rtype: void Do not return anything, modify root in-place instead.
"""
# do in-order traverse
swap = [None, None]
cur = root
stack = []
pre = TreeNode(float('-inf'))
while cur or stack:
while cur:
stack.append(cur)
cur = cur.left
cur = stack.pop()
if cur.val < pre.val:
if not swap[0]: swap[0] = pre
swap[1] = cur
pre = cur
cur = cur.right
swap[0].val, swap[1].val = swap[1].val, swap[0].val
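# A quick sanity check (TreeNode as defined in the header comment; the values
# 1 and 3 of the valid BST [2, 1, 3] have been swapped):
#   root = TreeNode(2); root.left = TreeNode(3); root.right = TreeNode(1)
#   Solution().recoverTree(root)
#   (root.left.val, root.val, root.right.val)  # -> (1, 2, 3)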
| 3.703125
| 4
|
helx/rl/memory.py
|
epignatelli/helx
| 1
|
12777474
|
<gh_stars>1-10
import abc
import logging
from collections import deque
from typing import Callable, NamedTuple
import dm_env
import jax
import jax.numpy as jnp
from dm_env import specs
from helx.jax import device_array
from helx.random import PRNGSequence
from helx.typing import Action, Batch, Discount, Key, Observation, Reward, TraceDecay
from jaxlib.xla_extension import Device
class Transition(NamedTuple):
"""A (s, a, r, s', a', γ, λ) transition with discount and lambda factors"""
s: Observation # observation at t=0
a: Action # actions at t=0
r: Reward # rewards at t=1
    s1: Observation  # observation at t=1 (note multistep)
a1: Action # action at t=1
g: Discount = 1.0 # discount factor
    l: TraceDecay = 1.0  # trace decay for lambda returns
class Trajectory(NamedTuple):
"""A set of batched transitions"""
observations: Batch[Observation] # [T + 1, *obs.shape]
actions: Batch[Action] # [T, 1] if off-policy, [T + 1, 1] otherwise
rewards: Batch[Reward] # [T, 1]
discounts: Batch[Discount] = None # [T, 1]
trace_decays: Batch[TraceDecay] = None # [T, 1]
class IBuffer(abc.ABC):
@abc.abstractmethod
def add(
self,
timestep: dm_env.TimeStep,
action: int,
new_timestep: dm_env.TimeStep,
preprocess=lambda x: x,
) -> None:
raise NotImplementedError
@abc.abstractmethod
def full(self) -> bool:
raise NotImplementedError
@abc.abstractmethod
def sample(self, n: int, rng: Key = None) -> Trajectory:
raise NotImplementedError
class ReplayBuffer(IBuffer):
"""A replay buffer used for Experience Replay (ER):
<NAME>., 1993, https://apps.dtic.mil/sti/pdfs/ADA261434.pdf.
This type of buffer is usually used
with off-policy methods, such as DQN or ACER.
    Note that, to save memory, it stores only the two extremes
    of the trajectory and accumulates the discounted rewards
    at each time step, to calculate the value target.
    However, this does not allow for off-policy corrections with n-step methods
    at consumption time. To perform off-policy corrections, please store
    the action probabilities for each time step in the buffer.
"""
def __init__(
self,
capacity: int,
n_steps: int = 1,
seed: int = 0,
device: Device = None,
):
# public:
self.capacity = capacity
self.n_steps = n_steps
self.seed = seed
self.device = device
self.trajectories = deque(maxlen=capacity)
# private:
self._rng = jax.random.PRNGKey(seed)
self._reset()
def __len__(self):
return len(self.trajectories)
def __getitem__(self, idx):
return self.trajectories[idx]
def full(self) -> bool:
return len(self) == self.capacity
def collecting(self) -> bool:
return self._t < self.n_steps
def add(
self,
timestep: dm_env.TimeStep,
action: int,
new_timestep: dm_env.TimeStep,
preprocess=lambda x: x,
) -> None:
# start of a new episode
if not self.collecting():
self.trajectories.append(self._current)
self._reset()
# add new transition to the trajectory
store = lambda x: device_array(x, device=self.device)
self._current.observations[self._t] = preprocess(store(timestep.observation))
self._current.actions[self._t] = store(int(action))
self._current.rewards[self._t] = store(float(new_timestep.reward))
self._current.discounts[self._t] = store(float(new_timestep.discount))
self._t += 1
# ready to store, just add final observation
        if not self.collecting():
            self._current.observations[self._t] = preprocess(
                jnp.array(new_timestep.observation, dtype=jnp.float32)
            )
# if not enough samples, and we can't sample the env anymore, reset
elif new_timestep.last():
self._reset()
return
    def sample(self, n: int, rng: Key = None, device: Device = None) -> Trajectory:
        high = len(self) - n
        if high <= 0:
            logging.warning(
                "The buffer contains fewer elements than requested: {} <= {}\n"
                "Returning all the available elements".format(len(self), n)
            )
            indices = range(len(self))
        else:
            # draw a fresh key if none was provided, then sample indices
            if rng is None:
                rng, _ = jax.random.split(self._rng)
            indices = jax.random.randint(rng, (n,), 0, high)
collate = lambda x: device_array(x, device=device)
obs = collate([self.trajectories[idx].observations for idx in indices])
actions = collate([self.trajectories[idx].actions for idx in indices])
rewards = collate([self.trajectories[idx].rewards for idx in indices])
discounts = collate([self.trajectories[idx].discounts for idx in indices])
# traces = collate([self.trajectories[idx].trace_decays for idx in indices])
return Trajectory(obs, actions, rewards, discounts)
def _reset(self):
self._t = 0
self._current = Trajectory(
observations=[None] * (self.n_steps + 1),
actions=[None] * self.n_steps,
rewards=[None] * self.n_steps,
discounts=[None] * self.n_steps,
trace_decays=[None] * self.n_steps,
)
class OnlineBuffer(IBuffer):
"""A replay buffer that stores a single n-step trajectory
of experience.
This type of buffer is most commonly used with online methods,
generally on-policy methods, such as A2C.
"""
def __init__(
self,
observation_spec: specs.Array,
n_steps: int = 1,
batch_size: int = 1,
):
# public:
self.observation_spec = observation_spec
self.n_steps = n_steps
self.batch_size = batch_size
# private:
self._reset()
def full(self) -> bool:
return self._t == self.n_steps - 1
def add(
self,
timestep: dm_env.TimeStep,
action: int,
new_timestep: dm_env.TimeStep,
trace_decay: TraceDecay = 1.0,
preprocess: Callable[[Observation], Observation] = lambda x: x,
) -> None:
# if buffer is full, prepare for new trajectory
if self.full():
self._reset()
# add new transition to the trajectory
self.trajectory.observations[self._t] = preprocess(
jnp.array(timestep.observation, dtype=jnp.float32)
)
self.trajectory.actions[self._t] = action
self.trajectory.rewards[self._t] = new_timestep.reward
self.trajectory.discounts[self._t] = new_timestep.discount
self.trajectory.trace_decays[self._t] = trace_decay
self._t += 1
# if we have enough transitions, add last obs and return
if self.full():
self.trajectory.observations[self._t] = preprocess(
jnp.array(new_timestep.observation, dtype=jnp.float32)
)
# if we do not have enough transitions, and can't sample more, retry
elif new_timestep.last():
self._reset()
return
def sample(self, n: int = 1, rng: Key = None) -> Trajectory:
return self.trajectory
def _reset(self):
self._t = 0
        self.trajectory = Trajectory(
            observations=jnp.empty(
                (self.n_steps + 1, self.batch_size, *self.observation_spec.shape)
            ),
            actions=jnp.empty((self.n_steps, self.batch_size, 1)),
            rewards=jnp.empty((self.n_steps, self.batch_size, 1)),
            discounts=jnp.empty((self.n_steps, self.batch_size, 1)),
            trace_decays=jnp.empty((self.n_steps, self.batch_size, 1)),
        )
return
class EpisodicMemory(IBuffer):
def __init__(self, seed: int = 0):
# public:
self.states = []
# private:
self._terminal = False
self._rng = PRNGSequence(seed)
self._reset()
def __len__(self):
return len(self.states)
def __getitem__(self, idx):
return self.states[idx]
def full(self) -> bool:
return self._terminal
def add(
self,
timestep: dm_env.TimeStep,
action: int,
new_timestep: dm_env.TimeStep,
preprocess: Callable = lambda x: x,
) -> None:
# if buffer is full, prepare for new trajectory
if self.full():
self._reset()
# collect experience
self.states.append(preprocess(timestep.observation))
self._terminal = new_timestep.last()
# if transition is terminal, append last state
if self.full():
self.states.append(preprocess(new_timestep.observation))
return
def sample(self, n: int, rng: Key = None) -> Trajectory:
key = next(self._rng)
indices = jax.random.randint(key, (n,), 0, len(self))
return [self.states[idx] for idx in indices]
def _reset(self):
self._terminal = False
self.states = []
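# A minimal usage sketch (hypothetical `env` and `policy`; dm_env interface):
#
#   buffer = ReplayBuffer(capacity=10_000, n_steps=1)
#   timestep = env.reset()
#   while not timestep.last():
#       action = policy(timestep.observation)
#       new_timestep = env.step(action)
#       buffer.add(timestep, action, new_timestep)
#       timestep = new_timestep
#   batch = buffer.sample(n=32)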
| 2.03125
| 2
|
setup.py
|
FongAnthonyM/python-hdf5objects
| 0
|
12777475
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
""" setup.py
The setup for this package.
"""
# Package Header #
from src.hdf5objects.__header__ import *
# Header #
__author__ = __author__
__credits__ = __credits__
__maintainer__ = __maintainer__
__email__ = __email__
# Imports #
# Standard Libraries #
import io
import re
from glob import glob
from os.path import basename
from os.path import dirname
from os.path import join
from os.path import splitext
# Third-Party Packages #
from setuptools import find_packages
from setuptools import setup
# Definitions #
# Functions #
def read(*names, **kwargs):
with io.open(
join(dirname(__file__), *names),
encoding=kwargs.get('encoding', 'utf8')
) as fh:
return fh.read()
# Main #
setup(
name=__package_name__,
version=__version__,
license=__license__,
description='Extra fileobjects for handling and typing HDF5 files.',
long_description='%s\n%s' % (
re.compile('^.. start-badges.*^.. end-badges', re.M | re.S).sub('', read('README.rst')),
re.sub(':[a-z]+:`~?(.*?)`', r'``\1``', read('CHANGELOG.rst'))
),
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/fonganthonym/python-hdf5objects',
packages=find_packages('src'),
package_dir={'': 'src'},
py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
include_package_data=True,
zip_safe=False,
classifiers=[
# complete classifier list: http://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: Unix',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
# uncomment if you test on these interpreters:
# 'Programming Language :: Python :: Implementation :: IronPython',
# 'Programming Language :: Python :: Implementation :: Jython',
# 'Programming Language :: Python :: Implementation :: Stackless',
'Topic :: Utilities',
],
project_urls={
'Documentation': 'https://python-hdf5objects.readthedocs.io/',
'Changelog': 'https://python-hdf5objects.readthedocs.io/en/latest/changelog.html',
'Issue Tracker': 'https://github.com/fonganthonym/python-hdf5objects/issues',
},
keywords=[
# eg: 'keyword1', 'keyword2', 'keyword3',
],
python_requires='>=3.6',
install_requires=[
'baseobjects>=1.5.1', 'classversioning', 'framestructure', 'dspobjects', 'h5py>=3.2.1', 'numpy',
'multipledispatch', 'pytz', 'tzlocal', 'bidict'
],
extras_require={
"dev": ['pytest>=6.2.3'],
},
entry_points={
'console_scripts': [
'hdf5objects = hdf5objects.cli:main',
]
},
)
| 1.789063
| 2
|
OOP/car.py
|
dimi-fn/Various-Data-Science-Scripts
| 8
|
12777476
|
class Car:
'''
* Creating a class called "Car"
* Properties/attributes: brand, colour, horses, country production, current speed
* "current_speed" is set to 0, unless other value is assigned
* Method definitions:
* def move_car() moves the car by 10
* def accelerate_car() accelerates the car by the given value and adds that speed to "current_speed"
* def stop_car() sets "current_speed" to 0
* def car_details() returns the properties of the "Car" class
'''
def __init__(self, brand, colour, horses, country_production, current_speed = 0):
self.brand = brand
self.colour= colour
self.horses = horses
self.country_production = country_production
self.current_speed = current_speed
def move_car(self):
self.current_speed += 10
def accelerate_car(self, value):
self.current_speed += value
def stop_car(self):
self.current_speed = 0
def car_details(self):
print ("Brand: {}\nColour: {}\nHorses: {}\nCountry production: {}\nCurrent speed: {}\n".format(
self.brand, self.colour, self.horses, self.country_production, self.current_speed))
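# A short usage sketch (illustrative values):
#
#   car = Car("Fiat", "red", 95, "Italy")
#   car.move_car()          # current_speed: 0 -> 10
#   car.accelerate_car(25)  # current_speed: 10 -> 35
#   car.car_details()       # prints all five properties
#   car.stop_car()          # current_speed back to 0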
| 4.3125
| 4
|
helper/pointCloud.py
|
lidiaxp/plannie
| 6
|
12777477
|
import rospy
from sensor_msgs.msg import PointCloud2
from sensor_msgs import point_cloud2
from geometry_msgs.msg import PoseArray, Pose
from tf.transformations import euler_from_quaternion
import time
import math
import struct
import ctypes
from scipy import ndimage
import matplotlib.pyplot as plt
from nav_msgs.msg import Odometry
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
class identifyObstacle3D:
def __init__(self):
self.currentPosX, self.currentPosY, self.currentPosZ, self.currentPosYaw = 2, 2, 2, 0
self.count = 0
self.unic = 0
self.pub = rospy.Publisher('/build_map3D', PoseArray, queue_size=1)
self.all = []
self.obsX, self.obsY, self.obsZ = [], [], []
self.t = time.time()
self.number_of_sampling = 30
rospy.init_node("obstacle3D")
print("Start")
# _ = rospy.Subscriber("/uav1/velodyne/scan", PointCloud2, self.callbackObstacle)
_ = rospy.Subscriber("/uav1/rs_d435/depth/points", PointCloud2, self.callbackObstacle)
_ = rospy.Subscriber("/uav1/odometry/odom_main", Odometry, self.callbackPosicao)
def callbackPosicao(self, odom):
_, _, yaw = euler_from_quaternion([odom.pose.pose.orientation.x, odom.pose.pose.orientation.y, odom.pose.pose.orientation.z, odom.pose.pose.orientation.w])
if self.count == 0:
self.lastYaw = yaw
        self.currentPosX = odom.pose.pose.position.x
        self.currentPosY = odom.pose.pose.position.y
        self.currentPosZ = odom.pose.pose.position.z
        self.currentPosYaw = yaw
self.count += 1
def rotationMatrix(self, psi0, x1, y1, z1):
r = [[np.cos(psi0), np.sin(psi0) * -1, 0], [np.sin(psi0), np.cos(psi0), 0], [0, 0, 1]]
pos_local = np.dot(np.transpose(np.asarray(r)), np.asarray([x1, y1, z1]))
return pos_local
def callbackObstacle(self, data):
print(time.time()-self.t)
if self.count > 0:
a4, a5, a6 = [], [], []
a1, a2, a3 = [], [], []
x, y, z = [], [], []
abc = []
matriz = np.zeros((101, 101))
xyz = np.array([[0,0,0]])
gen = point_cloud2.read_points(data, skip_nans=True)
int_data = list(gen)
            for pt in int_data:  # pt: one (x, y, z, ...) point from the cloud
                if round(pt[2]) > 0 and [round(pt[0]), round(-pt[1]), round(pt[2])] not in abc:
                    a4.append(round(pt[0]))
                    a5.append(round(-pt[1]))
                    a6.append(round(pt[2]))
                    abc.append([round(pt[0]), round(-pt[1]), round(pt[2])])
pl = self.rotationMatrix(0, a4, a5, a6)
for i1, i2, i3 in zip(pl[0], pl[1], pl[2]):
a1.append(i2)
a2.append(i1)
a3.append(i3)
xyz = np.append(xyz,[[i2, i1, i3]], axis = 0)
self.count += 1
if 8<time.time()-self.t<13:
ax = plt.axes(projection = "3d")
ax.plot3D(a1, a2, a3, 'y.')
ax.plot3D([self.currentPosX], [self.currentPosY], [self.currentPosZ], ".r")
ax.set_xlim(0,20)
ax.set_ylim(0,20)
ax.set_zlim(0,20)
ax.set_xlabel("x (m)" + str(self.currentPosX))
ax.set_ylabel("y (m)" + str(self.currentPosY))
ax.set_zlabel("z (m)" + str(self.currentPosZ))
ax.view_init(50, -137)
plt.pause(0.01)
plt.show()
def main():
identifyObstacle3D()
try:
rospy.spin()
except rospy.ROSInterruptException:
pass
if __name__ == '__main__':
main()
| 2.25
| 2
|
src/httpdaemon/request.py
|
jamessimmonds/PyHTTPDaemon
| 0
|
12777478
|
import re
class HttpRequest:
"""
Parser for HTTP requests
"""
def __init__(self, request):
"""
Accepts an HTTP request bytestring
"""
# Convert from bytes to string
self.request = request.decode("utf-8")
self.requestline = re.match("GET .* HTTP/1.1", self.request).group(0)
        self.url = re.search(r"\s.*\s", self.requestline).group(0)[1:-1]
self.params = {}
if '?' in self.url:
elems = self.url.split('?')
self.path = elems[0]
self.query = elems[1]
querylines = self.query.split('&')
for line in querylines:
linekey, lineval = line.split('=')
self.params[linekey] = lineval
else:
self.path = self.url
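# A minimal usage sketch (hypothetical request bytes):
#
#   raw = b"GET /search?q=python&page=2 HTTP/1.1\r\nHost: localhost\r\n\r\n"
#   req = HttpRequest(raw)
#   req.path    # -> '/search'
#   req.params  # -> {'q': 'python', 'page': '2'}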
| 3.40625
| 3
|
server/stylegan2_hypotheses_explorer/models/style_configuration_style_array.py
|
HealthML/StyleGAN2-Hypotheses-Explorer
| 2
|
12777479
|
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import Dict, List # noqa: F401
from ..util import deserialize_model
from .base_model_ import Base_Model
class StyleConfigurationStyleArray(Base_Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, style1: int = None, style2: int = None, proportion_style1: float = None): # noqa: E501
"""StyleConfigurationStyleArray - a model defined in Swagger
:param style1: The style1 of this StyleConfigurationStyleArray. # noqa: E501
:type style1: int
:param style2: The style2 of this StyleConfigurationStyleArray. # noqa: E501
:type style2: int
:param proportion_style1: The proportion_style1 of this StyleConfigurationStyleArray. # noqa: E501
:type proportion_style1: float
"""
self.swagger_types = {
'style1': int,
'style2': int,
'proportion_style1': float
}
self.attribute_map = {
'style1': 'style1',
'style2': 'style2',
'proportion_style1': 'proportionStyle1'
}
self._style1 = style1
self._style2 = style2
self._proportion_style1 = proportion_style1
@classmethod
def from_dict(cls, dikt) -> 'StyleConfigurationStyleArray':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The StyleConfiguration_styleArray of this StyleConfigurationStyleArray. # noqa: E501
:rtype: StyleConfigurationStyleArray
"""
return deserialize_model(dikt, cls)
@property
def style1(self) -> int:
"""Gets the style1 of this StyleConfigurationStyleArray.
:return: The style1 of this StyleConfigurationStyleArray.
:rtype: int
"""
return self._style1
@style1.setter
def style1(self, style1: int):
"""Sets the style1 of this StyleConfigurationStyleArray.
:param style1: The style1 of this StyleConfigurationStyleArray.
:type style1: int
"""
if style1 is None:
raise ValueError("Invalid value for `style1`, must not be `None`") # noqa: E501
self._style1 = style1
@property
def style2(self) -> int:
"""Gets the style2 of this StyleConfigurationStyleArray.
:return: The style2 of this StyleConfigurationStyleArray.
:rtype: int
"""
return self._style2
@style2.setter
def style2(self, style2: int):
"""Sets the style2 of this StyleConfigurationStyleArray.
:param style2: The style2 of this StyleConfigurationStyleArray.
:type style2: int
"""
self._style2 = style2
@property
def proportion_style1(self) -> float:
"""Gets the proportion_style1 of this StyleConfigurationStyleArray.
:return: The proportion_style1 of this StyleConfigurationStyleArray.
:rtype: float
"""
return self._proportion_style1
@proportion_style1.setter
def proportion_style1(self, proportion_style1: float):
"""Sets the proportion_style1 of this StyleConfigurationStyleArray.
:param proportion_style1: The proportion_style1 of this StyleConfigurationStyleArray.
:type proportion_style1: float
"""
self._proportion_style1 = proportion_style1
| 2.109375
| 2
|
rlgraph/agents/sac_agent.py
|
RLGraph/RLGraph
| 290
|
12777480
|
# Copyright 2018/2019 The RLgraph authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import, division, print_function
import numpy as np
from rlgraph import get_backend
from rlgraph.agents import Agent
from rlgraph.components import Component, Synchronizable, Memory, ValueFunction, ContainerMerger, PrioritizedReplay
from rlgraph.components.loss_functions.sac_loss_function import SACLossFunction
from rlgraph.spaces import FloatBox, BoolBox, IntBox, ContainerSpace
from rlgraph.spaces.space_utils import sanity_check_space
from rlgraph.utils import RLGraphError
from rlgraph.utils.decorators import rlgraph_api, graph_fn
from rlgraph.utils.ops import flatten_op, DataOpTuple
from rlgraph.utils.util import strip_list, force_list
if get_backend() == "tf":
import tensorflow as tf
elif get_backend() == "pytorch":
import torch
class SyncSpecification(object):
"""Describes a synchronization schedule, used to update the target value weights. The target values are gradually
updates using exponential moving average as suggested by the paper."""
def __init__(self, sync_interval=None, sync_tau=None):
"""
Arguments:
sync_interval: How often to update the target.
sync_tau: The smoothing constant to use in the averaging. Setting to 1 replaces the values each iteration.
"""
self.sync_interval = sync_interval
self.sync_tau = sync_tau
class SACAgentComponent(Component):
def __init__(self, agent, policy, q_function, preprocessor, memory, discount, initial_alpha, target_entropy,
optimizer, vf_optimizer, alpha_optimizer, q_sync_spec, num_q_functions=2):
super(SACAgentComponent, self).__init__(nesting_level=0)
self.agent = agent
self._policy = policy
self._preprocessor = preprocessor
self._memory = memory
self._q_functions = [q_function]
self._q_functions += [q_function.copy(scope="{}-{}".format(q_function.scope, i + 1), trainable=True)
for i in range(num_q_functions - 1)]
# Set number of return values for get_q_values graph_fn.
self.graph_fn_num_outputs["_graph_fn_get_q_values"] = num_q_functions
for q in self._q_functions:
# TODO: is there a better way to do this?
if "synchronizable" not in q.sub_components:
q.add_components(Synchronizable(), expose_apis="sync")
self._target_q_functions = [q.copy(scope="target-" + q.scope, trainable=True) for q in self._q_functions]
for target_q in self._target_q_functions:
# TODO: is there a better way to do this?
if "synchronizable" not in target_q.sub_components:
target_q.add_components(Synchronizable(), expose_apis="sync")
self._optimizer = optimizer
self.vf_optimizer = vf_optimizer
self.alpha_optimizer = alpha_optimizer
self.initial_alpha = initial_alpha
self.log_alpha = None
self.target_entropy = target_entropy
self.loss_function = SACLossFunction(target_entropy=target_entropy, discount=discount,
num_q_functions=num_q_functions)
memory_items = ["states", "actions", "rewards", "next_states", "terminals"]
self._merger = ContainerMerger(*memory_items)
q_names = ["q_{}".format(i) for i in range(len(self._q_functions))]
self._q_vars_merger = ContainerMerger(*q_names, scope="q_vars_merger")
self.add_components(policy, preprocessor, memory, self._merger, self.loss_function,
optimizer, vf_optimizer, self._q_vars_merger) # , self._q_vars_splitter)
self.add_components(*self._q_functions)
self.add_components(*self._target_q_functions)
if self.alpha_optimizer is not None:
self.add_components(self.alpha_optimizer)
self.steps_since_last_sync = None
self.q_sync_spec = q_sync_spec
self.env_action_space = None
self.episode_reward = None
def check_input_spaces(self, input_spaces, action_space=None):
for s in ["states", "actions", "env_actions", "preprocessed_states", "rewards", "terminals"]:
sanity_check_space(input_spaces[s], must_have_batch_rank=True)
self.env_action_space = input_spaces["env_actions"].flatten()
def create_variables(self, input_spaces, action_space=None):
self.steps_since_last_sync = self.get_variable("steps_since_last_sync", dtype="int", initializer=0)
self.log_alpha = self.get_variable("log_alpha", dtype="float", initializer=np.log(self.initial_alpha))
self.episode_reward = self.get_variable("episode_reward", shape=(), initializer=0.0)
@rlgraph_api
def get_policy_weights(self):
return self._policy.variables()
@rlgraph_api
def get_q_weights(self):
merged_weights = self._q_vars_merger.merge(*[q.variables() for q in self._q_functions])
return merged_weights
@rlgraph_api(must_be_complete=False)
def set_policy_weights(self, weights):
return self._policy.sync(weights)
""" TODO: need to define the input space
@rlgraph_api(must_be_complete=False)
def set_q_weights(self, q_weights):
split_weights = self._q_vars_splitter.call(q_weights)
assert len(split_weights) == len(self._q_functions)
update_ops = [q.sync(q_weights) for q_weights, q in zip(split_weights, self._q_functions)]
update_ops.extend([q.sync(q_weights) for q_weights, q in zip(split_weights, self._target_q_functions)])
return tuple(update_ops)
"""
@rlgraph_api
def preprocess_states(self, states):
return self._preprocessor.preprocess(states)
@rlgraph_api
def insert_records(self, preprocessed_states, env_actions, rewards, next_states, terminals):
records = self._merger.merge(preprocessed_states, env_actions, rewards, next_states, terminals)
return self._memory.insert_records(records)
@rlgraph_api
def update_from_memory(self, batch_size=64, time_percentage=None):
records, sample_indices, importance_weights = self._memory.get_records(batch_size)
result = self.update_from_external_batch(
records["states"], records["actions"], records["rewards"], records["terminals"],
records["next_states"], importance_weights, time_percentage
)
if isinstance(self._memory, PrioritizedReplay):
update_pr_step_op = self._memory.update_records(sample_indices, result["critic_loss_per_item"])
result["update_pr_step_op"] = update_pr_step_op
return result
@rlgraph_api
def update_from_external_batch(
self, preprocessed_states, env_actions, rewards, terminals, next_states, importance_weights,
time_percentage=None
):
actions = self._graph_fn_one_hot(env_actions)
actor_loss, actor_loss_per_item, critic_loss, critic_loss_per_item, alpha_loss, alpha_loss_per_item = \
self.get_losses(preprocessed_states, actions, rewards, terminals, next_states, importance_weights)
policy_vars = self._policy.variables()
q_vars = [q_func.variables() for q_func in self._q_functions]
merged_q_vars = self._q_vars_merger.merge(*q_vars)
critic_step_op = self.vf_optimizer.step(merged_q_vars, critic_loss, critic_loss_per_item, time_percentage)
actor_step_op = self._optimizer.step(policy_vars, actor_loss, actor_loss_per_item, time_percentage)
if self.target_entropy is not None:
alpha_step_op = self._graph_fn_update_alpha(alpha_loss, alpha_loss_per_item, time_percentage)
else:
alpha_step_op = self._graph_fn_no_op()
# TODO: optimizer for alpha
sync_op = self.sync_targets()
# Increase the global training step counter.
alpha_step_op = self._graph_fn_training_step(alpha_step_op)
return dict(
actor_step_op=actor_step_op,
critic_step_op=critic_step_op,
sync_op=sync_op,
alpha_step_op=alpha_step_op,
actor_loss=actor_loss,
actor_loss_per_item=actor_loss_per_item,
critic_loss=critic_loss,
critic_loss_per_item=critic_loss_per_item,
alpha_loss=alpha_loss,
alpha_loss_per_item=alpha_loss_per_item
)
@graph_fn(flatten_ops=True, split_ops=True, add_auto_key_as_first_param=True)
def _graph_fn_one_hot(self, key, env_actions):
if isinstance(self.env_action_space[key], IntBox):
env_actions = tf.one_hot(env_actions, depth=self.env_action_space[key].num_categories, axis=-1)
return env_actions
@graph_fn(requires_variable_completeness=True)
def _graph_fn_update_alpha(self, alpha_loss, alpha_loss_per_item, time_percentage=None):
alpha_step_op = self.alpha_optimizer.step(
DataOpTuple([self.log_alpha]), alpha_loss, alpha_loss_per_item, time_percentage
)
return alpha_step_op
@rlgraph_api # `returns` are determined in ctor
def _graph_fn_get_q_values(self, preprocessed_states, actions, target=False):
backend = get_backend()
flat_actions = flatten_op(actions)
actions = []
for flat_key, action_component in self._policy.action_space.flatten().items():
actions.append(flat_actions[flat_key])
if backend == "tf":
actions = tf.concat(actions, axis=-1)
elif backend == "pytorch":
actions = torch.cat(actions, dim=-1)
q_funcs = self._q_functions if target is False else self._target_q_functions
# We do not concat states yet because we might pass states through a conv stack before merging it
# with actions.
return tuple(q.state_action_value(preprocessed_states, actions) for q in q_funcs)
@rlgraph_api
def get_losses(self, preprocessed_states, actions, rewards, terminals, next_states, importance_weights):
# TODO: internal states
samples_next = self._policy.get_action_and_log_likelihood(next_states, deterministic=False)
next_sampled_actions = samples_next["action"]
log_probs_next_sampled = samples_next["log_likelihood"]
q_values_next_sampled = self.get_q_values(
next_states, next_sampled_actions, target=True
)
q_values = self.get_q_values(preprocessed_states, actions)
samples = self._policy.get_action_and_log_likelihood(preprocessed_states, deterministic=False)
sampled_actions = samples["action"]
log_probs_sampled = samples["log_likelihood"]
q_values_sampled = self.get_q_values(preprocessed_states, sampled_actions)
alpha = self._graph_fn_compute_alpha()
return self.loss_function.loss(
alpha,
log_probs_next_sampled,
q_values_next_sampled,
q_values,
log_probs_sampled,
q_values_sampled,
rewards,
terminals
)
@rlgraph_api
def get_preprocessed_state_and_action(self, states, deterministic=False):
preprocessed_states = self._preprocessor.preprocess(states)
return self.action_from_preprocessed_state(preprocessed_states, deterministic)
@rlgraph_api
def action_from_preprocessed_state(self, preprocessed_states, deterministic=False):
out = self._policy.get_action(preprocessed_states, deterministic=deterministic)
return out["action"], preprocessed_states
@rlgraph_api(requires_variable_completeness=True)
def reset_targets(self):
ops = (target_q.sync(q.variables()) for q, target_q in zip(self._q_functions, self._target_q_functions))
return tuple(ops)
@rlgraph_api(requires_variable_completeness=True)
def sync_targets(self):
should_sync = self._graph_fn_get_should_sync()
return self._graph_fn_sync(should_sync)
@rlgraph_api
def get_memory_size(self):
return self._memory.get_size()
@graph_fn
def _graph_fn_compute_alpha(self):
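        # log(alpha) is stored instead of alpha itself so that exponentiating
        # keeps the temperature strictly positive while the optimizer updates
        # an unconstrained variable.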
backend = get_backend()
if backend == "tf":
return tf.exp(self.log_alpha)
elif backend == "pytorch":
return torch.exp(self.log_alpha)
# TODO: Move this into generic AgentRootComponent.
@graph_fn
def _graph_fn_training_step(self, other_step_op=None):
if self.agent is not None:
add_op = tf.assign_add(self.agent.graph_executor.global_training_timestep, 1)
            op_list = [add_op] + ([other_step_op] if other_step_op is not None else [])
with tf.control_dependencies(op_list):
return tf.no_op() if other_step_op is None else other_step_op
else:
return tf.no_op() if other_step_op is None else other_step_op
@graph_fn(returns=1, requires_variable_completeness=True)
def _graph_fn_get_should_sync(self):
if get_backend() == "tf":
inc_op = tf.assign_add(self.steps_since_last_sync, 1)
should_sync = inc_op >= self.q_sync_spec.sync_interval
def reset_op():
op = tf.assign(self.steps_since_last_sync, 0)
with tf.control_dependencies([op]):
return tf.no_op()
sync_op = tf.cond(
pred=inc_op >= self.q_sync_spec.sync_interval,
true_fn=reset_op,
false_fn=tf.no_op
)
with tf.control_dependencies([sync_op]):
return tf.identity(should_sync)
else:
raise NotImplementedError("TODO")
@graph_fn(returns=1, requires_variable_completeness=True)
def _graph_fn_sync(self, should_sync):
assign_ops = []
tau = self.q_sync_spec.sync_tau
if tau != 1.0:
all_source_vars = [source.get_variables(collections=None, custom_scope_separator="-") for source in self._q_functions]
all_dest_vars = [destination.get_variables(collections=None, custom_scope_separator="-") for destination in self._target_q_functions]
for source_vars, dest_vars in zip(all_source_vars, all_dest_vars):
for (source_key, source_var), (dest_key, dest_var) in zip(sorted(source_vars.items()), sorted(dest_vars.items())):
assign_ops.append(tf.assign(dest_var, tau * source_var + (1.0 - tau) * dest_var))
else:
all_source_vars = [source.variables() for source in self._q_functions]
for source_vars, destination in zip(all_source_vars, self._target_q_functions):
assign_ops.append(destination.sync(source_vars))
assert len(assign_ops) > 0
grouped_op = tf.group(assign_ops)
def assign_op():
# Make sure we are returning no_op as opposed to reference
with tf.control_dependencies([grouped_op]):
return tf.no_op()
cond_assign_op = tf.cond(should_sync, true_fn=assign_op, false_fn=tf.no_op)
with tf.control_dependencies([cond_assign_op]):
return tf.no_op()
@graph_fn
def _graph_fn_no_op(self):
return tf.no_op()
@rlgraph_api
def get_global_timestep(self):
return self.read_variable(self.agent.graph_executor.global_timestep)
@rlgraph_api
def _graph_fn_update_global_timestep(self, increment):
if get_backend() == "tf":
add_op = tf.assign_add(self.agent.graph_executor.global_timestep, increment)
return add_op
        elif get_backend() == "pytorch":
self.agent.graph_executor.global_timestep += increment
return self.agent.graph_executor.global_timestep
@rlgraph_api
def _graph_fn_get_episode_reward(self):
return self.episode_reward
@rlgraph_api
def _graph_fn_set_episode_reward(self, episode_reward):
return tf.assign(self.episode_reward, episode_reward)
class SACAgent(Agent):
def __init__(
self,
state_space,
action_space,
discount=0.98,
preprocessing_spec=None,
network_spec=None,
internal_states_space=None,
policy_spec=None,
value_function_spec=None,
execution_spec=None,
optimizer_spec=None,
value_function_optimizer_spec=None,
observe_spec=None,
update_spec=None,
summary_spec=None,
saver_spec=None,
auto_build=True,
name="sac-agent",
double_q=True,
initial_alpha=1.0,
gumbel_softmax_temperature=1.0,
target_entropy=None,
memory_spec=None,
value_function_sync_spec=None
):
"""
        This is an implementation of the Soft Actor-Critic (SAC) algorithm.
Paper: http://arxiv.org/abs/1801.01290
Args:
state_space (Union[dict,Space]): Spec dict for the state Space or a direct Space object.
action_space (Union[dict,Space]): Spec dict for the action Space or a direct Space object.
preprocessing_spec (Optional[list,PreprocessorStack]): The spec list for the different necessary states
preprocessing steps or a PreprocessorStack object itself.
discount (float): The discount factor (gamma).
network_spec (Optional[list,NeuralNetwork]): Spec list for a NeuralNetwork Component or the NeuralNetwork
object itself.
internal_states_space (Optional[Union[dict,Space]]): Spec dict for the internal-states Space or a direct
Space object for the Space(s) of the internal (RNN) states.
policy_spec (Optional[dict]): An optional dict for further kwargs passing into the Policy c'tor.
value_function_spec (list, dict, ValueFunction): Neural network specification for baseline or instance
of ValueFunction.
execution_spec (Optional[dict,Execution]): The spec-dict specifying execution settings.
optimizer_spec (Optional[dict,Optimizer]): The spec-dict to create the Optimizer for this Agent.
value_function_optimizer_spec (dict): Optimizer config for value function optimizer. If None, the optimizer
spec for the policy is used (same learning rate and optimizer type).
observe_spec (Optional[dict]): Spec-dict to specify `Agent.observe()` settings.
update_spec (Optional[dict]): Spec-dict to specify `Agent.update()` settings.
summary_spec (Optional[dict]): Spec-dict to specify summary settings.
saver_spec (Optional[dict]): Spec-dict to specify saver settings.
auto_build (Optional[bool]): If True (default), immediately builds the graph using the agent's
graph builder. If false, users must separately call agent.build(). Useful for debugging or analyzing
components before building.
name (str): Some name for this Agent object.
double_q (bool): Whether to train two q networks independently.
initial_alpha (float): "The temperature parameter α determines the
relative importance of the entropy term against the reward".
gumbel_softmax_temperature (float): Temperature parameter for the Gumbel-Softmax distribution used
for discrete actions.
            target_entropy (Optional[float]): If set, the temperature alpha is learned so that the policy
                entropy tracks this target; if None, alpha remains at initial_alpha.
            memory_spec (Optional[dict,Memory]): The spec for the Memory to use for this Agent.
            value_function_sync_spec (Optional[SyncSpecification]): Target-network sync schedule. If None, it
                is built from the `sync_interval` / `sync_tau` entries of `update_spec`.
"""
# If VF spec is a network spec, wrap with SAC vf type. The VF must concatenate actions and states,
# which can require splitting the network in the case of e.g. conv-inputs.
if isinstance(value_function_spec, list):
value_function_spec = dict(type="sac_value_function", network_spec=value_function_spec)
self.logger.info("Using default SAC value function.")
elif isinstance(value_function_spec, ValueFunction):
self.logger.info("Using value function object {}".format(ValueFunction))
if policy_spec is None:
# Continuous action space: Use squashed normal.
# Discrete: Gumbel-softmax.
policy_spec = dict(deterministic=False,
distributions_spec=dict(
bounded_distribution_type="squashed",
discrete_distribution_type="gumbel_softmax",
gumbel_softmax_temperature=gumbel_softmax_temperature
))
super(SACAgent, self).__init__(
state_space=state_space,
action_space=action_space,
discount=discount,
preprocessing_spec=preprocessing_spec,
network_spec=network_spec,
internal_states_space=internal_states_space,
policy_spec=policy_spec,
value_function_spec=value_function_spec,
execution_spec=execution_spec,
optimizer_spec=optimizer_spec,
value_function_optimizer_spec=value_function_optimizer_spec,
observe_spec=observe_spec,
update_spec=update_spec,
summary_spec=summary_spec,
saver_spec=saver_spec,
auto_build=auto_build,
name=name
)
self.double_q = double_q
self.target_entropy = target_entropy
self.initial_alpha = initial_alpha
        # Assert that the sync interval is a multiple of the update_interval.
if "sync_interval" in self.update_spec:
if self.update_spec["sync_interval"] / self.update_spec["update_interval"] != \
self.update_spec["sync_interval"] // self.update_spec["update_interval"]:
raise RLGraphError(
"ERROR: sync_interval ({}) must be multiple of update_interval "
"({})!".format(self.update_spec["sync_interval"], self.update_spec["update_interval"])
)
elif "sync_tau" in self.update_spec:
if self.update_spec["sync_tau"] <= 0 or self.update_spec["sync_tau"] > 1.0:
raise RLGraphError(
"sync_tau ({}) must be in interval (0.0, 1.0]!".format(self.update_spec["sync_tau"])
)
else:
self.update_spec["sync_tau"] = 0.005 # The value mentioned in the paper
# Extend input Space definitions to this Agent's specific API-methods.
preprocessed_state_space = self.preprocessed_state_space.with_batch_rank()
reward_space = FloatBox(add_batch_rank=True)
terminal_space = BoolBox(add_batch_rank=True)
#self.iterations = self.update_spec["num_iterations"]
self.batch_size = self.update_spec["batch_size"]
float_action_space = self.action_space.with_batch_rank().map(
mapping=lambda flat_key, space: space.as_one_hot_float_space() if isinstance(space, IntBox) else space
)
self.input_spaces.update(dict(
env_actions=self.action_space.with_batch_rank(),
actions=float_action_space,
preprocessed_states=preprocessed_state_space,
rewards=reward_space,
terminals=terminal_space,
next_states=preprocessed_state_space,
states=self.state_space.with_batch_rank(add_batch_rank=True),
batch_size=int,
importance_weights=FloatBox(add_batch_rank=True),
deterministic=bool,
weights="variables:{}".format(self.policy.scope)
))
if value_function_sync_spec is None:
value_function_sync_spec = SyncSpecification(
sync_interval=self.update_spec["sync_interval"] // self.update_spec["update_interval"],
sync_tau=self.update_spec["sync_tau"] if "sync_tau" in self.update_spec else 5e-3
)
self.memory = Memory.from_spec(memory_spec)
self.alpha_optimizer = self.optimizer.copy(scope="alpha-" + self.optimizer.scope) if self.target_entropy is not None else None
self.root_component = SACAgentComponent(
agent=self,
policy=self.policy,
q_function=self.value_function,
preprocessor=self.preprocessor,
memory=self.memory,
discount=self.discount,
initial_alpha=self.initial_alpha,
target_entropy=target_entropy,
optimizer=self.optimizer,
vf_optimizer=self.value_function_optimizer,
alpha_optimizer=self.alpha_optimizer,
q_sync_spec=value_function_sync_spec,
num_q_functions=2 if self.double_q is True else 1
)
extra_optimizers = [self.value_function_optimizer]
if self.alpha_optimizer is not None:
extra_optimizers.append(self.alpha_optimizer)
self.build_options = dict(optimizers=extra_optimizers)
if self.auto_build:
self._build_graph(
[self.root_component], self.input_spaces, optimizer=self.optimizer,
batch_size=self.update_spec["batch_size"],
build_options=self.build_options
)
self.graph_built = True
def set_weights(self, policy_weights, value_function_weights=None):
        # TODO: Overrides parent, but should this be policy or value function?
return self.graph_executor.execute((self.root_component.set_policy_weights, policy_weights))
def get_weights(self):
return dict(policy_weights=self.graph_executor.execute(self.root_component.get_policy_weights))
def get_action(self, states, internals=None, use_exploration=True, apply_preprocessing=True, extra_returns=None,
time_percentage=None):
# TODO: common pattern - move to Agent
"""
Args:
extra_returns (Optional[Set[str],str]): Optional string or set of strings for additional return
values (besides the actions). Possible values are:
- 'preprocessed_states': The preprocessed states after passing the given states through the
preprocessor stack.
- 'internal_states': The internal states returned by the RNNs in the NN pipeline.
- 'used_exploration': Whether epsilon- or noise-based exploration was used or not.
Returns:
tuple or single value depending on `extra_returns`:
- action
- the preprocessed states
"""
extra_returns = {extra_returns} if isinstance(extra_returns, str) else (extra_returns or set())
# States come in without preprocessing -> use state space.
if apply_preprocessing:
call_method = self.root_component.get_preprocessed_state_and_action
batched_states, remove_batch_rank = self.state_space.force_batch(states)
else:
call_method = self.root_component.action_from_preprocessed_state
batched_states = states
remove_batch_rank = False
#remove_batch_rank = batched_states.ndim == np.asarray(states).ndim + 1
# Increase timesteps by the batch size (number of states in batch).
batch_size = len(batched_states)
self.timesteps += batch_size
        # Control which return values to "pull" (depending on `extra_returns`).
return_ops = [0, 1] if "preprocessed_states" in extra_returns else [0]
ret = force_list(self.graph_executor.execute((
call_method,
[batched_states, not use_exploration], # deterministic = not use_exploration
# 0=preprocessed_states, 1=action
return_ops
)))
        # Convert Gumbel (relaxed one-hot) sample back into int type for all discrete composite actions.
if isinstance(self.action_space, ContainerSpace):
ret[0] = ret[0].map(
mapping=lambda key, action: np.argmax(action, axis=-1).astype(action.dtype)
if isinstance(self.flat_action_space[key], IntBox) else action
)
elif isinstance(self.action_space, IntBox):
ret[0] = np.argmax(ret[0], axis=-1).astype(self.action_space.dtype)
if remove_batch_rank:
ret[0] = strip_list(ret[0])
if "preprocessed_states" in extra_returns:
return ret[0], ret[1]
else:
return ret[0]
def _observe_graph(self, preprocessed_states, actions, internals, rewards, next_states, terminals):
self.graph_executor.execute((self.root_component.insert_records, [preprocessed_states, actions, rewards, next_states, terminals]))
def update(self, batch=None, time_percentage=None, **kwargs):
if batch is None:
size = self.graph_executor.execute(self.root_component.get_memory_size)
# TODO: is this necessary?
if size < self.batch_size:
                return 0.0, 0.0, 0.0, 0.0
ret = self.graph_executor.execute((self.root_component.update_from_memory, [self.batch_size, time_percentage]))
else:
ret = self.graph_executor.execute((self.root_component.update_from_external_batch, [
batch["states"], batch["actions"], batch["rewards"], batch["terminals"], batch["next_states"],
batch["importance_weights"], time_percentage
]))
return ret["actor_loss"], ret["actor_loss_per_item"], ret["critic_loss"], ret["alpha_loss"]
def reset(self):
"""
Resets our preprocessor, but only if it contains stateful PreprocessLayer Components (meaning
the PreprocessorStack has at least one variable defined).
"""
if self.preprocessing_required and len(self.preprocessor.variables) > 0:
self.graph_executor.execute("reset_preprocessor")
self.graph_executor.execute(self.root_component.reset_targets)
def __repr__(self):
return "SACAgent(double-q={}, initial-alpha={}, target-entropy={})".format(
self.double_q, self.initial_alpha, self.target_entropy
)
| 1.804688
| 2
|
euler-problems/euler-problem-3.py
|
sdenisen/test
| 0
|
12777481
|
<gh_stars>0
__author__ = 'sdenisenko'
target_number = 600851475143
# Despite its name, this is a trial-division primality test: `naturals` holds the
# primes found so far, and `item` is prime iff none of them divides it evenly.
def isNatural(item, naturals):
    for nat_number in naturals:
        if not item % nat_number:
            break
    else:
        return True
    return False
def getNaturalsUpTo(n):
    naturals = []
    for i in range(2, n):
        if isNatural(i, naturals):
            naturals.append(i)
    naturals.insert(0, 1)
    return naturals
# Extends `naturals` with the primes in [start, stop).
def getNaturals(start, stop, naturals):
    for i in range(start, stop):
        if isNatural(i, naturals):
            naturals.append(i)
    return naturals
def checkTargetNumber(target_number):
    naturals = []
    result = []
    last = 0
    i = 2
    iteration = 1000
    naturals = getNaturals(2, iteration, naturals)
    while True:
        if i >= iteration:
            next_step = iteration + 1000
            naturals = getNaturals(iteration, next_step, naturals)
            iteration = next_step
        result = [n for n in naturals if not target_number % n]
        if last != result[-1]:
            print(result[-1:])
            last = result[-1]
        if i >= target_number:
            break
        i += 1000
    return naturals
# print(getNaturalsUpTo(102))
print(checkTargetNumber(target_number))
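# A minimal sketch of the standard trial-division approach to this problem:
# repeatedly divide out the smallest factor, so the final remaining value is
# the largest prime factor.
def largest_prime_factor(n):
    factor = 2
    while factor * factor <= n:
        if n % factor:
            factor += 1
        else:
            n //= factor
    return n
print(largest_prime_factor(target_number))  # expected: 6857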
| 3.21875
| 3
|
easy/sum of digits in base k/solution.py
|
ilya-sokolov/leetcode
| 4
|
12777482
|
<gh_stars>1-10
class Solution:
def sumBase(self, n: int, k: int) -> int:
result = 0
while n > 0:
result += n % k
n = n // k
return result
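# Worked example: 34 in base 6 is "54" (since 34 = 5*6 + 4), so sumBase(34, 6) == 5 + 4 == 9.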
s = Solution()
print(s.sumBase(34, 6))
print(s.sumBase(10, 10))
print(s.sumBase(10, 9))
print(s.sumBase(7, 2))
print(s.sumBase(255, 2))
print(s.sumBase(999, 10))
print(s.sumBase(3, 3))
print(s.sumBase(88, 8))
print(s.sumBase(777, 7))
print(s.sumBase(1024, 4))
| 3.0625
| 3
|
dev-burst-analysis.py
|
bcodegard/xrd-analysis
| 0
|
12777483
|
<reponame>bcodegard/xrd-analysis
"""
separate a dataset into segments separated by points which
exceed a threshold for a branch or its derivative, and then
perform analysis on the separated datasets.
typical case is to use the derivative of a timestamp variable,
in which case the threshold is a time separation between events.
"""
__author__ = "<NAME>"
__version__ = "0.1"
import argparse
import sys
import os
import random
import math
import numpy as np
import matplotlib.pyplot as plt
import utils.fileio as fileio
import utils.data as data
import utils.model as model
import utils.display as display
import utils.cli as cli
ROOT_FILE_DEFAULT = '../xrd-analysis/data/root/scintillator/Run{}.root'
FIG_LOC = "./figs/{}.png"
def procure_cluster_data(args, extra_branches=set()):
""""""
# extra_branches -> set
if not (type(extra_branches) is set):
extra_branches = set(extra_branches)
# run -> path to file
if os.sep in args.run:
root_file = args.run
else:
root_file = ROOT_FILE_DEFAULT.format(args.run)
assert os.path.exists(root_file)
# compose list of needed branches
# get all keys present in root file
branches_all = fileio.get_keys(root_file)
branches_use = extra_branches
# cluster branch
cluster_branch = next(_ for _ in sorted(branches_all) if _.startswith(args.cluster_branch))
branches_use |= {cluster_branch}
# fit
branches_use |= {args.fit[0]}
# cuts
cut_branches = {_[0] for _ in args.cut}
branches_use |= cut_branches
# load branches and create manager instance
branches = fileio.load_branches(root_file, branches_use)
bm = data.BranchManager(branches, export_copies=False, import_copies=False)
# apply cuts
# if args.cut:
cuts = [data.cut(*_) for _ in [args.fit] + args.cut]
    bm.mask(data.mask_all(*cuts), apply_mask=True)
# fixes and tweaks
bm.bud([data.bud_entry])
bm.bud([data.fix_monotonic_timestamp()],overwrite=True)
bm.bud([data.localize_timestamp()],overwrite=True)
# differentiate if requested
if args.cluster_diff:
if args.cluster_diff > 1:
print("WARNING: differentiation with kernel size > 1 not yet implemented")
bm.bud([data.differentiate_branch(cluster_branch,suffix="deriv")])
cluster_branch_final = '_'.join([cluster_branch,"deriv"])
else:
cluster_branch_final = cluster_branch
# make cluster index branch
bm.bud([data.count_passing(data.cut(cluster_branch_final,args.cluster_threshold),"cluster_index")])
return bm
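# A rough standalone sketch (assuming data.count_passing behaves like a cumulative
# count of threshold crossings) of how the "cluster_index" branch segments events:
def _cluster_index_sketch(values, threshold):
    # Each value above `threshold` opens a new segment, so all events between
    # consecutive crossings share the same integer cluster index.
    return np.cumsum(np.asarray(values) > threshold)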
def analyze_fourier(args):
branch_use = "vMax_3046_1"
branches_use = {branch_use, "timestamp_3046_1"}
bm = procure_cluster_data(args, branches_use)
bcut = bm.mask(data.cut(branch_use,10,900), branch_use)
# bcut = np.random.normal(np.linspace(500,505,bcut.size), 50, bcut.shape)
# plt.hist(bcut, bins=np.linspace(0,1000,500))
# plt.show()
dur = bm["timestamp_3046_1"][-1] - bm["timestamp_3046_1"][0]
sep = dur / bcut.size
print(dur, sep)
freq = np.fft.rfftfreq(bcut.size, sep)
bfft = np.fft.rfft(bcut)
# print(freq[:10])
# print(freq[-10:])
# plt.plot(1/(freq[1:]), abs(bfft[1:]), 'k.')
# plt.xlabel("1/frequency")
# plt.ylabel("amplitude")
# plt.xscale('log')
# plt.yscale('log')
# plt.show()
T = 1/(freq[1:])
A = abs(bfft[1:])
print(T[:10])
print(A[:10])
nbins = 200
display.pairs2d(
[T,A],
[np.logspace(math.log(min(T),10),math.log(max(T),10),nbins), np.logspace(math.log(min(A),10),math.log(max(A),10),nbins)],
[True,True],
["period","amplitude"],
)
plt.show()
def show_clustering(args):
bm=procure_cluster_data(args)
t = bm["timestamp_3046_1"]; tm = t.max()
f = bm[args.fit[0]] ; fm = f.max()
c = bm["cluster_index"] ; cm = c.max()
plt.plot(bm["entry"], bm["timestamp_3046_1"] / tm, marker='' , ls='-', color='k', label='time / {:.2f}'.format(tm))
# plt.plot(bm["entry"], bm[args.fit[0]] / fm, marker='.', ls='' , color='g', label='{} / {:.2f}'.format(args.fit[0],fm))
plt.plot(bm["entry"], bm["cluster_index"] / cm, marker='.', ls='' , color='darkred', label='cluster / {}'.format(cm))
plt.xlabel('entry')
plt.legend()
plt.show()
def analyze_drift(args, ):
# branches_use = {"timestamp_3046_1","area_3046_1","vMax_3046_1","tMax_3046_1","scaler_3046_1"}
bm = procure_cluster_data(args)
ci = bm["cluster_index"]
n_clusters = ci.max() + 1
cluster_nev = []
cluster_i = []
cluster_fit = []
nc = 5 if args.delta[0] else 3
fig,ax = plt.subplots(figsize=(nc*5,5))
fig.subplots_adjust(
top=0.981,
bottom=0.049,
left=0.04,
right=0.96,
hspace=0.2,
wspace=0.2,
)
# list of components for fit model
fit_model_components = []
# add background components
if "q" in args.bg:
fit_model_components.append(model.quadratic())
elif "l" in args.bg:
fit_model_components.append(model.line())
elif "c" in args.bg:
fit_model_components.append(model.constant([[0,np.inf]]))
if "e" in args.bg:
fit_model_components.append(model.exponential())
# store number of parameters used by background components
n_bg_parameters = sum([_.npars for _ in fit_model_components])
# add gaussians
gaus_names = []
for ig,g in enumerate(args.gaus):
gaus_names.append(g[0])
# re-arrange so as to have mu bounds specified first
this_bounds = [[g[5],g[6]], [g[1],g[2]], [g[3],g[4]]]
fit_model_components.append(model.gaussian(this_bounds))
# compose model
fit_model = fit_model_components[0]
for component in fit_model_components[1:]:
fit_model = fit_model + component
has_dependent_components = any([args.gaus_linear_dep, ])
if has_dependent_components:
# compose fit model containing dependent components as free components
dependent_components = []
gaus_linear_dep_names = []
for ig,g in enumerate(args.gaus_linear_dep):
gaus_linear_dep_names.append(g[0])
dependent_components.append(model.gaussian())
for ic,c in enumerate(dependent_components):
if ic==0:
fit_model_with_dependents = fit_model + c
else:
fit_model_with_dependents = fit_model_with_dependents + c
        # compose metamodel with direct parameterization of free components' parameters
        # and linear transformations of those parameters for dependent components' parameters
xfp = []
# add literal parameters of fit model
for ip in range(fit_model.npars):
unitvec = np.zeros(fit_model.npars,dtype=float)
unitvec[ip] = 1.0
xfp.append(unitvec)
# add scaled parameters for linearly dependent gaussians
for ig,g in enumerate(args.gaus_linear_dep):
# find index of independent gaussian with matching name
indep_ig = next(i for i,_ in enumerate(gaus_names) if _ == g[0])
# calculate starting index of that gaussian's parameters in fit_model
indep_ip_start = n_bg_parameters + 3*indep_ig
# add transformations
for jp,pscale in enumerate(g[1:]):
scaled_unitvec = np.zeros(fit_model.npars,dtype=float)
scaled_unitvec[indep_ip_start+jp] = pscale
xfp.append(scaled_unitvec)
fit_metamodel = model.metamodel(fit_model_with_dependents, xfp, bounds=fit_model.bounds)
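        # Sketch of the xfp layout (assuming model.metamodel maps free parameters
        # p_free to full parameters via p_full[j] = dot(xfp[j], p_free)): the first
        # fit_model.npars rows are unit vectors, so free parameters pass through
        # unchanged; each dependent gaussian then adds three rows that scale the
        # matching independent gaussian's parameters by its --gl ratios.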
eval_model = fit_metamodel
else:
eval_model = fit_model
# calculate bins
# bins = np.linspace(args.fit[1], args.fit[2], args.bins)
bins = np.linspace(bm[args.fit[0]].min(), bm[args.fit[0]].max(), args.bins+1)
for i in range(n_clusters):
print("cluster index {}, count {} / {}".format(i, i+1,n_clusters))
mask = data.cut("cluster_index", i - 0.1, i + 0.1)
masked_branches = bm.mask(mask, {"timestamp_3046_1",args.fit[0]})
this_t = masked_branches["timestamp_3046_1"]
this_data = masked_branches[args.fit[0]]
this_nev = this_t.size
counts, edges = np.histogram(this_data, bins=bins)
midpoints = (edges[1:] + edges[:-1])*0.5
if has_dependent_components:
popt, pcov, chi2, ndof = fit_metamodel.fit(midpoints, counts, p0=popt if i>0 else fit_model.guess(midpoints,counts))
else:
popt, pcov, chi2, ndof = fit_model.fit(midpoints, counts, p0=popt if i>0 else None)
if not i:
plt.subplot(1,nc,1)
plt.step(midpoints, counts, where='mid', color="k", label="data")
plt.plot(midpoints, eval_model(midpoints, *popt), 'g-', label="best fit")
plt.title("run {}, cluster {}\nchi2/ndof={:.2f}/{}={:.2f}".format(args.run,i,chi2,ndof,chi2/ndof))
# plt.show()
cluster_nev.append(this_t.size)
cluster_i.append(i)
cluster_fit.append([popt, pcov, chi2, ndof])
# print("{:<3} - {} - {} - {} - {} - {}".format(i, this_t.size, [round(_,3) for _ in popt], [round(_,3) for _ in pcov], chi2, ndof))
cluster_i = np.array(cluster_i )
cluster_nev = np.array(cluster_nev)
plt.subplot(1,nc,2)
plt.plot(cluster_i, cluster_nev, 'k.', )
plt.xlabel('cluster index')
plt.ylabel('number of events')
plt.title('number of events per cluster\nRun {}'.format(args.run))
# plt.show()
cluster_popt = np.stack([_[0] for _ in cluster_fit],axis=0)
cluster_pcov = np.stack([_[1] for _ in cluster_fit],axis=0)
cluster_chi2 = np.array([_[2] for _ in cluster_fit])
cluster_ndof = np.array([_[3] for _ in cluster_fit])
cluster_chi2_per_ndof = cluster_chi2 / cluster_ndof
print("chi2/ndof mean,std")
print(cluster_chi2_per_ndof.mean(), cluster_chi2_per_ndof.std())
print("cluster_popt mean,std; cluster_pcov mean,std")
print(fit_model.pnames)
for k in range(cluster_popt.shape[1]):
po = cluster_popt[:,k]
pc = cluster_pcov[:,k]
# print(" ".join([str(_) for _ in po]))
# print(" ".join([str(_) for _ in pc]))
print(po.mean(), po.std(), pc.mean(), pc.std(), sep=',',end=',')
print("")
# plot parameters
poi = args.poi if args.poi>=0 else fit_model.npars+args.poi
qopt = cluster_popt[:,poi]
qcov = cluster_pcov[:,poi]
pm_const = model.constant()
pm_popt, pm_pcov, pm_chi2, pm_ndof = pm_const.fit_with_errors(cluster_i, qopt, xerr=None, yerr=qcov)
plt.subplot(1,nc,3)
colors = ['k','g','b','darkred','tab:brown','m','c','tab:red',"peru","orange","olive","teal","tab:purple"]
for j in range(fit_model.npars):
if j!=poi:
continue
this_popt, this_pcov, this_chi2, this_ndof = pm_const.fit_with_errors(cluster_i, cluster_popt[:,j], xerr=None, yerr=cluster_pcov[:,j])
this_label = "{}: {:.3f}".format(fit_model.pnames[j], this_chi2/this_ndof)
plt.errorbar(cluster_i, cluster_popt[:,j], cluster_pcov[:,j], color=colors[j], ls='', marker='.', label=this_label)
plt.plot(cluster_i, pm_const(cluster_i, *this_popt), color=colors[j], ls='--')
plt.xlabel('cluster index')
plt.ylabel('parameter values')
plt.title("fit parameter values per cluster, run {}\n{} fit chi2/dof = {:.2f}/{} = {:.2f}".format(args.run, fit_model.pnames[poi], pm_chi2, pm_ndof, pm_chi2/pm_ndof))
plt.legend()
# plt.show()
chi_model = model.gaus([[0,np.inf],[-np.inf,np.inf],[0,np.inf]])
# analyze parameters over cluster index
if args.delta[0]:
isep = args.delta[0]
d = (qopt[isep:] - qopt[:-isep] )
d_var = (qcov[isep:]**2 + qcov[:-isep]**2)
d_std = np.sqrt(d_var)
d_ind = cluster_i[isep:]
# slice d and associated arrays with delta[1:4]
d = d [slice(*args.delta[1:4])]
d_var = d_var[slice(*args.delta[1:4])]
d_std = d_std[slice(*args.delta[1:4])]
d_ind = d_ind[slice(*args.delta[1:4])]
fit_d = pm_const.fit_with_errors(d_ind, d, xerr=None, yerr=np.sqrt(d_var))
chi2_d_zero = ((d**2) / d_var).sum()
ndof_d_zero = d.size
# plt.errorbar(cluster_i, qopt-np.mean(qopt), qcov, color='k', ls='', marker='.', label='mu[i] - avg(mu)')
plt.subplot(1,nc,4)
plt.errorbar(d_ind, d, d_std, color='darkred', ls='', marker='.', label="{0}[i] - {0}[i-{1}]".format(fit_model.pnames[poi],isep))
plt.plot(d_ind, pm_const(d_ind,*fit_d[0]), color='r', ls='--', marker='', label='constant fit to difference')
plt.axhline(0, color='b', ls='-', label='zero')
plt.xlabel("cluster index")
plt.ylabel("difference between clusters")
plt.title("c=0: chi2/dof = {:.2f} / {} = {:.3f} \nc={:.3f}: chi2/dof = {:.2f} / {} = {:.3f}".format(
chi2_d_zero, ndof_d_zero, chi2_d_zero/ndof_d_zero,
fit_d[0][0],fit_d[2],fit_d[3],fit_d[2]/fit_d[3],
))
plt.legend()
# plt.show()
# analyze distribution of chi
d_chi = d / d_std
# max_abs_chi = abs(d_chi).max()
bins_chi = np.linspace(d_chi.min()-1,d_chi.max()+1,20+1)
counts_chi, edges_chi = np.histogram(d_chi, bins=bins_chi)
midpoints_chi = (edges_chi[1:] + edges_chi[:-1])*0.5
popt_chi, perr_chi, chisq_chi, ndof_chi = chi_model.fit(midpoints_chi, counts_chi)
plt.subplot(1,nc,5)
plt.step(midpoints_chi, counts_chi, where='mid', color="k", label="data")
plt.plot(midpoints_chi, chi_model(midpoints_chi, *popt_chi), 'g-', label="best fit")
popt_chi_string = ", ".join(["{}={:.2f}\xb1{:.3f}".format(_,popt_chi[i], perr_chi[i]) for i,_ in enumerate(chi_model.pnames)])
plt.title("run {}, change in {}, chi2/ndof={:.2f}/{}={:.3f}\n{}".format(args.run,fit_model.pnames[poi],chisq_chi,ndof_chi,chisq_chi/ndof_chi,popt_chi_string))
plt.xlabel("chi = delta_ij / err(delta_ij)")
plt.ylabel("counts")
plt.tight_layout()
if args.fig:
# just filename: save in ./figs/
if not (os.sep in args.fig):
fig_file = FIG_LOC.format(args.fig)
else:
fig_file = args.fig
# save the figure to an image file
plt.savefig(fig_file)
plt.show()
def main(args):
print(args)
print("")
routine = args.do[0]
if routine == "drift":
print("performing drift analysis\n")
analyze_drift(args)
elif routine == "fourier":
print("performing fourier analysis\n")
analyze_fourier(args)
elif routine == "show":
print("showing clustering results\n")
show_clustering(args)
else:
print("unrecognized analysis routine: {}\n".format(routine))
return
if __name__ == '__main__':
# main(None)
# sys.exit(0)
parser = argparse.ArgumentParser(
description="analysis using cluster identification to separate dataset into subsets",
)
# dataset specification
parser.add_argument("run",type=str,help="file location, name, or number")
parser.add_argument("fit",type=str,nargs="+",action=cli.MergeAction,const=((str,float),("",-np.inf,np.inf)),help="branch low=-inf hi=inf")
parser.add_argument(
"--cut","--c",
type=str,
nargs="+",
action=cli.MergeAppendAction,
const=((str,float),("",-np.inf,np.inf)),
default=[],
help="cut on (lo<branch<hi): --c branch lo=-inf hi=inf"
)
# fitting specification
parser.add_argument("--bins",type=int,default=100,help="number of bins to use")
parser.add_argument("--bg" ,type=str,nargs="?",const="",default="c",help="background function: any combination of (p)ower (e)xp (c)onstant (l)ine (q)uadratic")
parser.add_argument(
"--gaus","--g",
type=str,
nargs="+",
action=cli.MergeAppendAction,
const=((str,float),("",-np.inf,np.inf,0.0,np.inf,0.0,np.inf)),
default=[],
help="gaussian: name='' mu_lo=-inf mu_hi=inf sigma_lo=0 sigma_hi=inf c_lo=0 c_hi=inf"
)
parser.add_argument(
'--gaus-linear-dep','--gl',
type=str,
nargs="+",
action=cli.MergeAppendAction,
const=((str,float),("",1.0,1.0,1.0)),
default=[],
help="constrained gaus, parameters are linear scaling of gaus with same name. --gl name mu_ratio sigma_ratio c_ratio"
)
# clustering details
parser.add_argument("--cluster-branch" ,"--cb",type=str ,default="timestamp_",help="branch used to determine clusters")
parser.add_argument("--cluster-diff" ,"--cd",type=int ,default=1 ,help="differentiate cluster branch? 0 = no; int>0 = kernel size")
parser.add_argument("--cluster-threshold","--ct",type=float,default=10.0 ,help="minimum value for cluster boundary")
parser.add_argument("--cluster-size-min" ,"--cs",type=int ,default=0 ,help="minimum number of datapoints before ending cluster")
# analysis routine
parser.add_argument("--do",type=str,nargs="+",default=["drift"],help="what analysis to perform, and any extra arguments it needs")
parser.add_argument("--poi",type=int,default=-2,help="which parameter from fit to analyze. negative = count back from end of list.")
parser.add_argument(
"--delta","--d",
type=str,
nargs="*",
action=cli.MergeAction,
const=((int,),(1,None,None,None)),
default=[0],
help="analyze difference between pairs of clusters with: separation=1 pair_start=None pair_stop=None pair_step=None"
)
# output
parser.add_argument("--fig",type=str,default="",help="location to save figure as png image (overwrites if file exists)")
# parse and run
args = parser.parse_args()
main(args)
| 2.75
| 3
|
pokedataset32_vae.py
|
EtreSerBe/PokeAE
| 1
|
12777484
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
""" Variational Auto-Encoder Example.
Using a variational auto-encoder to generate digits images from noise.
MNIST handwritten digits are used as training examples.
References:
- Auto-Encoding Variational Bayes The International Conference on Learning
Representations (ICLR), Banff, 2014. <NAME>, <NAME>
- <NAME>, <NAME>, <NAME>, and <NAME>. "Gradient-based
learning applied to document recognition." Proceedings of the IEEE,
86(11):2278-2324, November 1998.
Links:
- [VAE Paper] https://arxiv.org/abs/1312.6114
- [MNIST Dataset] http://yann.lecun.com/exdb/mnist/
This article is great to understand all that's going on here.
https://towardsdatascience.com/intuitively-understanding-variational-autoencoders-1bfe67eb5daf
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import array, newaxis, expand_dims
import matplotlib.pyplot as plt
import matplotlib.colors
from matplotlib.colors import hsv_to_rgb
from scipy.stats import norm # A normal continuous random variable.
# The location (loc) keyword specifies the mean. The scale (scale) keyword specifies the standard deviation.
import tensorflow as tf
import tflearn
import h5py
import pokedataset32_vae_functions as utilities
from PIL import Image
import colorsys
# current_dataset = 'pokedataset'
current_dataset = 'anime_faces_'
use_anime_with_types = True
if not use_anime_with_types:
X_full_HSV, Y_full_HSV, X_full_RGB, Y_full_RGB, X, Y, test_X, test_Y = utilities.ready_all_data_sets(
current_dataset)
else:
X, Y = utilities.prepare_dataset_for_input_layer(
'anime_faces_32_train_HSV_Two_Hot_Encoded_Augmented_With_Types.h5', in_dataset_x_label='anime_faces_32_X',
in_dataset_y_label='anime_faces_32_Y')
test_X, test_Y = utilities.prepare_dataset_for_input_layer(
'anime_faces_32_train_HSV_Two_Hot_Encoded_Augmented_With_Types.h5', in_dataset_x_label='anime_faces_32_X_test',
in_dataset_y_label='anime_faces_32_Y_test')
X_full_RGB, Y_full_RGB = utilities.prepare_dataset_for_input_layer(
'anime_faces_32_full_RGB_Two_Hot_Encoded.h5', in_dataset_x_label='anime_faces_32_X',
in_dataset_y_label='anime_faces_32_Y')
X_first_half = X[0:int(len(X) / 2)]
Y_first_half = Y[0:int(len(Y) / 2)]
test_X_first_half = test_X[0:int(len(test_X) / 2)]
test_Y_first_half = test_Y[0:int(len(test_Y) / 2)]
"""X_second_half = X[int(len(X) / 2):]
Y_second_half = Y[int(len(Y) / 2):]
test_X_second_half = test_X[int(len(test_X) / 2):]
test_Y_second_half = test_Y[int(len(test_Y) / 2):]"""
X_full_HSV = np.concatenate((X_first_half, test_X_first_half), axis=0)
Y_full_HSV = np.concatenate((Y_first_half, test_Y_first_half), axis=0)
Y_full_RGB = Y_full_HSV # Replace it, since RGB was not saved with types.
"""
X_noisy_HSV, Y_noisy_HSV = \
utilities.prepare_dataset_for_input_layer("pokedataset32_train_NOISE_HSV_Two_Hot_Encoded_Augmented.h5")
X_noisy_HSV_test, Y_noisy_HSV_test = \
utilities.prepare_dataset_for_input_layer("pokedataset32_train_NOISE_HSV_Two_Hot_Encoded_Augmented.h5",
in_dataset_x_label="pokedataset32_X_test",
in_dataset_y_label="pokedataset32_Y_test")
"""
# NOTE: Use these lines to output a visualization of the data sets, if you think
# there is any problem with them. But I've checked and they seem correct.
"""X_noisy_HSV = utilities.convert_to_format(X_noisy_HSV[:], 'HSV_TO_RGB')
utilities.export_as_atlas(X_noisy_HSV, X_noisy_HSV, name_prefix='NOISY_TRAIN_ATLAS')
X_noisy_HSV_test = utilities.convert_to_format(X_noisy_HSV_test[:], 'HSV_TO_RGB')
utilities.export_as_atlas(X_noisy_HSV_test, X_noisy_HSV_test, name_prefix='NOISY_TEST_ATLAS')"""
Y = Y * 0.5
test_Y = test_Y * 0.5
Y_full_HSV = Y_full_HSV * 0.5 # np.clip(Y_full_HSV, 0.0, 1.0)
Y_full_RGB = Y_full_RGB * 0.5
small_X = np.concatenate((X[0:200], test_X[0:200]), axis=0)
small_Y = np.concatenate((Y[0:200], test_Y[0:200]), axis=0)
# utilities.create_hashmap(X_full_HSV)
# Now we add the extra info from the Ys.
expanded_X = np.append(X, Y, axis=1) # It already contains the Flip-left-right augmentation.
# Now, we do the same for the training data
expanded_test_X = np.append(test_X, test_Y, axis=1)
# Right now it's the only expanded full that we need.
expanded_full_X_HSV = np.append(X_full_HSV, Y_full_HSV, axis=1)
expanded_small_X = np.append(small_X, small_Y, axis=1)
print("expanded Xs and Ys ready")
# utilities.initialize_session()
# current_session = utilities.get_session()
predict_full_dataset = False
optimizer_name = 'adam'
loss_name = 'vae_loss'
final_model_name = utilities.get_model_descriptive_name(optimizer_name, loss_name, in_version='_anime')
# The network definition lives in pokedataset32_vae_functions.py so that this script and the model-loading script share it.
network_instance = utilities.get_network()
network_instance = tflearn.regression(network_instance,
optimizer=optimizer_name,
metric='R2',
loss=utilities.vae_loss,
learning_rate=0.0002) # adagrad? #adadelta #nesterov did good,
model = tflearn.DNN(network_instance) # , session=current_session) # , tensorboard_verbose=2)
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
print("Preparing model to fit.")
model.fit(expanded_X, Y_targets=expanded_X,
n_epoch=1,
shuffle=True,
show_metric=True,
snapshot_epoch=True,
batch_size=128,
# validation_set=0.15, # It also accepts a float < 1 to performs a data split over training data.
validation_set=(expanded_test_X, expanded_test_X), # We use it for validation for now. But also test.
run_id='encoder_decoder')
print("getting samples to show on screen.")
encode_decode_sample = []
if predict_full_dataset:
predicted_X = X
predicted_Y = Y_full_RGB
encode_decode_sample = utilities.predict_batches(expanded_full_X_HSV, model, in_samples_per_batch=64)
else:
predicted_X = small_X
predicted_Y = small_Y
encode_decode_sample = utilities.predict_batches(expanded_small_X, model, in_samples_per_batch=64)
# encode_decode_sample = model.predict(expanded_X) # Just to test training with RGB. It seemed worse.
print("The number of elements in the predicted samples is: " + str(len(encode_decode_sample)))
reconstructed_pixels = []
reconstructed_types = []
# Made a function to avoid repeating that fragment of code in other python files.
reconstructed_pixels, reconstructed_types = utilities.reconstruct_pixels_and_types(encode_decode_sample)
print("Exporting reconstructed pokemon as an image.")
# utilities.export_as_atlas(X_full_RGB, reconstructed_pixels) # I have checked that it works perfectly.
if predict_full_dataset:
correct_indices = utilities.export_types_csv(Y_full_RGB, reconstructed_types)
else:
correct_indices = utilities.export_types_csv(small_Y, reconstructed_types)
# This is used to export an image only containing the ones whose types were correctly predicted by the NN.
# correct_X_RGB = [X_full_RGB[i] for i in correct_indices]
# correct_reconstructed_pixels = [reconstructed_pixels[i] for i in correct_indices]
# utilities.export_as_atlas(correct_X_RGB, correct_reconstructed_pixels, name_annotations='correct')
# I used this before to show the results, but now I have the whole image being saved.
print("PREPARING TO SHOW IMAGE")
# Compare original images with their reconstructions.
f, a = plt.subplots(2, 20, figsize=(20, 2), squeeze=False) # figsize=(50, 2),
for i in range(20):
# reshaped_pokemon = np.multiply(reshaped_pokemon, 255.0)
reshaped_pokemon = np.reshape(np.asarray(predicted_X[i]), [1024, 3])
RGBOriginal = matplotlib.colors.hsv_to_rgb(reshaped_pokemon)
RGBOriginal = np.asarray(RGBOriginal).flatten()
    temp = [[ii] for ii in list(RGBOriginal)]  # wrap each channel value in its own list so the reshape to (32, 32, 3) below works
print("ORIGINAL Types for Pokemon " + str(i) + " are: ")
utilities.print_pokemon_types(predicted_Y[i])
a[0][i].imshow(np.reshape(temp, (32, 32, 3)))
temp = [[ii] for ii in list(reconstructed_pixels[i])]
a[1][i].imshow(np.reshape(temp, (32, 32, 3)))
print("Types for Pokemon " + str(i) + " are: ")
utilities.print_pokemon_types(reconstructed_types[i])
f.show()
plt.draw()
plt.waitforbuttonpress()
print('Now saving the model')
model.save(final_model_name)
print('Save successful, closing application now.')
| 3.171875
| 3
|
refsql/__init__.py
|
akaariai/django-refsql
| 7
|
12777485
|
<filename>refsql/__init__.py
from .expressions import RefSQL # noqa
| 1.039063
| 1
|
nlp_202/hw4/model.py
|
daohuei/ucsc-nlp-unicorn
| 0
|
12777486
|
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from constants import START_TAG, STOP_TAG, DEVICE
from helper import argmax, log_sum_exp, hamming_loss, convert_to_char_tensor
from data import tag_vocab, max_word_len, char_vocab, word_vocab
class BiLSTM_CRF(nn.Module):
def __init__(
self,
vocab_size,
tag_to_ix,
embedding_dim,
hidden_dim,
char_cnn=False,
char_cnn_stride=2,
char_cnn_kernel=2,
char_embedding_dim=4,
loss="crf_loss",
cost=hamming_loss(),
):
super(BiLSTM_CRF, self).__init__()
self.embedding_dim = embedding_dim
self.hidden_dim = hidden_dim
self.vocab_size = vocab_size
self.tag_to_ix = tag_to_ix
self.tagset_size = len(tag_to_ix)
self.char_cnn = char_cnn
self.max_word_len = max_word_len
self.loss_type = loss
self.cost = cost
self.word_embeds = nn.Embedding(
vocab_size, embedding_dim, padding_idx=0
)
self.char_cnn_layer = CharCNN(
max_word_len=max_word_len,
embedding_dim=char_embedding_dim,
kernel=char_cnn_kernel,
stride=char_cnn_stride,
)
self.lstm_input_dim = embedding_dim
if char_cnn:
self.lstm_input_dim = (
self.embedding_dim + self.char_cnn_layer.embedding_dim
)
self.lstm = nn.LSTM(
self.lstm_input_dim,
hidden_dim // 2,
num_layers=1,
bidirectional=True,
batch_first=True,
)
# Maps the output of the LSTM into tag space.
self.hidden2tag = nn.Linear(hidden_dim, self.tagset_size)
# Matrix of transition parameters. Entry i,j is the score of
# transitioning *to* i *from* j.
self.transitions = nn.Parameter(
torch.randn(self.tagset_size, self.tagset_size)
)
# These two statements enforce the constraint that we never transfer
# to the start tag and we never transfer from the stop tag
self.transitions.data[tag_to_ix[START_TAG], :] = -10000
self.transitions.data[:, tag_to_ix[STOP_TAG]] = -10000
def init_hidden(self, batch):
# cell state and hidden state initialization
# D*num_layers x batch x hidden_dim
# D = 2 if bidirectional=True otherwise 1
return (
torch.randn(2, batch, self.hidden_dim // 2).to(DEVICE),
torch.randn(2, batch, self.hidden_dim // 2).to(DEVICE),
)
def _forward_alg(self, feats, golds=None, cost=None):
# Do the forward algorithm to compute the partition function
init_alphas = torch.full((1, self.tagset_size), -10000.0).to(
DEVICE
) # 1 x |tag_set|
# START_TAG has all of the score.
init_alphas[0][self.tag_to_ix[START_TAG]] = 0.0
# Wrap in a variable so that we will get automatic backprop
forward_var = init_alphas
# Iterate through the sentence: the emission scores
for i, feat in enumerate(feats):
alphas_t = [] # The forward tensors at this timestep
for next_tag in range(self.tagset_size):
# broadcast the emission score: it is the same regardless of
# the previous tag
emit_score = (
feat[next_tag].view(1, -1).expand(1, self.tagset_size)
)
# the ith entry of trans_score is the score of transitioning to
# next_tag from i
trans_score = self.transitions[next_tag].view(1, -1)
# The ith entry of next_tag_var is the value for the
# edge (i -> next_tag) before we do log-sum-exp
next_tag_var = None
if cost is not None:
# generate log sum exp(score + cost)
next_tag_var = (
forward_var
+ trans_score
+ emit_score
+ cost(golds[i], next_tag)
)
else:
next_tag_var = forward_var + trans_score + emit_score
                assert next_tag_var is not None
# The forward variable for this tag is log-sum-exp of all the
# scores.
alphas_t.append(log_sum_exp(next_tag_var).view(1))
forward_var = torch.cat(alphas_t).view(1, -1)
terminal_var = forward_var + self.transitions[self.tag_to_ix[STOP_TAG]]
alpha = log_sum_exp(terminal_var)
return alpha
def _get_lstm_features(self, sentences, seq_lens):
# for getting sentence features from LSTM in tag space
batch_size = len(sentences)
self.hidden = self.init_hidden(batch=batch_size)
# embeds shape: batch x seq_len x emb_dim
embeds = self.word_embeds(sentences)
# character-level embedding
if self.char_cnn:
# generate char-level embedding for each token, go over sequence
char_embeddeds = []
for i in range(sentences.size()[1]):
token_vector = sentences[:, i]
char_tensor = convert_to_char_tensor(
token_vector, word_vocab, char_vocab, self.max_word_len
).to(DEVICE)
char_embedded = self.char_cnn_layer(char_tensor)
char_embedded = torch.transpose(char_embedded, 1, 2)
char_embeddeds.append(char_embedded)
# concatenate all chars together in sequence level
char_embeddeds = torch.cat(char_embeddeds, 1)
# concatenate word and char-level embedding together in embedding dimension
embeds = torch.cat([char_embeddeds, embeds], 2)
packed_embeds = pack_padded_sequence(
embeds, seq_lens, batch_first=True
)
# LSTM output: batch x seq_len x hidden_dim
lstm_out, self.hidden = self.lstm(packed_embeds, self.hidden)
lstm_out, _ = pad_packed_sequence(lstm_out, batch_first=True)
# generate emission score with linear layer
lstm_feats = self.hidden2tag(lstm_out)
# len(sentence) x len(tag_set)
return lstm_feats
def _score_sentence(self, feats, tags):
# Gives the score of a provided tag sequence
score = torch.zeros(1).to(DEVICE)
tags = torch.cat(
[
torch.tensor([self.tag_to_ix[START_TAG]], dtype=torch.long).to(
DEVICE
),
tags,
]
)
for i, feat in enumerate(feats):
score = (
score
+ self.transitions[tags[i + 1], tags[i]]
+ feat[tags[i + 1]]
)
score = score + self.transitions[self.tag_to_ix[STOP_TAG], tags[-1]]
return score
def _viterbi_decode(self, feats, golds=None, cost=None):
backpointers = []
# Initialize the viterbi variables in log space
init_vvars = torch.full((1, self.tagset_size), -10000.0).to(DEVICE)
init_vvars[0][self.tag_to_ix[START_TAG]] = 0
# forward_var at step i holds the viterbi variables for step i-1
forward_var = init_vvars
for i, feat in enumerate(feats):
bptrs_t = [] # holds the backpointers for this step
viterbivars_t = [] # holds the viterbi variables for this step
for next_tag in range(self.tagset_size):
# next_tag_var[i] holds the viterbi variable for tag i at the
# previous step, plus the score of transitioning
# from tag i to next_tag.
# We don't include the emission scores here because the max
# does not depend on them (we add them in below)
next_tag_var = None
if cost is not None:
# get the cost score
cost_score = torch.full(
(1, self.tagset_size), cost(golds[i], next_tag)
).to(DEVICE)
# add to the score
next_tag_var = (
forward_var + self.transitions[next_tag] + cost_score
)
else:
next_tag_var = forward_var + self.transitions[next_tag]
                assert next_tag_var is not None
best_tag_id = argmax(next_tag_var)
bptrs_t.append(best_tag_id)
viterbivars_t.append(next_tag_var[0][best_tag_id].view(1))
# Now add in the emission scores, and assign forward_var to the set
# of viterbi variables we just computed
forward_var = (torch.cat(viterbivars_t) + feat).view(1, -1)
backpointers.append(bptrs_t)
# Transition to STOP_TAG
terminal_var = forward_var + self.transitions[self.tag_to_ix[STOP_TAG]]
best_tag_id = argmax(terminal_var)
path_score = terminal_var[0][best_tag_id]
# Follow the back pointers to decode the best path.
best_path = [best_tag_id]
for bptrs_t in reversed(backpointers):
best_tag_id = bptrs_t[best_tag_id]
best_path.append(best_tag_id)
        # Pop off the start tag (we don't want to return that to the caller)
start = best_path.pop()
assert start == self.tag_to_ix[START_TAG] # Sanity check
best_path.reverse()
return path_score, best_path
def neg_log_likelihood(self, sentence, tags, seq_lens):
# loss function: negative log likelihood
# emission score: seq_len x batch_size x len(tag_set)
feats_tensor = self._get_lstm_features(sentence, seq_lens)
loss = torch.tensor(0, dtype=torch.long)
        # go over the batch dimension
for i in range(feats_tensor.size()[0]):
feats = feats_tensor[i, : seq_lens[i], :]
tag_seq = tags[i, : seq_lens[i]]
current_loss = None
            if self.loss_type == "softmax_margin_loss":
# soft margin loss = - gold score + normalizer(log_sum_exp (score + cost))
forward_score = self._forward_alg(feats, tag_seq, self.cost)
gold_score = self._score_sentence(feats, tag_seq)
current_loss = forward_score - gold_score
elif self.loss_type == "svm_loss":
# svm loss = - gold score + max(score + cost)
viterbi_score, _ = self._viterbi_decode(
feats, tag_seq, self.cost
)
gold_score = self._score_sentence(feats, tag_seq)
current_loss = viterbi_score - gold_score
elif self.loss_type == "ramp_loss":
# ramp loss = - max(score) + max(score + cost)
viterbi_score, _ = self._viterbi_decode(feats)
viterbi_score_with_cost, _ = self._viterbi_decode(
feats, tag_seq, self.cost
)
current_loss = viterbi_score_with_cost - viterbi_score
elif self.loss_type == "soft_ramp_loss":
# soft ramp loss = - log_sum_exp (score) + log_sum_exp (score + cost)
forward_score = self._forward_alg(feats)
forward_score_with_cost = self._forward_alg(
feats, tag_seq, self.cost
)
current_loss = forward_score_with_cost - forward_score
else:
# crf loss = - gold score + normalizer(log_sum_exp (score))
forward_score = self._forward_alg(feats, tag_seq)
gold_score = self._score_sentence(feats, tag_seq)
current_loss = forward_score - gold_score
            assert current_loss is not None
loss = loss + current_loss
return loss
def forward(
self, sentence, seq_lens
    ):  # don't confuse this with _forward_alg above.
scores, preds = [], []
# Get the "emission scores" from the BiLSTM
lstm_feats_tensor = self._get_lstm_features(sentence, seq_lens)
for i in range(lstm_feats_tensor.size()[0]):
lstm_feats = lstm_feats_tensor[i, : seq_lens[i], :]
# Find the best path, given the features.
score, tag_seq = self._viterbi_decode(lstm_feats)
scores += [score]
preds += [tag_seq]
return scores, preds
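# Added sketch (not from the original file): the loss variants above differ
# only in how candidate scores are aggregated -- svm_loss takes a hard max
# over cost-augmented scores, softmax_margin_loss a log-sum-exp over them.
_demo_scores = torch.tensor([2.0, 1.0, 0.5])  # hypothetical augmented path scores
_demo_hard_max = _demo_scores.max()  # aggregation behind svm_loss
_demo_soft_max = torch.logsumexp(_demo_scores, dim=0)  # aggregation behind softmax_margin_loss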
class CharCNN(nn.Module):
def __init__(
self,
stride=2,
kernel=2,
embedding_dim=4,
max_word_len=20,
):
super(CharCNN, self).__init__()
# Parameters regarding text preprocessing
self.embedding_dim = embedding_dim
self.max_word_len = max_word_len
self.vocab_size = len(char_vocab.token2idx)
# Dropout definition
self.dropout = nn.Dropout(0.25)
# CNN parameters definition
self.kernel = kernel
self.stride = stride
self.padding = self.kernel - 1
# Embedding layer definition:
self.embedding = nn.Embedding(
self.vocab_size,
self.embedding_dim,
padding_idx=0,
)
# Convolution layer definition
self.conv = nn.Conv1d(
self.embedding_dim,
self.embedding_dim,
kernel_size=self.kernel,
stride=self.stride,
padding=self.padding,
)
self.output_dim = (
self.max_word_len + 2 * self.padding - (self.kernel - 1) - 1
) // self.stride + 1
# Max pooling layers definition
self.pool = nn.MaxPool1d(self.output_dim, 1)
def forward(self, X):
# X: input token
embedded = self.embedding(X)
embedded = torch.transpose(embedded, 1, 2)
embedded = self.dropout(embedded)
conv_out = self.conv(embedded)
pool_out = self.pool(conv_out)
return pool_out
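# Added shape check (an assumption, not in the original file; requires the
# char_vocab defined earlier in this module):
if __name__ == "__main__":
    _cnn = CharCNN()
    _X = torch.zeros(8, _cnn.max_word_len, dtype=torch.long)  # batch of 8 padded words
    print(_cnn(_X).shape)  # expected: torch.Size([8, 4, 1])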
| 2.46875
| 2
|
napari/_qt/qt_plugin_sorter.py
|
danielballan/napari
| 0
|
12777487
|
"""Provides a QtPluginSorter that allows the user to change plugin call order.
"""
from typing import List, Optional, Union
from qtpy.QtCore import QEvent, Qt, Signal, Slot
from qtpy.QtWidgets import (
QCheckBox,
QComboBox,
QDialog,
QFrame,
QGraphicsOpacityEffect,
QHBoxLayout,
QLabel,
QListWidget,
QListWidgetItem,
QSizePolicy,
QVBoxLayout,
QWidget,
)
from ..plugins import plugin_manager as napari_plugin_manager
from napari_plugin_engine import HookImplementation, HookCaller, PluginManager
from .utils import drag_with_pixmap
class ImplementationListItem(QFrame):
"""A Widget to render each hook implementation item in a ListWidget.
Parameters
----------
item : QListWidgetItem
An item instance from a QListWidget. This will most likely come from
:meth:`QtHookImplementationListWidget.add_hook_implementation_to_list`.
parent : QWidget, optional
The parent widget, by default None
Attributes
----------
plugin_name_label : QLabel
The name of the plugin providing the hook implementation.
enabled_checkbox : QCheckBox
Checkbox to set the ``enabled`` status of the corresponding hook
implementation.
opacity : QGraphicsOpacityEffect
The opacity of the whole widget. When self.enabled_checkbox is
unchecked, the opacity of the item is decreased.
"""
def __init__(self, item: QListWidgetItem, parent: QWidget = None):
super().__init__(parent)
self.setToolTip("Click and drag to change call order")
self.item = item
self.opacity = QGraphicsOpacityEffect(self)
self.setGraphicsEffect(self.opacity)
layout = QHBoxLayout()
self.setLayout(layout)
self.position_label = QLabel()
self.update_position_label()
self.plugin_name_label = QLabel(item.hook_implementation.plugin_name)
self.enabled_checkbox = QCheckBox(self)
self.enabled_checkbox.setToolTip("Uncheck to disable this plugin")
self.enabled_checkbox.stateChanged.connect(self._set_enabled)
self.enabled_checkbox.setChecked(
getattr(item.hook_implementation, 'enabled', True)
)
layout.addWidget(self.position_label)
layout.addWidget(self.enabled_checkbox)
layout.addWidget(self.plugin_name_label)
layout.setStretch(2, 1)
layout.setContentsMargins(0, 0, 0, 0)
def _set_enabled(self, state: Union[bool, int]):
"""Set the enabled state of this hook implementation to ``state``."""
self.item.hook_implementation.enabled = bool(state)
self.opacity.setOpacity(1 if state else 0.5)
def update_position_label(self, order=None):
"""Update the label showing the position of this item in the list.
Parameters
----------
order : list, optional
A HookOrderType list ... unused by this function, but here for ease
of signal connection, by default None.
"""
position = self.item.listWidget().indexFromItem(self.item).row() + 1
self.position_label.setText(str(position))
class QtHookImplementationListWidget(QListWidget):
"""A ListWidget to display & sort the call order of a hook implementation.
This class will usually be instantiated by a
:class:`~napari._qt.qt_plugin_sorter.QtPluginSorter`. Each item in the list
will be rendered as a :class:`ImplementationListItem`.
Parameters
----------
parent : QWidget, optional
Optional parent widget, by default None
hook : HookCaller, optional
The ``HookCaller`` for which to show implementations. by default None
(i.e. no hooks shown)
Attributes
----------
hook_caller : HookCaller or None
The current ``HookCaller`` instance being shown in the list.
"""
order_changed = Signal(list) # emitted when the user changes the order.
def __init__(
self,
parent: Optional[QWidget] = None,
hook_caller: Optional[HookCaller] = None,
):
super().__init__(parent)
self.setDefaultDropAction(Qt.MoveAction)
self.setDragEnabled(True)
self.setDragDropMode(self.InternalMove)
self.setSelectionMode(self.SingleSelection)
self.setAcceptDrops(True)
self.setSpacing(1)
self.setMinimumHeight(1)
self.setSizePolicy(
QSizePolicy.MinimumExpanding, QSizePolicy.MinimumExpanding
)
self.order_changed.connect(self.permute_hook)
self.hook_caller: Optional[HookCaller] = None
self.set_hook_caller(hook_caller)
def set_hook_caller(self, hook_caller: Optional[HookCaller]):
"""Set the list widget to show hook implementations for ``hook_caller``.
Parameters
----------
hook_caller : HookCaller, optional
A ``HookCaller`` for which to show implementations. by default None
(i.e. no hooks shown)
"""
self.clear()
self.hook_caller = hook_caller
if not hook_caller:
return
# _nonwrappers returns hook implementations in REVERSE call order
# so we reverse them here to show them in the list in the order in
# which they get called.
for hook_implementation in reversed(hook_caller._nonwrappers):
self.append_hook_implementation(hook_implementation)
def append_hook_implementation(
self, hook_implementation: HookImplementation
):
"""Add a list item for ``hook_implementation`` with a custom widget.
Parameters
----------
hook_implementation : HookImplementation
The hook implementation object to add to the list.
"""
item = QListWidgetItem(parent=self)
item.hook_implementation = hook_implementation
self.addItem(item)
widg = ImplementationListItem(item, parent=self)
item.setSizeHint(widg.sizeHint())
self.order_changed.connect(widg.update_position_label)
self.setItemWidget(item, widg)
def dropEvent(self, event: QEvent):
"""Triggered when the user moves & drops one of the items in the list.
Parameters
----------
event : QEvent
The event that triggered the dropEvent.
"""
super().dropEvent(event)
order = [self.item(r).hook_implementation for r in range(self.count())]
self.order_changed.emit(order)
def startDrag(self, supportedActions: Qt.DropActions):
drag = drag_with_pixmap(self)
drag.exec_(supportedActions, Qt.MoveAction)
@Slot(list)
def permute_hook(self, order: List[HookImplementation]):
"""Rearrage the call order of the hooks for the current hook impl.
Parameters
----------
order : list
A list of str, hook_implementation, or module_or_class, with the
desired CALL ORDER of the hook implementations.
"""
if not self.hook_caller:
return
self.hook_caller.bring_to_front(order)
class QtPluginSorter(QDialog):
"""Dialog that allows a user to change the call order of plugin hooks.
A main QComboBox lets the user pick which hook specification they would
like to reorder. Then a :class:`QtHookImplementationListWidget` shows the
current call order for all implementations of the current hook
specification. The user may then reorder them, or disable them by checking
the checkbox next to each hook implementation name.
Parameters
----------
plugin_manager : PluginManager, optional
An instance of a PluginManager. by default, the main
:class:`~napari.plugins.manager.PluginManager` instance
parent : QWidget, optional
Optional parent widget, by default None
initial_hook : str, optional
If provided the QComboBox at the top of the dialog will be set to
this hook, by default None
firstresult_only : bool, optional
If True, only hook specifications that declare the "firstresult"
option will be included. (these are hooks for which only the first
        non-None result is returned). by default True (because it makes
less sense to sort hooks where we just collect all results anyway)
https://pluggy.readthedocs.io/en/latest/#first-result-only
Attributes
----------
hook_combo_box : QComboBox
A dropdown menu to select the current hook.
hook_list : QtHookImplementationListWidget
The list widget that displays (and allows sorting of) all of the hook
implementations for the currently selected hook.
"""
NULL_OPTION = 'select hook... '
def __init__(
self,
plugin_manager: PluginManager = napari_plugin_manager,
*,
parent: Optional[QWidget] = None,
initial_hook: Optional[str] = None,
firstresult_only: bool = True,
):
super().__init__(parent)
self.setWindowModality(Qt.NonModal)
self.plugin_manager = plugin_manager
self.layout = QVBoxLayout()
self.setLayout(self.layout)
self.hook_combo_box = QComboBox()
self.hook_combo_box.addItem(self.NULL_OPTION)
# populate comboBox with all of the hooks known by the plugin manager
hooks = []
for name, hook_caller in plugin_manager.hooks.items():
if firstresult_only:
# if the firstresult_only option is set
# we only want to include hook_specifications that declare the
# "firstresult" option as True.
if not hook_caller.spec.opts.get('firstresult', False):
continue
hooks.append(name)
self.hook_combo_box.addItems(hooks)
self.hook_combo_box.setToolTip(
"select the hook specification to reorder"
)
self.hook_combo_box.activated[str].connect(self.set_current_hook)
self.hook_list = QtHookImplementationListWidget(parent=self)
title = QLabel('Plugin Sorter')
title.setObjectName("h2")
self.layout.addWidget(title)
instructions = QLabel(
'Select a hook to rearrange, then drag and '
'drop plugins into the desired call order. '
'\nDisable plugins by unchecking their checkbox.'
)
instructions.setWordWrap(True)
self.layout.addWidget(instructions)
self.layout.addWidget(self.hook_combo_box)
self.layout.addWidget(self.hook_list)
if initial_hook is not None:
self.hook_combo_box.setCurrentText(initial_hook)
self.set_current_hook(initial_hook)
def set_current_hook(self, hook: str):
"""Change the hook specification shown in the list widget.
Parameters
----------
hook : str
Name of the new hook specification to show.
"""
if hook == self.NULL_OPTION:
hook_caller = None
else:
hook_caller = getattr(self.plugin_manager.hooks, hook)
self.hook_list.set_hook_caller(hook_caller)
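# Added usage sketch (an assumption, not part of napari): open the sorter
# dialog inside a running Qt event loop.
if __name__ == "__main__":
    from qtpy.QtWidgets import QApplication
    app = QApplication([])
    sorter = QtPluginSorter()
    sorter.show()
    app.exec_()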
| 2.484375
| 2
|
tests/test_docs.py
|
hopcolony/python-aiohopcolony
| 0
|
12777488
|
<gh_stars>0
import pytest
from .config import *
import aiohopcolony
from aiohopcolony import docs
@pytest.fixture
async def project():
return await aiohopcolony.initialize(username=user_name, project=project_name,
token=token)
@pytest.fixture
def db():
return docs.client()
class TestDocs(object):
index = ".hop.tests"
uid = "hopcolony"
data = {"purpose": "Test Hop Docs!"}
@pytest.mark.asyncio
async def test_a_initialize(self, project, db):
        assert project.config is not None
assert project.name == project_name
assert db.project.name == project.name
assert db.client.host == "docs.hopcolony.io"
assert db.client.identity == project.config.identity
@pytest.mark.asyncio
async def test_b_status(self, db):
status = await db.status
assert status["status"] != "red"
@pytest.mark.asyncio
async def test_c_create_document(self, db):
snapshot = await db.index(self.index).document(self.uid).setData(self.data)
assert snapshot.success == True
doc = snapshot.doc
assert doc.index == self.index
assert doc.id == self.uid
assert doc.source == self.data
@pytest.mark.asyncio
async def test_d_get_document(self, db):
snapshot = await db.index(self.index).document(self.uid).get()
assert snapshot.success == True
doc = snapshot.doc
assert doc.index == self.index
assert doc.id == self.uid
assert doc.source == self.data
@pytest.mark.asyncio
async def test_e_delete_document(self, db):
snapshot = await db.index(self.index).document(self.uid).delete()
assert snapshot.success == True
@pytest.mark.asyncio
async def test_f_find_non_existing(self, db):
snapshot = await db.index(self.index).document(self.uid).get()
assert snapshot.success == False
snapshot = await db.index(self.index).document(self.uid).update({"data": "test"})
assert snapshot.success == False
snapshot = await db.index(self.index).document(self.uid).delete()
assert snapshot.success == False
snapshot = await db.index(".does.not.exist").get()
assert snapshot.success == False
@pytest.mark.asyncio
async def test_g_create_document_without_id(self, db):
snapshot = await db.index(self.index).add(self.data)
assert snapshot.success == True
doc = snapshot.doc
assert doc.index == self.index
assert doc.source == self.data
snapshot = await db.index(self.index).document(doc.id).delete()
assert snapshot.success == True
@pytest.mark.asyncio
async def test_h_delete_index(self, db):
result = await db.index(self.index).delete()
assert result == True
@pytest.mark.asyncio
async def test_i_index_not_there(self, db):
result = await db.get()
assert self.index not in [index.name for index in result]
| 1.976563
| 2
|
jiant/scripts/download_data/constants.py
|
isspek/jiant
| 0
|
12777489
|
<gh_stars>0
# Directly download tasks when not available in HF Datasets, or HF Datasets version
# is not suitable
SQUAD_TASKS = {"squad_v1", "squad_v2"}
DIRECT_SUPERGLUE_TASKS_TO_DATA_URLS = {
"wsc": f"https://dl.fbaipublicfiles.com/glue/superglue/data/v2/WSC.zip",
"multirc": f"https://dl.fbaipublicfiles.com/glue/superglue/data/v2/MultiRC.zip",
"record": f"https://dl.fbaipublicfiles.com/glue/superglue/data/v2/ReCoRD.zip",
}
FAKENEWS_TASKS = {"fakenews_forecasting", "fakenews_unseen_1", "fakenews_unseen_2", "fakenews_unseen_3",
"fakenews_unseen_4", "fakenews_unseen_5",
"nela_unseen_1", "nela_unseen_2", "nela_unseen_3",
"nela_unseen_4", "nela_unseen_5", "fakenews_forecasting_reliability",
"fakenews_unseen_reliability_1", "fakenews_unseen_reliability_2", "fakenews_unseen_reliability_3",
"fakenews_unseen_reliability_4", "fakenews_unseen_reliability_5",
"fakenewscorpus_1", "fakenewscorpus_2", "fakenewscorpus_3", "fakenewscorpus_4", "fakenewscorpus_5",
"unseen_cind_satire_1", "unseen_cind_satire_2", "unseen_cind_satire_3", "unseen_cind_satire_4",
"unseen_cind_satire_5", "nela_satire_1", "nela_satire_2", "nela_satire_3",
"nela_satire_4", "nela_satire_5",
"fakenewscorpus_satire_1", "fakenewscorpus_satire_2", "fakenewscorpus_satire_3", "fakenewscorpus_satire_4", "fakenewscorpus_satire_5"
}
CLAIMBUSTER_TASKS = {'claimbuster_1', 'claimbuster_2', 'claimbuster_3', 'claimbuster_4', 'claimbuster_5'}
OTHER_DOWNLOAD_TASKS = {
"abductive_nli",
"fever_nli",
"swag",
"qamr",
"qasrl",
"newsqa",
"mrqa_natural_questions",
"piqa",
"winogrande",
}
DIRECT_DOWNLOAD_TASKS = set(
list(SQUAD_TASKS) + list(DIRECT_SUPERGLUE_TASKS_TO_DATA_URLS) + list(OTHER_DOWNLOAD_TASKS)
)
OTHER_HF_DATASETS_TASKS = {
"snli",
"commonsenseqa",
"hellaswag",
"cosmosqa",
"socialiqa",
"scitail",
"quoref",
"adversarial_nli_r1",
"adversarial_nli_r2",
"adversarial_nli_r3",
"arc_easy",
"arc_challenge",
}
| 1.390625
| 1
|
easybill_rest/tests/test_logins.py
|
soerenbe/py-ebrest
| 5
|
12777490
|
import unittest
from unittest import mock
from easybill_rest import Client
from easybill_rest.resources.resource_logins import ResourceLogins
from easybill_rest.tests.test_case_abstract import EasybillRestTestCaseAbstract
class TestResourceLogins(unittest.TestCase, EasybillRestTestCaseAbstract):
def setUp(self) -> None:
mocked_object = mock.Mock()
mocked_object.call = mock.Mock(return_value={})
self.mocked_object = ResourceLogins(mocked_object)
def test_get_endpoint(self) -> None:
self.assertEqual("/logins", Client('').logins().get_resource_endpoint())
def test_get_logins(self) -> None:
self.assertTrue(isinstance(
self.mocked_object.get_logins({"page": "2"}), dict))
def test_get_login(self) -> None:
self.assertTrue(isinstance(self.mocked_object.get_login("3"), dict))
@staticmethod
def get_suite() -> unittest.TestSuite:
return unittest.TestSuite(map(TestResourceLogins, [
'test_get_endpoint',
'test_get_logins',
'test_get_login',
]))
| 2.59375
| 3
|
tests/test_model.py
|
rychallener/TauREx3_public
| 0
|
12777491
|
<reponame>rychallener/TauREx3_public<filename>tests/test_model.py
import unittest
import shutil
import tempfile
from os import path
from unittest.mock import patch, mock_open
from taurex.model.model import ForwardModel
from taurex.model.simplemodel import SimpleForwardModel
import numpy as np
import pickle
class ForwardModelTest(unittest.TestCase):
def test_init(self):
pass
class SimpleForwardModelTest(unittest.TestCase):
def test_init(self):
model = SimpleForwardModel('test')
| 2.34375
| 2
|
other/mean_std.py
|
huhuzwxy/keras_classfication
| 2
|
12777492
|
import os
from PIL import Image
import numpy as np
# Compute the mean and std of an image dataset
root_path = '../train_data'
_filename = os.listdir(root_path)
filename = []
for _file in _filename:
if not _file.endswith('.txt'):
filename.append(_file)
# running sums of the per-image channel means
R_channel_m = 0
G_channel_m = 0
B_channel_m = 0
# running sums of the per-image channel variances
R_channel_s = 0
G_channel_s = 0
B_channel_s = 0
num = len(filename)
for i in range(len(filename)):
img = Image.open(os.path.join(root_path, filename[i]))
img = img.convert('RGB')
img = np.array(img)
    img = img[:, :, ::-1]  # convert RGB to BGR
    img = img.astype(np.float32) / 255
B_channel_m = B_channel_m + np.sum(img[:, :, 0])/(img.shape[0]* img.shape[1])
G_channel_m = G_channel_m + np.sum(img[:, :, 1])/(img.shape[0]* img.shape[1])
R_channel_m = R_channel_m + np.sum(img[:, :, 2])/(img.shape[0]* img.shape[1])
B_mean = B_channel_m / num
G_mean = G_channel_m / num
R_mean = R_channel_m / num
for i in range(len(filename)):
img = Image.open(os.path.join(root_path, filename[i]))
img = img.convert('RGB')
img = np.array(img)
img = img[:, :, ::-1]
    img = img.astype(np.float32) / 255
    B_channel_s = B_channel_s + np.sum(np.power(img[:, :, 0] - B_mean, 2)) / (img.shape[0] * img.shape[1])
    G_channel_s = G_channel_s + np.sum(np.power(img[:, :, 1] - G_mean, 2)) / (img.shape[0] * img.shape[1])
    R_channel_s = R_channel_s + np.sum(np.power(img[:, :, 2] - R_mean, 2)) / (img.shape[0] * img.shape[1])
B_std = np.sqrt(B_channel_s/num)
G_std = np.sqrt(G_channel_s/num)
R_std = np.sqrt(R_channel_s/num)
with open('mean_std.txt','w')as f:
text = "B_mean is %f, G_mean is %f, R_mean is %f" % (B_mean, G_mean, R_mean) + '\n' + "B_std is %f, G_std is %f, R_std is %f" % (B_std, G_std, R_std)
f.write(text)
print("B_mean is %f, G_mean is %f, R_mean is %f" % (B_mean, G_mean, R_mean))
print("B_std is %f, G_std is %f, R_std is %f" % (B_std, G_std, R_std))
| 2.6875
| 3
|
pydefect/tests/cli/vasp/test_make_unitcell.py
|
KazMorita/pydefect
| 1
|
12777493
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020. Distributed under the terms of the MIT License.
from pydefect.cli.vasp.make_unitcell import make_unitcell_from_vasp
from pymatgen.io.vasp import Vasprun, Outcar
def test_unitcell(vasp_files):
"""
HEAD OF MICROSCOPIC STATIC DIELECTRIC TENSOR (INDEPENDENT PARTICLE, excluding Hartree and local field effects)
------------------------------------------------------
1.269877 0.000000 -0.000000
0.000000 1.269877 0.000000
0.000000 0.000000 1.269877
------------------------------------------------------
MACROSCOPIC STATIC DIELECTRIC TENSOR (including local field effects in DFT)
------------------------------------------------------
1.255879 0.000000 -0.000000
-0.000000 1.255879 0.000000
-0.000000 0.000000 1.255879
------------------------------------------------------
"""
path = vasp_files / "unitcell_He_solid"
unitcell = make_unitcell_from_vasp(
vasprun_band=Vasprun(path / "vasprun-band.xml"),
outcar_band=Outcar(path / "OUTCAR-band"),
outcar_dielectric_clamped=Outcar(path / "OUTCAR-dielectric"),
outcar_dielectric_ionic=Outcar(path / "OUTCAR-dielectric"),
)
assert unitcell.vbm == -10.3168
assert unitcell.cbm == 1.2042
assert unitcell.ele_dielectric_const[0][0] == 1.255879
assert unitcell.ion_dielectric_const[0][0] == 0.0
| 1.945313
| 2
|
chromapy/chromapy.py
|
KShammout632/ChromaPy
| 0
|
12777494
|
import argparse
import numpy as np
import torch
import torch.optim as optim
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
from torch.utils import data
from skimage import color
from PIL import Image
import matplotlib.pyplot as plt
from cnn_model import Model
# from cnn_model2 import Model as Model_unet
import pickle
from keras.datasets import cifar10
from sklearn.model_selection import train_test_split
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--image", type=str, required=False,
help="path to input black and white image")
parser.add_argument('--use_gpu', action='store_true', default=False,
help='whether to use GPU')
return parser.parse_args()
def preprocess_training_set(train):
processed_x = []
processed_y = []
for image in train:
l, ab = preprocess_image(image)
processed_x.append(l)
processed_y.append(ab)
return processed_x, processed_y
def preprocess_image(img, height=256, width=256):
"""Return the light intensity part of an image, resized and converted to tensor"""
# image = Image.open(img).convert('RGB')
# image_r = image.resize((width, height))
image_r_np = np.array(img) / 255.0
# Convert image to Lab format
image_lab = color.rgb2lab(image_r_np)
# Extract L dimension
image_l = image_lab[:,:,0]
image_ab = image_lab[:,:,1:]
# Convert to tensor and add relevant dimensions
image_l = image_l[None,:,:]
return image_l, image_ab
def postprocess_tens(orig_img, ab, mode='bilinear'):
# orig_img 1 x 1 x H_orig x W_orig
# ab 1 x 2 x H x W
HW_orig = orig_img.shape[2:]
HW = ab.shape[2:]
# Resize if needed
if(HW_orig[0]!=HW[0] or HW_orig[1]!=HW[1]):
ab_orig = F.interpolate(ab, size=HW_orig, mode=mode)
else:
ab_orig = ab
out_lab_orig = torch.cat((orig_img, ab_orig), dim=1)
out_lab_orig = out_lab_orig.data.cpu().numpy()
return color.lab2rgb(out_lab_orig.transpose((0,2,3,1)))
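# Added sanity check (not in the original script): rgb2lab expects RGB floats
# in [0, 1] -- hence the /255.0 in preprocess_image -- and maps white to
# L=100 with a, b near 0.
assert abs(color.rgb2lab(np.ones((1, 1, 3)))[0, 0, 0] - 100.0) < 1e-3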
args = parse_arguments()
# image_dict = unpickle('C:\\Users\\karee\\Desktop\\ChromaPy\\data\\cifar-10-python\\cifar-10-batches-py\\data_batch_1')
# print(image_dict[b'data'])
(X, y), (x_test, y_test) = cifar10.load_data()
# Split data into training and validation
x_train, x_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42)
og_image = x_train[0:10]
x_train, y_train = preprocess_training_set(x_train[:10])
x_val, y_val = preprocess_training_set(x_val[:10])
tensor_x_train = torch.Tensor(x_train).float()
tensor_x_val = torch.Tensor(x_val).float()
tensor_y_train = torch.Tensor(y_train).permute(0,3,1,2).float()
tensor_y_val = torch.Tensor(y_val).permute(0,3,1,2).float()
# Dataset dictionary
dsets = {
"train": data.TensorDataset(tensor_x_train,tensor_y_train),
"val": data.TensorDataset(tensor_x_val,tensor_y_val)}
dataloaders = {x : data.DataLoader(dsets[x], batch_size=6, shuffle=True)
for x in ['train', 'val']}
dataset_sizes = {x : len(dsets[x]) for x in ["train","val"]}
# model_unet = Model_unet(1,2)
# model_unet_ft = model_unet.fit(dataloaders,1)
# ab_out = model_unet_ft.forward(tensor_x_train[0:5])
model = Model()
model_ft = model.fit(dataloaders, 1)
ab_out = model_ft.forward(tensor_x_train[0:5])
image_new = postprocess_tens(tensor_x_train[0:5], ab_out)
f, axarr = plt.subplots(2,2)
axarr[0,0].imshow(og_image[0])
axarr[0,1].imshow(image_new[0])
axarr[1,0].imshow(og_image[1])
axarr[1,1].imshow(image_new[1])
plt.show()
| 2.515625
| 3
|
tests/test_validating.py
|
ealesid/starlette-jsonrpc
| 29
|
12777495
|
from . import client
# JSON
def test_payload_as_empty_dict():
payload = {}
response = client.post("/api/", json=payload)
assert response.json() == {
"jsonrpc": "2.0",
"id": "None",
"error": {"code": -32600, "message": "Invalid Request.", "data": {}},
}
def test_payload_as_empty_list():
payload = []
response = client.post("/api/", json=payload)
assert response.json() == {
"jsonrpc": "2.0",
"id": "None",
"error": {"code": -32600, "message": "Invalid Request.", "data": {}},
}
def test_incorrect_payload():
payload = [1]
response = client.post("/api/", json=payload)
assert response.json() == {
"jsonrpc": "2.0",
"id": "None",
"error": {"code": -32600, "message": "Invalid Request.", "data": {}},
}
# PARAMS
def test_positional_parameters():
payload = {
"jsonrpc": "2.0",
"method": "subtract_positional",
"params": [42, 23],
"id": "1",
}
response = client.post("/api/", json=payload)
assert response.json() == {"jsonrpc": "2.0", "id": "1", "result": 19}
def test_positional_parameters_2():
payload = {
"jsonrpc": "2.0",
"method": "subtract_positional",
"params": [23, 42],
"id": "1",
}
response = client.post("/api/", json=payload)
assert response.json() == {"jsonrpc": "2.0", "id": "1", "result": -19}
def test_named_parameters():
payload = {
"jsonrpc": "2.0",
"method": "SubtractMethod",
"params": {"x": 42, "y": 23},
"id": "1",
}
response = client.post("/api/", json=payload)
assert response.json() == {"jsonrpc": "2.0", "id": "1", "result": 19}
def test_named_parameters_2():
payload = {
"jsonrpc": "2.0",
"method": "SubtractMethod",
"params": {"y": 23, "x": 42},
"id": "1",
}
response = client.post("/api/", json=payload)
assert response.json() == {"jsonrpc": "2.0", "id": "1", "result": 19}
def test_named_parameters_3():
payload = {
"jsonrpc": "2.0",
"method": "sum",
"params": {"x": 42, "y": 23},
"id": "1",
}
response = client.post("/api/", json=payload)
assert response.json() == {"jsonrpc": "2.0", "id": "1", "result": {"sum": 65}}
def test_params_not_object():
payload = {"jsonrpc": "2.0", "method": "subtract", "params": "", "id": "1"}
response = client.post("/api/", json=payload)
assert response.json() == {
"jsonrpc": "2.0",
"id": "1",
"error": {
"code": -32602,
"message": "Invalid params.",
"data": {"params": "Did not match any valid type."},
},
}
def test_params_as_invalid_object():
payload = {"jsonrpc": "2.0", "method": "subtract", "params": {}, "id": "1"}
response = client.post("/api/", json=payload)
assert response.json() == {
"jsonrpc": "2.0",
"id": "1",
"error": {
"code": -32602,
"message": "Invalid params.",
"data": {"params": "Required param: 'x'"},
},
}
def test_params_as_invalid_list():
payload = {
"jsonrpc": "2.0",
"method": "subtract_positional",
"params": [1],
"id": "1",
}
response = client.post("/api/", json=payload)
assert response.json() == {
"jsonrpc": "2.0",
"id": "1",
"error": {
"code": -32602,
"message": "Invalid params.",
"data": {
"params": "subtract_positional() missing 1 required positional argument: 'y'"
},
},
}
def test_without_params():
payload = {"jsonrpc": "2.0", "method": "my_method", "id": "1"}
response = client.post("/api/", json=payload)
assert response.status_code == 200
# ID
def test_id_as_integer():
payload = {
"jsonrpc": "2.0",
"method": "subtract",
"params": {"x": 42, "y": 23},
"id": 1,
}
response = client.post("/api/", json=payload)
assert response.json() == {"jsonrpc": "2.0", "id": 1, "result": 19}
def test_id_as_string():
payload = {
"jsonrpc": "2.0",
"method": "subtract",
"params": {"x": 42, "y": 23},
"id": "abc",
}
response = client.post("/api/", json=payload)
assert response.json() == {"jsonrpc": "2.0", "id": "abc", "result": 19}
def test_id_as_null():
payload = {
"jsonrpc": "2.0",
"method": "subtract",
"params": {"x": 42, "y": 23},
"id": None,
}
response = client.post("/api/", json=payload)
assert response.json() == {"jsonrpc": "2.0", "id": None, "result": 19}
def test_empty_id():
payload = {
"jsonrpc": "2.0",
"method": "subtract",
"params": {"x": 42, "y": 23},
"id": "",
}
response = client.post("/api/", json=payload)
assert response.json() == {"jsonrpc": "2.0", "id": None, "result": 19}
def test_notification():
"""
Notification
"""
payload = {"jsonrpc": "2.0", "method": "subtract", "params": {"x": 42, "y": 23}}
response = client.post("/api/", json=payload)
assert response.json() == {}
# JSONRPC
def test_jsonrpc_as_integer():
payload = {
"jsonrpc": 2,
"method": "subtract",
"params": {"x": 42, "y": 23},
"id": "1",
}
response = client.post("/api/", json=payload)
assert response.json() == {
"jsonrpc": "2.0",
"id": "1",
"error": {
"code": -32602,
"message": "Invalid params.",
"data": {"jsonrpc": "Must be a string."},
},
}
def test_empty_jsonrpc():
payload = {
"jsonrpc": "",
"method": "subtract",
"params": {"x": 42, "y": 23},
"id": "1",
}
response = client.post("/api/", json=payload)
assert response.json() == {
"jsonrpc": "2.0",
"id": "1",
"error": {
"code": -32602,
"message": "Invalid params.",
"data": {"jsonrpc": "Must not be blank."},
},
}
def test_jsonrpc_wrong_value():
payload = {
"jsonrpc": "3.0",
"method": "subtract",
"params": {"x": 42, "y": 23},
"id": "1",
}
response = client.post("/api/", json=payload)
assert response.json() == {
"jsonrpc": "2.0",
"id": "1",
"error": {
"code": -32602,
"message": "Invalid params.",
"data": {"jsonrpc": "Must match the pattern /2.0/."},
},
}
def test_without_jsonrpc():
payload = {"method": "subtract", "params": {"x": 42, "y": 23}, "id": "1"}
response = client.post("/api/", json=payload)
assert response.json() == {
"jsonrpc": "2.0",
"id": "1",
"error": {
"code": -32602,
"message": "Invalid params.",
"data": {"jsonrpc": "This field is required."},
},
}
# METHOD
def test_not_registered_method():
payload = {
"jsonrpc": "2.0",
"method": "non_existing_method",
"params": {"x": 42, "y": 23},
"id": "1",
}
response = client.post("/api/", json=payload)
assert response.json() == {
"jsonrpc": "2.0",
"id": "1",
"error": {"code": -32601, "message": "Method not found.", "data": {}},
}
def test_without_method():
payload = {"jsonrpc": "2.0", "params": {"x": 42, "y": 23}, "id": "1"}
response = client.post("/api/", json=payload)
assert response.json() == {
"jsonrpc": "2.0",
"id": "1",
"error": {
"code": -32602,
"message": "Invalid params.",
"data": {"method": "This field is required."},
},
}
def test_with_empty_method():
payload = {"jsonrpc": "2.0", "method": "", "params": {"x": 42, "y": 23}, "id": "1"}
response = client.post("/api/", json=payload)
assert response.json() == {
"jsonrpc": "2.0",
"id": "1",
"error": {
"code": -32602,
"message": "Invalid params.",
"data": {"method": "Must not be blank."},
},
}
def test_method_as_integer():
payload = {"jsonrpc": "2.0", "method": 1, "params": {"x": 42, "y": 23}, "id": "1"}
response = client.post("/api/", json=payload)
assert response.json() == {
"jsonrpc": "2.0",
"id": "1",
"error": {
"code": -32602,
"message": "Invalid params.",
"data": {"method": "Must be a string."},
},
}
# def test_with_method_name_starting_with_rpc_period():
# pass
| 2.625
| 3
|
basic_email_user/models.py
|
garyburgmann/django-basic-email-user
| 1
|
12777496
|
from django.db import models
from django.contrib.auth.models import BaseUserManager, AbstractUser
from django.core.validators import EmailValidator
from django.contrib.auth.validators import UnicodeUsernameValidator
class UserManager(BaseUserManager):
def validate_email(self, email):
""" Verify email arguemnt and return normalised value
:param email: expect str
:returns: normalised email str if correct
:raises ValueError: invalid param email
:raises Exception: existing email
"""
if email is None:
raise ValueError("Missing email value")
elif type(email) is not str:
raise ValueError("Invalid email value, expect str")
normalized_email = self.normalize_email(email)
existing_email = \
self.model.objects.filter(email=normalized_email).first()
if existing_email:
raise Exception("This email is already assigned to another User")
return normalized_email
def create_user(self, email, name, password=None):
""" Creates and saves a User
:param email: expect str
:param name: expect str
:param password: expect str or None, default None
:returns: User model
"""
user = self.model(
email=self.validate_email(email),
name=name
)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, name, password=None):
""" Creates and saves a User with superuser privileges
:param email: expect str
:param name: expect str
:param password: expect str or None, default None
:returns: User model
"""
user = self.model(
email=self.validate_email(email),
name=name
)
user.set_password(password)
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
class User(AbstractUser):
""" User model class (AbstractUser with modified properties)
removes: username, first_name, last_name
adds: name
"""
email = models.EmailField(
verbose_name="email address",
error_messages={
'unique': "A user with that email already exists.",
},
help_text="Required. 150 characters or fewer.",
max_length=150,
unique=True,
        validators=[EmailValidator()],
)
username = None
first_name = None
last_name = None
name = models.CharField(
verbose_name="name",
max_length=150,
help_text=(
"Required. 150 characters or fewer. "
"Letters, digits and @/./+/-/_ only."
),
validators=[UnicodeUsernameValidator]
)
objects = UserManager()
USERNAME_FIELD = "email"
REQUIRED_FIELDS = ["name"]
class Meta:
db_table = "users"
| 2.75
| 3
|
Triangle_Solver/main.py
|
RobertElias/PythonProjects
| 0
|
12777497
|
import math
# Triangle Solver
print("Welcome to the Right Triangle Solver App.")
side_a = float(input("\nWhat is the first leg of the triangle: "))
side_b = float(input("What is the second leg of the triangle: "))
# Calculations
side_c = math.sqrt(side_a**2 + side_b**2)
side_c = round(side_c, 3)
area = 0.5 * side_a * side_b
area = round(area, 3)
# Summary
print("\nFor a triangle with legs of " + str(side_a) + " and " +
str(side_b) + " the hypotenuse is " + str(side_c))
print("For a triangle with legs of " + str(side_a) + " and " +
str(side_b) + " the area is " + str(area))
| 4.21875
| 4
|
python/chartParsing.py
|
pramitmallick/spinn
| 103
|
12777498
|
"""
Artificial test for chart parsing
"""
from random import shuffle
import numpy as np
import string
def generate_string(length):
letters = list(string.ascii_lowercase) + list(string.ascii_uppercase)
shuffle(letters)
output = []
for i in range(length):
output.append(letters[i])
return output
sen_length = 25
sentence = generate_string(sen_length)
# Compose : [A, B] = (A) + (B) = (AB)
# Combine : ((AB)C), (A(BC)) = (ABC)
# A + B = (AB)
# (AB) + C = ((AB)C)
def compose(l, r):
return "(" + l + r + ")"
def combine(list_versions):
return list_versions[0]
#return list_versions[0].replace("(","").replace(")","")
def compute_compositions(sent):
length = len(sent) -1
l_hiddens = sent[:-1]
l_cells = sent[:-1]
r_hiddens = sent[1:]
r_cells = sent[1:]
chart = []
masks = []
choices = []
"""
layer_0 = []
for i in range(len(sent)):
layer_0.append((sent[i], sent[i]))
chart = [layer_0]
"""
chart = [sent] # list or tuple. w/e
masks = [np.zeros(len(sent))]
choices = [sent]
for row in range(1, len(sent)):
chart.append([])
masks.append([])
choices.append([])
for col in range(len(sent) - row):
chart[row].append(None)
masks[row].append(None)
choices[row].append(None)
for row in range(1, len(sent)): # = len(l_hiddens)
for col in range(len(sent) - row):
versions = []
for i in range(row):
#print row, col, chart[row-i-1][col], chart[i][row+col-i]
versions.append(compose(chart[row-i-1][col], chart[i][row+col-i]))
chart[row][col] = combine(versions)
choices[row][col] = versions
l = len(versions)
rand_pos = np.random.randint(l)
mask = np.zeros(l)
mask[rand_pos] += 1
masks[row][col] = mask
return chart, masks, choices
chart, mask, choices = compute_compositions(sentence)
"""
for row in len(choices):
for col in len(choices[row]):
pick = choices[row][col][int(np.where(mask[row][col])[0])]
"""
print(choices[-1][-1][int(np.where(mask[-1][-1])[0])])
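# Added worked example (not in the original script): combine() keeps the
# left-branching version first, so a three-symbol input reduces
# deterministically.
demo_chart, _, _ = compute_compositions(list("abc"))
print(demo_chart[-1][-1])  # -> ((ab)c)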
| 2.8125
| 3
|
Day 28/pomodoro-start/main.py
|
Jean-Bi/100DaysOfCodePython
| 0
|
12777499
|
<gh_stars>0
from tkinter import *
import math
# ---------------------------- CONSTANTS ------------------------------- #
PINK = "#e2979c"
RED = "#e7305b"
GREEN = "#9bdeac"
YELLOW = "#f7f5dd"
FONT_NAME = "Courier"
WORK_MIN = 1
SHORT_BREAK_MIN = 5
LONG_BREAK_MIN = 20
# Number of repetitions
reps = 0
timer = None
# ---------------------------- TIMER RESET ------------------------------- #
def reset_timer():
"""Resets the timer"""
window.after_cancel(timer)
canvas.itemconfig(timer_text, text="00:00")
timer_label.config(text="Timer", fg=GREEN, bg=YELLOW, font=(FONT_NAME, 40, "normal"))
check_mark.config(text="")
# ---------------------------- TIMER MECHANISM ------------------------------- #
def start_timer():
"""Starts the timer"""
global reps
reps += 1
work_sec = WORK_MIN * 60
short_break_sec = SHORT_BREAK_MIN * 60
long_break_sec = LONG_BREAK_MIN * 60
if reps % 8 == 0:
timer_label.config(text="Break", fg=RED)
count_down(long_break_sec)
elif reps % 2 == 0:
timer_label.config(text="Break", fg=PINK)
count_down(short_break_sec)
else:
timer_label.config(text="Work", fg=GREEN)
count_down(work_sec)
# ---------------------------- COUNTDOWN MECHANISM ------------------------------- #
def count_down(count):
"""Counts the time down"""
global timer
count_min = math.floor(count / 60)
count_sec = count % 60
if count_sec < 10:
count_sec = f"0{count_sec}"
canvas.itemconfig(timer_text, text=f"{count_min}:{count_sec}")
if count > 0:
timer = window.after(1000, count_down, count-1)
else:
start_timer()
marks = ""
for i in range(math.floor(reps/2)):
marks += "✔"
check_mark.config(text=marks)
# ---------------------------- UI SETUP ------------------------------- #
# Creates the window with title, padding and background color
window = Tk()
window.title("Pomodoro")
window.config(padx=100, pady=50, bg=YELLOW)
# Timer label
timer_label = Label(text="Timer", fg=GREEN, bg=YELLOW, font=(FONT_NAME, 40, "normal"))
timer_label.grid(column=1, row=0)
# Pomodoro image
canvas = Canvas(width=200, height=224, bg=YELLOW, highlightthickness=0)
tomato_img = PhotoImage(file="tomato.png")
canvas.create_image(100, 112, image=tomato_img)
timer_text = canvas.create_text(100, 130, text="00:00", fill="white", font=(FONT_NAME, 35, "bold"))
canvas.grid(column=1, row=1)
# Start button
start_button = Button(text="Start", command=start_timer)
start_button.grid(column=0, row=2)
# Reset button
reset_button = Button(text="Reset", command=reset_timer)
reset_button.grid(column=2, row=2)
# Check marks
check_mark = Label(text="", fg=GREEN, bg=YELLOW)
check_mark.grid(column=1, row=3)
window.mainloop()
| 3.046875
| 3
|
source/16-Valor_da_conta.py
|
FelixLuciano/DesSoft-2020.2
| 0
|
12777500
|
<reponame>FelixLuciano/DesSoft-2020.2
# Bill total
# Write a program that asks the user for the restaurant bill total and prints: "Valor da conta com 10%: R$ X.YZ", where X.YZ is a number with exactly two decimal places.
valor = float(input('Qual o valor da conta?'))
gorjeta = valor * 10/100 # 10% of the total
valor += gorjeta
print('Valor da conta com 10%: R$ {0:.2f}'.format(valor))
| 3.671875
| 4
|
to-jpg/to-jpg.py
|
niebniebnieb/echotango
| 0
|
12777501
|
<reponame>niebniebnieb/echotango
import os
from ftplib import FTP
from PIL import Image
from psd_tools import PSDImage
mode = 'ADD' # ADD | TEST | BULK
skipftp = False
QUALITY = 50
IM_SIZE = 800
savos = "/Users/thomasnieborowski/Desktop/SAVOS/IMG/"
remote_img = 'public_html/sebartsvirtual/wp-content/uploads/img'
if mode == 'TEST':
in_dir = "./in_img"
out_dir = "./out_img"
elif mode == 'ADD':
in_dir = savos + "ADD_IN_IMG"
out_dir = savos + "ADD_OUT_IMG"
else:
in_dir = savos + "IN_IMG"
out_dir = savos + "OUT_IMG"
save_dir = savos + "IN_IMG"
save_out_dir = savos + "SAVE_OUT_IMG"
print('Processing Images in Mode: ' + mode)
print('Skipping FTP: ' + str(skipftp))
print('Image Root Dir: ' + savos)
print('input dir: ' + in_dir)
print('output dir: ' + out_dir)
print('Input files saved in : ' + save_dir)
print('quality : ' + str(QUALITY))
print('Max Size : ' + str(IM_SIZE))
if not skipftp:
    # save the original artist-supplied files from the previous run.
for save in os.listdir(in_dir):
save_path = os.path.join(in_dir, save)
os.rename(save_path, os.path.join(save_dir, save) )
# FTP files to local
with open(os.path.join(savos, "savospw"), "r") as f1:
pw = f1.read().replace('\n', '').split(',')
ftp = FTP(pw[0], pw[1], pw[2])
ftp.cwd(remote_img)
files = ftp.nlst()
for f2 in files:
if f2 == '.' or f2 == '..':
continue
localf = os.path.join(in_dir, f2)
with open(localf, 'wb') as f3:
ftp.retrbinary('RETR '+f2, f3.write)
for f4 in files:
if f4 == '.' or f4 == '..':
continue
ftp.delete(f4)
ftp.quit()
# Resize and compress files
for f33 in os.listdir(in_dir):
if f33 == '.DS_Store':
continue
f4 = f33.lower()
org = os.path.join(in_dir, f4)
newfile = f4.lower()
print('CONVERTing '+f4)
if f4.endswith('.jpeg'):
newfile = newfile.replace('.jpeg', '.jpg')
im = Image.open(org)
elif f4.endswith('.jpg'):
im = Image.open(org)
newfile = newfile
elif f4.endswith('.png'):
newfile = newfile.replace('.png', '.jpg')
im = Image.open(org)
im = im.convert('RGB')
elif f4.endswith('.tif'):
newfile = newfile.replace('.tif', '.jpg')
im = Image.open(org)
im = im.convert('RGB')
elif f4.endswith('.psd'):
png = org.replace('.psd', '.png')
os.system('psd-tools convert '+org+' '+png)
newfile = newfile.replace('.psd', '.jpg')
im = Image.open(png)
im = im.convert('RGB')
os.remove(png)
else:
print('ERROR: UNEXPECTED FILE EXTENSION: ' + f4)
continue
im.thumbnail((IM_SIZE, IM_SIZE))
newpath = os.path.join(out_dir, newfile)
im.save(newpath, quality=QUALITY)
print('CONVERTed '+f4+' to '+newpath)
for save in os.listdir(out_dir):
if save == '.DS_Store':
continue
save_path = os.path.join(out_dir, save)
os.system('cp '+ save_path + ' ' + os.path.join(save_out_dir, save))
print('Now upload processed Images to Media Library:')
print('Dashboard > Media > Add New > Select > ' + out_dir)
| 2.65625
| 3
|
adaptdl/adaptdl/torch/__init__.py
|
pandyakaa/modified-adaptdl-sched
| 0
|
12777502
|
# Copyright 2020 Petuum, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
if "darwin" in sys.platform.lower():
# To avoid multiple runs of the model code
# https://pythonspeed.com/articles/python-multiprocessing/
import multiprocessing
multiprocessing.set_start_method('fork')
import logging
import portpicker
import requests
import torch.distributed
import pkg_resources
import adaptdl.collective
import adaptdl.env
import semver
from .epoch import current_epoch, finished_epochs, remaining_epochs_until
from .data import current_dataloader, AdaptiveDataLoader, ElasticSampler
from .parallel import AdaptiveDataParallel
from .accumulator import Accumulator
logging.basicConfig(level=logging.INFO)
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.INFO)
def version_check(version):
if semver.VersionInfo.isvalid(version) and \
version != "0.0.0":
return True
else:
return False
def init_process_group(backend):
url = adaptdl.env.supervisor_url()
if url:
key = adaptdl.env.job_id()
group = adaptdl.env.num_restarts()
while True:
response = requests.get(url=f"{url}/discover/{key}/{group}")
if response.status_code != 408: # Timeout.
break
response.raise_for_status()
master_addr = response.json()[0]
sched_version = adaptdl.env.adaptdl_sched_version()
trainer_version = pkg_resources.get_distribution("adaptdl").version
# if version_check(sched_version) and version_check(trainer_version):
# trainer_ver_maj = semver.VersionInfo.parse(trainer_version).major
# sched_ver_maj = semver.VersionInfo.parse(sched_version).major
# if trainer_ver_maj != sched_ver_maj:
# raise Exception('adaptdl version {} is incompatible with'
# 'scheduler version {}'.format(trainer_version,
# sched_version))
else:
master_addr = adaptdl.env.master_addr()
master_port = adaptdl.env.master_port()
# Initialize collective module.
adaptdl.collective.initialize(master_addr, master_port)
# Initialize torch.distributed.
torch_port = adaptdl.collective.broadcast(portpicker.pick_unused_port())
init_method = "tcp://{}:{}?rank={}&world_size={}".format(
master_addr, torch_port, adaptdl.env.replica_rank(),
adaptdl.env.num_replicas())
LOG.info("Initializing torch.distributed using %s", init_method)
torch.distributed.init_process_group(backend, init_method)
LOG.info("torch.distributed initialized")
__all__ = [
"init_process_group",
"current_epoch",
"finished_epochs",
"remaining_epochs_until",
"current_dataloader",
"AdaptiveDataLoader",
"ElasticSampler",
"AdaptiveDataParallel",
"Accumulator",
]
| 1.75
| 2
|
loss.py
|
Dyfine/SphericalEmbedding
| 41
|
12777503
|
import myutils
from torch.nn import Module, Parameter
import torch.nn.functional as F
import torch
import torch.nn as nn
import numpy as np
class TripletLoss(Module):
def __init__(self, instance, margin=1.0):
super(TripletLoss, self).__init__()
self.margin = margin
self.instance = instance
def forward(self, inputs, targets, normalized=True):
norm_temp = inputs.norm(dim=1, p=2, keepdim=True)
if normalized:
inputs = inputs.div(norm_temp.expand_as(inputs))
nB = inputs.size(0)
idx_ = torch.arange(0, nB, dtype=torch.long)
dist = torch.pow(inputs, 2).sum(dim=1, keepdim=True).expand(nB, nB)
dist = dist + dist.t()
# use squared
        dist.addmm_(inputs, inputs.t(), beta=1, alpha=-2).clamp_(min=1e-12)
adjacency = targets.expand(nB, nB).eq(targets.expand(nB, nB).t())
adjacency_not = ~adjacency
mask_ap = (adjacency.float() - torch.eye(nB).cuda()).long()
mask_an = adjacency_not.long()
dist_ap = (dist[mask_ap == 1]).view(-1, 1)
dist_an = (dist[mask_an == 1]).view(nB, -1)
dist_an = dist_an.repeat(1, self.instance - 1)
dist_an = dist_an.view(nB * (self.instance - 1), nB - self.instance)
num_loss = dist_an.size(0) * dist_an.size(1)
triplet_loss = torch.sum(
torch.max(torch.tensor(0, dtype=torch.float).cuda(), self.margin + dist_ap - dist_an)) / num_loss
final_loss = triplet_loss * 1.0
with torch.no_grad():
assert normalized == True
cos_theta = torch.mm(inputs, inputs.t())
mask = targets.expand(nB, nB).eq(targets.expand(nB, nB).t())
avg_ap = cos_theta[(mask.float() - torch.eye(nB).cuda()) == 1].mean()
avg_an = cos_theta[mask.float() == 0].mean()
return final_loss, avg_ap, avg_an
class TripletSemihardLoss(Module):
def __init__(self, margin=0.2):
super(TripletSemihardLoss, self).__init__()
self.margin = margin
def forward(self, inputs, targets, normalized=True):
norm_temp = inputs.norm(dim=1, p=2, keepdim=True)
if normalized:
inputs = inputs.div(norm_temp.expand_as(inputs))
nB = inputs.size(0)
idx_ = torch.arange(0, nB, dtype=torch.long)
dist = torch.pow(inputs, 2).sum(dim=1, keepdim=True).expand(nB, nB)
dist = dist + dist.t()
# use squared
        dist.addmm_(inputs, inputs.t(), beta=1, alpha=-2).clamp_(min=1e-12)
temp_euclidean_score = dist * 1.0
adjacency = targets.expand(nB, nB).eq(targets.expand(nB, nB).t())
adjacency_not = ~ adjacency
dist_tile = dist.repeat(nB, 1)
mask = (adjacency_not.repeat(nB, 1)) * (dist_tile > (dist.transpose(0, 1).contiguous().view(-1, 1)))
mask_final = (mask.float().sum(dim=1, keepdim=True) > 0).view(nB, nB).transpose(0, 1)
# negatives_outside: smallest D_an where D_an > D_ap
temp1 = (dist_tile - dist_tile.max(dim=1, keepdim=True)[0]) * (mask.float())
negtives_outside = temp1.min(dim=1, keepdim=True)[0] + dist_tile.max(dim=1, keepdim=True)[0]
negtives_outside = negtives_outside.view(nB, nB).transpose(0, 1)
# negatives_inside: largest D_an
temp2 = (dist - dist.min(dim=1, keepdim=True)[0]) * (adjacency_not.float())
negtives_inside = temp2.max(dim=1, keepdim=True)[0] + dist.min(dim=1, keepdim=True)[0]
negtives_inside = negtives_inside.repeat(1, nB)
semi_hard_negtives = torch.where(mask_final, negtives_outside, negtives_inside)
loss_mat = self.margin + dist - semi_hard_negtives
mask_positives = adjacency.float() - torch.eye(nB).cuda()
mask_positives = mask_positives.detach()
num_positives = torch.sum(mask_positives)
triplet_loss = torch.sum(
torch.max(torch.tensor(0, dtype=torch.float).cuda(), loss_mat * mask_positives)) / num_positives
final_loss = triplet_loss * 1.0
with torch.no_grad():
assert normalized == True
cos_theta = torch.mm(inputs, inputs.t())
mask = targets.expand(nB, nB).eq(targets.expand(nB, nB).t())
avg_ap = cos_theta[(mask.float() - torch.eye(nB).cuda()) == 1].mean()
avg_an = cos_theta[mask.float() == 0].mean()
return final_loss, avg_ap, avg_an
def cross_entropy(logits, target, size_average=True):
if size_average:
return torch.mean(torch.sum(- target * F.log_softmax(logits, -1), -1))
else:
return torch.sum(torch.sum(- target * F.log_softmax(logits, -1), -1))
class NpairLoss(Module):
def __init__(self):
super(NpairLoss, self).__init__()
def forward(self, inputs, targets, normalized=False):
nB = inputs.size(0)
norm_temp = inputs.norm(p=2, dim=1, keepdim=True)
inputs_n = inputs.div(norm_temp.expand_as(inputs))
mm_logits = torch.mm(inputs_n, inputs_n.t()).detach()
mask = targets.expand(nB, nB).eq(targets.expand(nB, nB).t())
cos_ap = mm_logits[(mask.float() - torch.eye(nB).float().cuda()) == 1].view(nB, -1)
cos_an = mm_logits[mask != 1].view(nB, -1)
avg_ap = torch.mean(cos_ap)
avg_an = torch.mean(cos_an)
if normalized:
inputs = inputs.div(norm_temp.expand_as(inputs))
inputs = inputs * 5.0
labels = targets.view(-1).cpu().numpy()
pids = np.unique(labels)
anchor_idx = []
positive_idx = []
for i in pids:
ap_idx = np.where(labels == i)[0]
anchor_idx.append(ap_idx[0])
positive_idx.append(ap_idx[1])
anchor = inputs[anchor_idx, :]
positive = inputs[positive_idx, :]
batch_size = anchor.size(0)
target = torch.from_numpy(pids).cuda()
target = target.view(target.size(0), 1)
target = (target == torch.transpose(target, 0, 1)).float()
target = target / torch.sum(target, dim=1, keepdim=True).float()
logit = torch.matmul(anchor, torch.transpose(positive, 0, 1))
loss_ce = cross_entropy(logit, target)
loss = loss_ce * 1.0
return loss, avg_ap, avg_an
class MultiSimilarityLoss(Module):
def __init__(self):
super(MultiSimilarityLoss, self).__init__()
self.thresh = 0.5
self.margin = 0.1
self.scale_pos = 2.0
self.scale_neg = 40.0
def forward(self, feats, labels):
norm = feats.norm(dim=1, p=2, keepdim=True)
feats = feats.div(norm.expand_as(feats))
labels = labels.view(-1)
assert feats.size(0) == labels.size(0), \
f"feats.size(0): {feats.size(0)} is not equal to labels.size(0): {labels.size(0)}"
batch_size = feats.size(0)
sim_mat = torch.matmul(feats, torch.t(feats))
epsilon = 1e-5
loss = list()
avg_aps = list()
avg_ans = list()
for i in range(batch_size):
pos_pair_ = sim_mat[i][labels == labels[i]]
pos_pair_ = pos_pair_[pos_pair_ < 1 - epsilon]
neg_pair_ = sim_mat[i][labels != labels[i]]
if len(neg_pair_) < 1 or len(pos_pair_) < 1:
continue
avg_aps.append(pos_pair_.mean())
avg_ans.append(neg_pair_.mean())
neg_pair = neg_pair_[neg_pair_ + self.margin > torch.min(pos_pair_)]
pos_pair = pos_pair_[pos_pair_ - self.margin < torch.max(neg_pair_)]
if len(neg_pair) < 1 or len(pos_pair) < 1:
continue
# weighting step
pos_loss = 1.0 / self.scale_pos * torch.log(
1 + torch.sum(torch.exp(-self.scale_pos * (pos_pair - self.thresh))))
neg_loss = 1.0 / self.scale_neg * torch.log(
1 + torch.sum(torch.exp(self.scale_neg * (neg_pair - self.thresh))))
loss.append(pos_loss + neg_loss)
if len(loss) == 0:
print('with ms loss = 0 !')
loss = torch.zeros([], requires_grad=True).cuda()
else:
loss = sum(loss) / batch_size
loss = loss.view(-1)
avg_ap = sum(avg_aps) / batch_size
avg_an = sum(avg_ans) / batch_size
return loss, avg_ap, avg_an
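if __name__ == "__main__":
    # Added sketch: 8 random embeddings, 4 identities with 2 instances each.
    # MultiSimilarityLoss normalizes internally; this runs on CPU unless the
    # zero-loss fallback (which hard-codes .cuda()) is hit.
    feats = torch.randn(8, 16)
    labels = torch.arange(4).repeat_interleave(2)
    loss, avg_ap, avg_an = MultiSimilarityLoss()(feats, labels)
    print(loss.item(), avg_ap.item(), avg_an.item())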
| 2.46875
| 2
|
algo-c-to-_/src/tarai.py
|
nobi56/aRepo
| 0
|
12777504
|
<reponame>nobi56/aRepo
#
# from src/tarai.c
#
# int tarai(int, int, int) to tarai
# tarai to tak(*)
#
# *) https://en.wikipedia.org/wiki/Tak_(function)
#
def tarai(x, y, z):
if x <= y:
return y
return tarai(tarai(x-1,y,z), tarai(y-1,z,x), tarai(z-1,x,y))
def tak(x, y, z):
if x <= y:
return z
return tak(tak(x-1,y,z), tak(y-1,z,x), tak(z-1,x,y))
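# Quick checks (added): tarai follows the closed form
# y if x <= y else (z if y <= z else x); tak(18, 12, 6) = 7 is the classic
# benchmark value.
if __name__ == "__main__":
    print(tarai(12, 6, 0))  # -> 12
    print(tak(18, 12, 6))   # -> 7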
| 2.65625
| 3
|
python/src/zero/activations.py
|
d-ikeda-sakurasoft/deep-learning
| 0
|
12777505
|
<filename>python/src/zero/activations.py
from layers import *
from keras.datasets import mnist
from keras.utils import to_categorical
x = np.random.randn(1000, 100)
node_num = 100
hidden_layer_size = 5
activations = {}
for i in range(hidden_layer_size):
if i != 0:
x = activations[i - 1]
#w = np.random.randn(node_num, node_num) * 0.1
#w = np.random.randn(node_num, node_num) * 0.01
#w = np.random.randn(node_num, node_num) / np.sqrt(node_num)
w = np.random.randn(node_num, node_num) * np.sqrt(2 / node_num)
z = np.dot(x, w)
#a = sigmoid(z)
a = relu(z)
activations[i] = a
plt.figure(figsize=(20, 5))
for i, a in activations.items():
plt.subplot(1, len(activations), i + 1)
plt.hist(a.flatten(), 30, range=(0,1))
plt.savefig("activations.png")
| 3.0625
| 3
|
students/k3342/practical_works/Kataeva_Veronika/simple_django_web_project/django_project_kataeva/project_first_app/views.py
|
KataevaVeronika/ITMO_ICT_WebProgramming_2020
| 0
|
12777506
|
<filename>students/k3342/practical_works/Kataeva_Veronika/simple_django_web_project/django_project_kataeva/project_first_app/views.py
import datetime
from django.http import Http404
from django.shortcuts import render
from django.views.generic.list import ListView
from django.views.generic.edit import CreateView
from project_first_app.models import Ownership, Car, Owner
from project_first_app.forms import OwnerForm
class ListCars(ListView):
model = Car
class CreateCars(CreateView):
model = Car
fields = ['model', 'brand', 'color', 'car_number']
success_url = '/create_car/'
def get_owner(request, c_id):
try:
now = datetime.datetime.now()
date = str(now.year) + '-' + str(now.month) + '-' + str(now.day)
ownership = Ownership.objects.filter(car_id=c_id).filter(date_of_start__lte=date).filter(date_of_end__gte=date)[0]
owner = Owner.objects.get(id=ownership.owner_id)
    except (IndexError, Owner.DoesNotExist):
raise Http404("Does not exist")
return render(request, 'owner.html', {'owner': owner})
def list_owners(request):
context = {}
context['owners'] = Owner.objects.all()
return render(request, 'owners_list.html', context)
def create_owner(request):
context = {}
form = OwnerForm(request.POST or None)
if form.is_valid():
form.save()
context['form'] = form
return render(request, 'create_owner.html', context)
| 2.3125
| 2
|
src/tap_apple_search_ads/api/campaign.py
|
mighty-digital/tap-apple-search-ads
| 1
|
12777507
|
<reponame>mighty-digital/tap-apple-search-ads
"""Get All Campaigns stream"""
import json
from typing import Any, Dict, List, Optional
import requests
import singer
from tap_apple_search_ads import api
from tap_apple_search_ads.api.auth import RequestHeadersValue
logger = singer.get_logger()
DEFAULT_URL = "https://api.searchads.apple.com/api/v4/campaigns"
PROPERTIES_TO_SERIALIZE = {
"budgetOrders",
"countriesOrRegions",
"countryOrRegionServingStateReasons",
"locInvoiceDetails",
"servingStateReasons",
"supplySources",
}
def sync(headers: RequestHeadersValue) -> List[Dict[str, Any]]:
logger.info("Sync: campaigns")
response = requests.get(DEFAULT_URL, headers=headers)
api.utils.check_response(response)
campaigns = response.json()["data"]
logger.info("Synced [%s] campaings", len(campaigns))
return campaigns
def to_schema(record: Dict[str, Any]) -> Dict[str, Any]:
budgetAmount = record.pop("budgetAmount")
record["budgetAmount_currency"] = budgetAmount["currency"]
record["budgetAmount_amount"] = budgetAmount["amount"]
dailyBudgetAmount = record.pop("dailyBudgetAmount")
record["dailyBudgetAmount_currency"] = dailyBudgetAmount["currency"]
record["dailyBudgetAmount_amount"] = dailyBudgetAmount["amount"]
for key in PROPERTIES_TO_SERIALIZE:
value = record.pop(key)
record[key] = serialize(value)
return record
def serialize(value: Any) -> Optional[str]:
if value is None:
return None
value_str = json.dumps(value)
return value_str
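if __name__ == "__main__":
    # Added sketch: flattening a hypothetical record (field values are
    # illustrative, not real Search Ads API output).
    sample = {
        "budgetAmount": {"currency": "USD", "amount": "100"},
        "dailyBudgetAmount": {"currency": "USD", "amount": "10"},
    }
    sample.update({key: [] for key in PROPERTIES_TO_SERIALIZE})
    print(to_schema(sample))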
| 2.8125
| 3
|
rough_trade_calendar/graphql.py
|
craiga/rough-trade-calendar
| 1
|
12777508
|
"""
GraphQL + Relay interface to Rough Trade Calendar data.
"""
import django_filters
import graphene
import graphene.relay
from graphene_django import DjangoObjectType
from graphene_django.filter import DjangoFilterConnectionField
from rough_trade_calendar import models
class CountConnection(graphene.Connection):
"""A connection which supports Relay's totalCount field."""
total_count = graphene.Int()
def resolve_total_count(self, *args): # pylint: disable=unused-argument
return self.length # pylint: disable=no-member
class Meta:
abstract = True
class EventFilterSet(django_filters.FilterSet):
"""Filter and order events by start_at."""
start_after = django_filters.DateTimeFilter("start_at", "gt")
start_before = django_filters.DateTimeFilter("start_at", "lt")
order_by = django_filters.OrderingFilter(fields={"start_at": "startAt"})
class Meta:
model = models.Event
fields = ["start_after", "start_before"]
class Event(DjangoObjectType):
"""An event."""
class Meta:
model = models.Event
fields = [
"id",
"name",
"description",
"url",
"image_url",
"start_at",
"location",
]
filterset_class = EventFilterSet
interfaces = [graphene.relay.Node]
connection_class = CountConnection
class Location(DjangoObjectType):
"""A location."""
class Meta:
model = models.Location
fields = ["id", "name", "timezone", "events"]
interfaces = [graphene.relay.Node]
connection_class = CountConnection
filter_fields = {"name": ["exact", "contains"]}
class Query(graphene.ObjectType):
all_locations = DjangoFilterConnectionField(Location, description="All locations.")
schema = graphene.Schema(query=Query)
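# A hedged example query (a sketch; the exact filter argument names are
# generated by graphene-django from EventFilterSet/filter_fields and are
# assumptions here) showing the totalCount field added by CountConnection:
#
#     {
#       allLocations(name_Contains: "London") {
#         totalCount
#         edges { node { name timezone } }
#       }
#     }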
| 2.265625
| 2
|
catkin_ws/src/navigation/src/sr_turns_node.py
|
DiegoOrtegoP/Software
| 12
|
12777509
|
#!/usr/bin/env python
import rospy
import numpy
from duckietown_msgs.msg import FSMState, AprilTags, BoolStamped
from std_msgs.msg import String, Int16 #Imports msg
class SRTurnsNode(object):
def __init__(self):
# Save the name of the node
self.node_name = rospy.get_name()
self.turn_type = -1
rospy.loginfo("[%s] Initialzing." %(self.node_name))
# Setup publishers
self.pub_turn_type = rospy.Publisher("~turn_type",Int16, queue_size=1, latch=True)
# Setup subscribers
self.sub_topic_mode = rospy.Subscriber("~mode", FSMState, self.cbMode, queue_size=1)
rospy.loginfo("[%s] Initialzed." %(self.node_name))
self.rate = rospy.Rate(30) # 10hz
def cbMode(self, mode_msg):
#print mode_msg
self.fsm_mode = mode_msg.state
if(self.fsm_mode == "INTERSECTION_CONTROL"):
# return only straight and right turn
availableTurns = [1,2]
#now randomly choose a possible direction
if(len(availableTurns)>0):
randomIndex = numpy.random.randint(len(availableTurns))
chosenTurn = availableTurns[randomIndex]
self.turn_type = chosenTurn
self.pub_turn_type.publish(self.turn_type)
rospy.loginfo("[%s] possible turns %s." %(self.node_name,availableTurns))
rospy.loginfo("[%s] Turn type now: %i" %(self.node_name,self.turn_type))
else:
self.turn_type = -1
self.pub_turn_type.publish(self.turn_type)
rospy.loginfo("[%s] Turn type: %i" %(self.node_name, self.turn_type))
def on_shutdown(self):
rospy.loginfo("[%s] Shutting down." %(self.node_name))
if __name__ == '__main__':
# Initialize the node with rospy
rospy.init_node('sr_turns_node', anonymous=False)
# Create the NodeName object
node = SRTurnsNode()
# Setup proper shutdown behavior
rospy.on_shutdown(node.on_shutdown)
# Keep it spinning to keep the node alive
rospy.spin()
| 2.515625
| 3
|
src/byro/office/views/accounts.py
|
mhannig/byro
| 0
|
12777510
|
<reponame>mhannig/byro
from django import forms
from django.contrib import messages
from django.db import models
from django.shortcuts import redirect
from django.urls import reverse
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from django.views.generic import DetailView, FormView, ListView
from byro.bookkeeping.models import Account, AccountCategory, Transaction
FORM_CLASS = forms.modelform_factory(Account, fields=["name", "account_category"])
ACCOUNT_COLUMN_HEADERS = {
    # FIXME Check this with an accountant who is a native English speaker
AccountCategory.INCOME: (_("Charge"), _("Revenue")),
AccountCategory.ASSET: (_("Increase"), _("Decrease")),
AccountCategory.EQUITY: (_("Decrease"), _("Increase")),
AccountCategory.LIABILITY: (_("Decrease"), _("Increase")),
AccountCategory.EXPENSE: (_("Expense"), _("Rebate")),
}
class AccountListView(ListView):
template_name = "office/account/list.html"
context_object_name = "accounts"
model = Account
class AccountCreateView(FormView):
template_name = "office/account/add.html"
model = Account
form_class = FORM_CLASS
def form_valid(self, form):
form.save()
messages.success(
self.request,
_("The account was added, please edit additional details if applicable."),
)
self.form = form
return super().form_valid(form)
def get_success_url(self):
return reverse(
"office:finance.accounts.detail", kwargs={"pk": self.form.instance.pk}
)
class AccountDetailView(ListView):
template_name = "office/account/detail.html"
context_object_name = "bookings"
model = Transaction
paginate_by = 25
def get_object(self):
if not hasattr(self, "object"):
self.object = Account.objects.get(pk=self.kwargs["pk"])
return self.object
def get_queryset(self):
qs = self.get_object().bookings_with_transaction_data
if self.request.GET.get("filter") == "unbalanced":
qs = qs.exclude(
transaction_balances_debit=models.F("transaction_balances_credit")
)
qs = qs.filter(transaction__value_datetime__lte=now()).order_by(
"-transaction__value_datetime"
)
return qs
def get_form(self, request=None):
form = FORM_CLASS(request.POST if request else None, instance=self.get_object())
form.fields["account_category"].disabled = True
return form
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(*args, **kwargs)
context["form"] = self.get_form()
context["account"] = self.get_object()
context["ACCOUNT_COLUMN_HEADERS"] = ACCOUNT_COLUMN_HEADERS.get(
self.get_object().account_category, (_("Debit"), _("Credit"))
)
return context
def post(self, request, *args, **kwargs):
form = self.get_form(request)
if form.is_valid() and form.has_changed():
form.save()
messages.success(self.request, _("Your changes have been saved."))
return redirect(reverse("office:finance.accounts.detail", kwargs=self.kwargs))
class AccountDeleteView(DetailView):
model = Account
context_object_name = "account"
| 2.25
| 2
|
usage.py
|
mjclawar/sd-range-slider
| 2
|
12777511
|
<reponame>mjclawar/sd-range-slider
import sd_range_slider
import dash
import dash_html_components as html
app = dash.Dash('')
app.scripts.config.serve_locally = True
app.layout = html.Div([
# Test normal use case
html.Div(
sd_range_slider.SDRangeSlider(
id='input',
value=[1, 3],
marks={val: {'label': label, 'style': {'font-size': '80%'}}
for val, label in [
(1, 'Under 25'),
(2, '25 to 34'),
(3, '35 to 44'),
(4, '45+')]},
minVal=1,
maxVal=4,
orHigherFormatter='{} or older',
orLowerFormatter='Under {} years old',
rangeFormatter='{} to {} years old',
allValuesText='All ages',
humanName='Age cohort',
description='Test description magic',
singleValueFormatting=False)
),
html.Div(id='output'),
# Test categorical use case
html.Div(
sd_range_slider.SDRangeSlider(
id='input-categorical',
isCategorical=True,
value=[1, 3],
marks={val: {'label': label, 'style': {'font-size': '80%'}}
for val, label in [
(1, 'Under 25'),
(2, '25 to 34'),
(3, '35 to 44'),
(4, '45+')]},
minVal=1,
maxVal=4,
orHigherFormatter='{} or older',
orLowerFormatter='Under {} years old',
rangeFormatter='{} to {} years old',
allValuesText='All ages',
noValuesText='Any age',
humanName='Age cohort',
description='Test description magic',
singleValueFormatting=False,
)
),
html.Div(id='output-categorical'),
# Test restricted lower range
html.Div(
sd_range_slider.SDRangeSlider(
id='input-restricted-lower',
value=[2, 3],
marks={val: {'label': label, 'style': {'font-size': '80%'}}
for val, label in [
(2, '25 to 34'),
(3, '35 to 44'),
(4, '45+')]},
minVal=2,
maxVal=4,
orHigherFormatter='{} or older',
orLowerFormatter='Under {} years old',
rangeFormatter='{} to {} years old',
allValuesText='All ages',
restrictedLower=True,
humanName='Age cohort',
description='Test description magic',
singleValueFormatting=False)
),
html.Div(id='output-restricted-lower'),
# Test restricted higher range
html.Div(
sd_range_slider.SDRangeSlider(
id='input-restricted-higher',
value=[1, 3],
marks={val: {'label': label, 'style': {'font-size': '80%'}}
for val, label in [
(1, 'Under 25'),
(2, '25 to 34'),
(3, '35 to 44')]},
minVal=1,
maxVal=3,
orHigherFormatter='{} or older',
orLowerFormatter='Under {} years old',
rangeFormatter='{} to {} years old',
allValuesText='All ages',
restrictedHigher=True,
humanName='Age cohort',
description='Test description magic',
singleValueFormatting=False)
),
html.Div(id='output-restricted-higher'),
# Test restricted lower and higher
html.Div(
sd_range_slider.SDRangeSlider(
id='input-restricted-all',
value=[2, 3],
marks={val: {'label': label, 'style': {'font-size': '80%'}}
for val, label in [
(2, '25 to 34'),
(3, '35 to 44'),
(4, '45 to 49'),
(5, '50 to 54')]},
minVal=2,
maxVal=5,
orHigherFormatter='{} or older',
orLowerFormatter='Under {} years old',
rangeFormatter='{} to {} years old',
allValuesText='All ages',
restrictedHigher=True,
restrictedLower=True,
humanName='Age cohort',
description='Test description magic',
singleValueFormatting=False)
),
html.Div(id='output-restricted-all'),
# Test update on close
html.Div(
sd_range_slider.SDRangeSlider(
id='input-update-on-close',
value=[2, 3],
marks={val: {'label': label, 'style': {'font-size': '80%'}}
for val, label in [
(2, '25 to 34'),
(3, '35 to 44'),
(4, '45 to 49'),
(5, '50 to 54')]},
minVal=2,
maxVal=5,
updatemode='modalClose',
orHigherFormatter='{} or older',
orLowerFormatter='Under {} years old',
rangeFormatter='{} to {} years old',
allValuesText='All ages',
restrictedHigher=True,
restrictedLower=True,
humanName='Age cohort',
description='Test description magic',
singleValueFormatting=False)
),
html.Div(id='output-update-on-close'),
], style=dict(width=250))
@app.callback(
dash.dependencies.Output('output', 'children'),
[dash.dependencies.Input('input', 'value')])
def display_output(value):
return 'You have entered {}'.format(value)
@app.callback(
dash.dependencies.Output('output-categorical', 'children'),
[dash.dependencies.Input('input-categorical', 'value')])
def display_output_categorical(value):
return 'You have entered {}'.format(value)
@app.callback(
dash.dependencies.Output('output-restricted-lower', 'children'),
[dash.dependencies.Input('input-restricted-lower', 'value')])
def display_output_restricted_lower(value):
return 'Restricted lower - You have entered {}'.format(value)
@app.callback(
dash.dependencies.Output('output-restricted-higher', 'children'),
[dash.dependencies.Input('input-restricted-higher', 'value')])
def display_output_restricted_higher(value):
return 'Restricted higher - You have entered {}'.format(value)
@app.callback(
dash.dependencies.Output('output-restricted-all', 'children'),
[dash.dependencies.Input('input-restricted-all', 'value')])
def display_output_restricted_all(value):
return 'Restricted lower and higher - You have entered {}'.format(value)
@app.callback(
dash.dependencies.Output('output-update-on-close', 'children'),
[dash.dependencies.Input('input-update-on-close', 'value')])
def display_output_update_on_close(value):
return 'Update on close - You have entered {}'.format(value)
if __name__ == '__main__':
app.run_server(debug=True)
| 2.328125
| 2
|
cameo/mod/yuwei/utility/mailHelper.py
|
muchu1983/104_cameo
| 0
|
12777512
|
<reponame>muchu1983/104_cameo
#coding: utf-8
import smtplib
from email.mime.text import MIMEText
class mailHelper:
DEFAULT_SMTP = "smtp.gmail.com:587"
DEFAULT_ACCOUNT = "<EMAIL>"
DEFAULT_PASSWORD = "<PASSWORD>"
@staticmethod
def send(strSubject, strFrom, strTo, strMsg, lstStrTarget, strSmtp = None, strAccount = None, strPassword = None):
        if strSmtp is None:
            strSmtp = mailHelper.DEFAULT_SMTP
        if strAccount is None:
            strAccount = mailHelper.DEFAULT_ACCOUNT
        if strPassword is None:
            strPassword = mailHelper.DEFAULT_PASSWORD
msg = MIMEText(strMsg)
msg['Subject'] = strSubject
msg['From'] = strFrom
msg['To'] = strTo
try:
server = smtplib.SMTP(strSmtp)
server.ehlo()
server.starttls()
server.login(strAccount, strPassword)
server.sendmail(strAccount, lstStrTarget, msg.as_string())
server.quit()
        except Exception as e:
print("[mailHelper] Sending mail failed! ErrorMessage:" + str(e))
| 2.859375
| 3
|
moldynplot/relaxation.py
|
KarlTDebiec/myplotspec_sim
| 8
|
12777513
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# moldynplot.relaxation.py
#
# Copyright (C) 2012-2017 <NAME>
# All rights reserved.
#
# This software may be modified and distributed under the terms of the
# BSD license. See the LICENSE file for details.
"""
Processes NMR relaxation and related data
"""
################################### MODULES ###################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
################################## FUNCTIONS ##################################
def spawn(function):
def run_function(queue_in, queue_out):
while True:
i, argument = queue_in.get()
if i is None:
break # 'None' signals that queue is empty
queue_out.put((i, function(argument)))
return run_function
def multiprocess_map(function, arguments, n_processes=1):
"""
    Runs a *function* with *arguments* using *n_processes*. Meant
    as a replacement for multiprocessing.Pool.imap_unordered,
    which can only accept module-level functions.
    **Arguments:**
        :*function*:    Function to run
        :*arguments*:   Iterable of arguments to pass to function
        :*n_processes*: Number of processes to use
**Returns:**
:*results*: List of results returned from *function*
.. todo:
- Does this work, or can it be made to smoothly work, with more
complex arguments?
- Accept multiple functions, in addition to arguments
- Additional improvements likely possible
"""
from multiprocessing import Queue, Process
# Initialize queues
queue_in = Queue(1)
queue_out = Queue()
# Initialize processes and link to input and output queues
processes = [Process(target=spawn(function), args=(queue_in, queue_out))
for i in range(n_processes)]
for p in processes:
p.daemon = True
p.start()
# Construct input queue, including 'None' signals to terminate
input = [queue_in.put((i, argument)) for i, argument in
enumerate(arguments)]
for i in range(n_processes):
queue_in.put((None, None))
# Retrieve output queue
output = [queue_out.get() for i in range(len(input))]
# Rejoin processes and return results
for p in processes:
p.join()
return [x for i, x in sorted(output)]
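# A minimal usage sketch (hypothetical; not part of the original module):
# multiprocess_map dispatches each argument to a worker process and returns
# the results in input order, e.g.
#
#     def square(x):
#         return x * x
#
#     results = multiprocess_map(square, range(4), n_processes=2)
#     # -> [0, 1, 4, 9]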
def process_ired(infiles, outfile, indexfile=None, **kwargs):
"""
"""
from os import devnull
import re
from subprocess import Popen, PIPE
import pandas as pd
import numpy as np
r1r2noe_datasets = []
s2_datasets = []
# Load data
for i, infile in enumerate(infiles):
with open(devnull, "w") as fnull:
fields = Popen("head -n 1 {0}".format(infile), stdout=PIPE,
stderr=fnull, shell=True).stdout.read().strip()
re_t1t2noe = re.compile(
"^#Vec\s+[\w_]+\[T1\]\s+[\w_]+\[T2\]\s+[\w_]+\[NOE\]$")
re_s2 = re.compile("^#Vec\s+[\w_]+\[S2\]$")
        if re.match(re_t1t2noe, fields):
            read_csv_kw = kwargs.get("read_csv_kw",
                dict(delim_whitespace=True, header=0, index_col=0,
                    names=["r1", "r2", "noe"]))
            raw_data = pd.read_csv(infile, **read_csv_kw)
raw_data["r1"] = 1 / raw_data["r1"]
raw_data["r2"] = 1 / raw_data["r2"]
r1r2noe_datasets.append(raw_data)
elif re.match(re_s2, fields):
            read_csv_kw = kwargs.get("read_csv_kw",
                dict(delim_whitespace=True, header=0, index_col=0, names=["s2"]))
            raw_data = pd.read_csv(infile, **read_csv_kw)
s2_datasets.append(raw_data)
else:
            raise Exception("Unrecognized header format in '{0}'".format(infile))
if indexfile is not None:
residue = np.loadtxt(indexfile, dtype=np.str).flatten()
# Process data
items = []
fmt = []
if indexfile is not None:
items.append(("residue", residue))
fmt.append("%12s")
else:
fmt.append("%12d")
if len(r1r2noe_datasets) >= 2:
r1r2noe_mean = pd.concat(r1r2noe_datasets).groupby(level=0).mean()
r1r2noe_std = pd.concat(r1r2noe_datasets).groupby(level=0).std()
items.extend([("r1", r1r2noe_mean["r1"]), ("r1 se", r1r2noe_std["r1"]),
("r2", r1r2noe_mean["r2"]), ("r2 se", r1r2noe_std["r2"]),
("noe", r1r2noe_mean["noe"]), ("noe se", r1r2noe_std["noe"])])
fmt.extend(
["%11.5f", "%11.5f", "%11.5f", "%11.5f", "%11.5f", "%11.5f"])
elif len(r1r2noe_datasets) == 1:
r1r2noe_mean = r1r2noe_datasets[0]
items.extend([("r1", r1r2noe_mean["r1"]), ("r2", r1r2noe_mean["r2"]),
("noe", r1r2noe_mean["noe"])])
fmt.extend(["%11.5f", "%11.5f", "%11.5f"])
if len(s2_datasets) >= 2:
s2_mean = pd.concat(s2_datasets).groupby(level=0).mean()
s2_std = pd.concat(s2_datasets).groupby(level=0).std()
items.extend([("s2", s2_mean["s2"]), ("s2 se", s2_std["s2"])])
fmt.extend(["%11.5f", "%11.5f"])
elif len(s2_datasets) == 1:
s2_mean = s2_datasets[0]
items.extend([("s2", s2_mean["s2"])])
fmt.extend(["%11.5f"])
data = pd.DataFrame.from_items(items)
if indexfile is not None:
data.set_index("residue", inplace=True)
else:
data.index.name = "vector"
columns = [data.index.name] + list(data.columns.values)
header = "{0:<10s}".format(columns.pop(0))
for column in columns:
header += "{0:>12s}".format(column)
np.savetxt(outfile, np.column_stack((data.index.values, data.values)),
fmt=fmt, header=header, comments='#')
def process_error(sim_infiles, exp_infiles, outfile, **kwargs):
"""
"""
import pandas as pd
import numpy as np
if len(sim_infiles) != len(exp_infiles):
raise ValueError("""Number of simulation input files must
match number of experimental input files, as they are treated
pairwise. {0} simulation input file(s) and {1} experiment input
file(s) provided.""".format(len(sim_infiles), len(exp_infiles)))
# Work through each pair of infiles
errs = []
final_index = None
for sim_infile, exp_infile in zip(sim_infiles, exp_infiles):
print("Comparing simulation infile '{0}' ".format(
sim_infile) + "with experimental infile '{0}':".format(exp_infile))
# Load infiles and select shared indexes and columns
sim = pd.read_csv(sim_infile, delim_whitespace=True, index_col=0)
exp = pd.read_csv(exp_infile, delim_whitespace=True, index_col=0)
overlap = sim.index.intersection(exp.index)
if final_index is None:
final_index = exp.index
final_index = final_index.union(overlap)
sim = sim.loc[overlap]
exp = exp.loc[overlap]
err_cols = [c for c in sim.columns.values if
not c.endswith(" se") and c in exp.columns.values]
err_se_cols = [c + " se" for c in err_cols if
c + " se" in sim.columns.values and c + " se" in
exp.columns.values]
print(" Files share fields {0} and {1} for {2} residues".format(
str(map(str, err_cols)).replace("'", ""),
str(map(str, err_se_cols)).replace("'", ""), len(overlap)))
# Calculate error of available fields
err = pd.DataFrame(0, index=overlap,
columns=[x for t in zip(err_cols, err_se_cols) for x in t])
err[err_cols] = (
np.abs(exp[err_cols] - sim[err_cols]) / np.abs(exp[err_cols]))
# Calculate uncertainty of error of available fields
if len(err_se_cols) != 0:
err[err_se_cols] = 0
# //@formatter:off
err[err_se_cols] = np.sqrt(
(err[err_cols].values) ** 2 *
((np.sqrt(exp[err_se_cols].values ** 2 +
sim[err_se_cols].values ** 2) /
(exp[err_cols].values - sim[err_cols].values)) ** 2 +
(exp[err_se_cols].values / exp[ err_cols].values) ** 2))
# //@formatter:on
errs.append(err)
# Determine final columns and indexes
final_cols = []
final_index = sorted(final_index, key=lambda x: int(x.split(":")[1]))
for err in errs:
for col in err.columns.values:
if not col in final_cols:
final_cols.append(col)
# Sum the columns
final = pd.DataFrame(0.0, index=final_index, columns=final_cols)
counts = pd.DataFrame(0, index=final_index, columns=final_cols)
for err in errs:
for col in err.columns.values:
if not col.endswith(" se"):
final[col].loc[err.index] += err[col].loc[err.index]
else:
final[col].loc[err.index] += err[col].loc[err.index] ** 2
counts[col].loc[err.index] += 1
# Average the columns
print("Averaging fields:")
for col in final_cols:
if not col.endswith(" se"):
print(" Averaging field '{0}'".format(col))
final[col] /= counts[col]
else:
print(" Progagating uncertainty for field '{0}'".format(col))
final[col] = np.sqrt(final[col]) / counts[col]
# Write outfile
print(
"Writing outfile '{0}' with fields ".format(outfile) + "{0} for ".format(
str(map(str, final_cols)).replace("'", "")) + "{0} residues".format(
len(final_index)))
header = "residue "
for col in final_cols:
header += "{0:>12s}".format(col)
fmt = ["%12s"] + ["%11.5f"] * len(final_cols)
np.savetxt(outfile, np.column_stack((final.index.values, final.values)),
fmt=fmt, header=header, comments='#')
def process_relax(relax_type, peaklist, infiles, delays, error_method,
n_synth_datasets, outfile, verbose=1, debug=0, **kwargs):
"""
"""
from glob import glob
from os.path import expandvars
import nmrglue
import numpy as np
import pandas as pd
from scipy.optimize import curve_fit
# Process arguments
processed_infiles = []
for infile in infiles:
processed_infiles += glob(expandvars(infile))
infiles = processed_infiles
if len(delays) != len(infiles):
        raise ValueError("Number of delays must match number of infiles")
peaklist = expandvars(peaklist)
outfile = expandvars(outfile)
# Load peaklist
if verbose >= 1:
print("Loading peaklist from '{0}'".format(peaklist))
def convert_name(name):
return "{0}:{1}".format(name[-4:-1].upper(), name[2:-4])
relax = pd.read_csv(peaklist, sep="\t", usecols=[2, 3, 4], index_col=2,
converters={4: convert_name}, names=["1H", "15N", "residue"], skiprows=1)
# Load peak intensities from spectra
for infile, delay in zip(infiles, delays):
if verbose >= 1:
print("Loading intensities from '{0}'".format(infile))
parameters, intensity = nmrglue.pipe.read(infile)
hydrogen = nmrglue.pipe.make_uc(parameters, intensity,
dim=1).ppm_scale()
nitrogen = nmrglue.pipe.make_uc(parameters, intensity,
dim=0).ppm_scale()
def calc_intensity(peak, **kwargs):
H_index = np.argmin((hydrogen - peak["1H"]) ** 2)
N_index = np.argmin((nitrogen - peak["15N"]) ** 2)
return intensity[N_index, H_index]
relax["{0} ms".format(delay)] = relax.apply(calc_intensity, axis=1)
# Calculate relaxation rates
delays = np.array(delays, np.float64) / 1000
def calc_relax(peak, **kwargs):
if verbose >= 1:
print("Calculating relaxation for {0}".format(peak.name))
def model_function(delay, intensity, relaxation):
return intensity * np.exp(-1 * delay * relaxation)
I = np.array(peak.filter(regex=(".*ms")).values, np.float64)
I0, R = curve_fit(model_function, delays, I, p0=(I[0], 1.0))[0]
# Calculate error
if error_method == "rmse":
error = np.sqrt(np.mean((I - model_function(delays, I0, R)) ** 2))
elif error_method == "mae":
error = np.mean(np.sqrt((I - model_function(delays, I0, R)) ** 2))
# Construct synthetic relaxation profiles
synth_datasets = np.zeros((n_synth_datasets, I.size))
for i, I_mean in enumerate(model_function(delays, I0, R)):
synth_datasets[:, i] = np.random.normal(I_mean, error,
n_synth_datasets)
def synth_fit_decay(synth_intensity):
try:
synth_I0, synth_R = \
curve_fit(model_function, delays, synth_intensity,
p0=(I0, R))[0]
return synth_R
except RuntimeError:
if verbose >= 1:
print("Unable to calculate standard error for {0}".format(
peak.name))
return np.nan
# Calculate standard error
synth_Rs = multiprocess_map(synth_fit_decay, synth_datasets, 16)
R_se = np.std(synth_Rs)
return pd.Series([I0, R, R_se])
# Calculate relaxation rates and standard errors
fit = relax.apply(calc_relax, axis=1)
fit.columns = ["I0", relax_type, relax_type + " se"]
relax = relax.join(fit)
# Write outfile
if verbose >= 1:
print("Writing outfile '{0}'".format(outfile))
columns = [relax.index.name] + list(relax.columns.values)
header = "{0:<11s}".format(columns.pop(0))
for column in columns:
header += "{0:>12s}".format(column)
fmt = ["%12s", "%11.4f", "%11.4f"] + ["%11d"] * len(delays) + ["%11d",
"%11.4f", "%11.4f"]
np.savetxt(outfile, np.column_stack((relax.index.values, relax.values)),
fmt=fmt, header=header, comments='#')
def process_hetnoe(peaklist, infiles, outfile, verbose=1, debug=0, **kwargs):
"""
"""
from glob import glob
from os.path import expandvars
import nmrglue
import numpy as np
import pandas as pd
# Process arguments
processed_infiles = []
for infile in infiles:
processed_infiles += glob(expandvars(infile))
infiles = processed_infiles
if len(infiles) != 2:
        raise ValueError("Exactly two infiles (saturated and unsaturated) are required")
peaklist = expandvars(peaklist)
outfile = expandvars(outfile)
# Load peaklist
if verbose >= 1:
print("Loading peaklist from '{0}'".format(peaklist))
def convert_name(name):
return "{0}:{1}".format(name[-4:-1].upper(), name[2:-4])
relax = pd.read_csv(peaklist, sep="\t", usecols=[2, 3, 4], index_col=2,
converters={4: convert_name}, names=["1H", "15N", "residue"], skiprows=1)
# Load peak intensities from spectra
def calc_intensity(peak, **kwargs):
H_index = np.argmin((hydrogen - peak["1H"]) ** 2)
N_index = np.argmin((nitrogen - peak["15N"]) ** 2)
return intensity[N_index, H_index]
if verbose >= 1:
print("Loading intensities from '{0}'".format(infiles[0]))
parameters, intensity = nmrglue.pipe.read(infiles[0])
hydrogen = nmrglue.pipe.make_uc(parameters, intensity, dim=1).ppm_scale()
nitrogen = nmrglue.pipe.make_uc(parameters, intensity, dim=0).ppm_scale()
hydrogen += 0.0612858
nitrogen += 0.08399
relax["sat"] = relax.apply(calc_intensity, axis=1)
    sat_se = intensity[np.logical_and(intensity > -intensity.std(),
        intensity < intensity.std())].std()
    print(sat_se)
    # NOTE: the noise estimate above is overridden with a hardcoded value here
    sat_se = 54588.8
    print(sat_se)
if verbose >= 1:
print("Loading intensities from '{0}'".format(infiles[1]))
parameters, intensity = nmrglue.pipe.read(infiles[1])
relax["nosat"] = relax.apply(calc_intensity, axis=1)
    nosat_se = intensity[np.logical_and(intensity > -intensity.std(),
        intensity < intensity.std())].std()
    print(nosat_se)
    # NOTE: the noise estimate above is overridden with a hardcoded value here
    nosat_se = 58479.8
    print(nosat_se)
relax["noe"] = relax["sat"] / relax["nosat"]
relax["noe se"] = np.sqrt(
(sat_se / relax["sat"]) ** 2 + (nosat_se / relax["nosat"]) ** 2) * relax[
"noe"]
# Write outfile
if verbose >= 1:
print("Writing outfile '{0}'".format(outfile))
columns = [relax.index.name] + list(relax.columns.values)
header = "{0:<11s}".format(columns.pop(0))
for column in columns:
header += "{0:>12s}".format(column)
fmt = ["%12s", "%11.4f", "%11.4f"] + ["%11d"] * 2 + ["%11.4f", "%11.4f"]
np.savetxt(outfile, np.column_stack((relax.index.values, relax.values)),
fmt=fmt, header=header, comments='#')
def process_pre(dia_infile, para_infile, outfile, verbose=1, debug=0,
**kwargs):
"""
"""
from glob import glob
from os.path import expandvars
import numpy as np
import pandas as pd
# Process arguments
dia_infile = glob(expandvars(dia_infile))[0]
para_infile = glob(expandvars(para_infile))[0]
if verbose >= 1:
print(
"Loading diamagnetic relaxation rates from '{0}'".format(dia_infile))
dia_relax = pd.read_csv(dia_infile, index_col=0, delimiter=r"\s\s+")
dia_relax.index.name = "residue"
dia_relax.rename(
columns={"I0": "dia I0", "I0 se": "dia I0 se", "r2": "dia r2",
"r2 se": "dia r2 se", }, inplace=True)
if verbose >= 1:
print("Loading paramagnetic relaxation rates from '{0}'".format(
para_infile))
para_relax = pd.read_csv(para_infile, index_col=0, delimiter=r"\s\s+")
para_relax.index.name = "residue"
para_relax.rename(
columns={"I0": "para I0", "I0 se": "para I0 se", "r2": "para r2",
"r2 se": "para r2 se", }, inplace=True)
relax = dia_relax[
["1H", "15N", "dia I0", "dia I0 se", "dia r2", "dia r2 se"]]
relax = pd.concat(
(relax, para_relax[["para I0", "para I0 se", "para r2", "para r2 se"]]),
axis=1)
# //@formatter:off
relax["I/I0"] = relax["para I0"] / relax["dia I0"]
relax["I/I0 se"] = np.sqrt(relax["I/I0"] ** 2 * \
((relax["para I0 se"] / relax["para I0"]) ** 2 + \
(relax["dia I0 se"] / relax["dia I0"]) ** 2))
relax["r20/r2"] = relax["dia r2"] / relax["para r2"]
relax["r20/r2 se"] = np.sqrt(relax["r20/r2"] ** 2 * \
((relax["dia r2 se"] / relax["dia r2"]) ** 2 + \
(relax["para r2 se"] / relax["para r2"]) ** 2))
relax["rho2"] = relax["para r2"] - relax["dia r2"]
relax["rho2 se"] = np.sqrt(
relax["para r2 se"] ** 2 + relax["dia r2 se"] ** 2)
# //@formatter:on
# Write outfile
if verbose >= 1:
print("Writing outfile '{0}'".format(outfile))
columns = [relax.index.name] + list(relax.columns.values)
header = "{0:<11s}".format(columns.pop(0))
for column in columns:
header += "{0:>12s}".format(column)
with open(outfile, "w") as out:
relax["dia I0"][np.isnan(relax["dia I0"])] = 0
relax["dia I0 se"][np.isnan(relax["dia I0 se"])] = 0
relax["para I0"][np.isnan(relax["para I0"])] = 0
relax["para I0 se"][np.isnan(relax["para I0 se"])] = 0
out.write("#" + header + "\n")
for residue in relax.index:
            # This is an abomination. Why is this the least painful way to
            # write a decent text file?
row = relax.loc[residue]
out.write("{0:12s} {1:11.2f} {2:11.1f} {3:11d} {4:11d} "
"{5:11.2f} {6:11.2f} {7:11d} {8:11d} {9:11.2f} "
"{10:11.2f} {11:11.3f} {12:11.3f} {13:11.3f} "
"{14:11.3f} {15:11.2f} {16:11.2f}\n".format(residue,
row["1H"], row["15N"], int(row["dia I0"]), int(row["dia I0 se"]),
row["dia r2"], row["dia r2 se"], int(row["para I0"]),
int(row["para I0 se"]), row["para r2"], row["para r2 se"],
row["I/I0"], row["I/I0 se"], row["r20/r2"], row["r20/r2 se"],
row["rho2"], row["rho2 se"]))
#################################### MAIN #####################################
if __name__ == "__main__":
import argparse
# Prepare argument parser
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawTextHelpFormatter)
subparsers = parser.add_subparsers(dest="mode", description="")
# Prepare iRED subparser
ired_subparser = subparsers.add_parser(name="ired",
help="Process iRED data")
ired_subparser.set_defaults(function=process_ired)
input_group = ired_subparser.add_argument_group("input")
action_group = ired_subparser.add_argument_group("action")
output_group = ired_subparser.add_argument_group("output")
input_group.add_argument("-infile", required=True, dest="infiles",
nargs="+", type=str, help="""cpptraj output file(s) from
which to load datasets; may be plain text or compressed""")
input_group.add_argument("-indexfile", required=False, type=str,
help="""Text file from which to load residue names; if
omitted will be taken from columns of first infile""")
output_group.add_argument("-outfile", required=True, type=str,
help="Text file to which processed data will be output")
# Prepare error subparser
error_subparser = subparsers.add_parser(name="error", help="""Calculates
error of simulated relaxation relative to experiment""",
description="""Calculates error of simulated relaxation relative to
experiment. The intended use case is to break down errors relative to
experimental data collected at multiple magnetic fields or by multiple
groups, error(residue, measurement, magnet/group), into a form that
is easier to visualize and communicate, error(residue, measurement).
Reads in a series of input files containing simulated data and a
series of files containing corresponding experimental data. These
        files are treated in pairs and the error between all data points
        present in both (e.g. row 'GLN:2', column 'r1') is calculated. Columns
        ending in ' se' are treated as uncertainties, and are propagated into
        uncertainties in the resulting errors rather than being averaged.
        Take caution when processing datasets lacking uncertainties alongside
        those that do (experimental uncertainties are not always reported), as
        the resulting uncertainties in the residuals will be incorrect.""")
error_subparser.set_defaults(function=process_error)
input_group = error_subparser.add_argument_group("input")
action_group = error_subparser.add_argument_group("action")
output_group = error_subparser.add_argument_group("output")
input_group.add_argument("-sim_infile", required=True, dest="sim_infiles",
nargs="+", type=str,
help="input file(s) from which to load simulation datasets")
input_group.add_argument("-exp_infile", required=True, dest="exp_infiles",
nargs="+", type=str,
help="input file(s) from which to load experimental datasets")
output_group.add_argument("-outfile", required=True, type=str,
help="Text file to which processed data will be output")
# Prepare relax subparser
relax_subparser = subparsers.add_parser(name="relax",
help="Process experimental R1 or R2 relaxation data")
relax_subparser.set_defaults(function=process_relax)
input_group = relax_subparser.add_argument_group("input")
action_group = relax_subparser.add_argument_group("action")
output_group = relax_subparser.add_argument_group("output")
relax_type = input_group.add_mutually_exclusive_group()
relax_type.add_argument("--r1", action="store_const", const="r1",
default="r1", dest="relax_type", help="process R1 relaxation data")
relax_type.add_argument("--r2", action="store_const", const="r2",
default="r1", dest="relax_type", help="process R2 relaxation data")
relax_type.add_argument("--pre-dia", action="store_const", const="dia",
default="r1", dest="relax_type",
help="process PRE diamagnetic relaxation data")
relax_type.add_argument("--pre-para", action="store_const", const="para",
default="r1", dest="relax_type",
help="process PRE paramagnetic relaxation data")
input_group.add_argument("-peaklist", required=True, type=str,
help="peak list (exported from ccpnmr)")
input_group.add_argument("-infile", required=True, dest="infiles",
metavar="INFILE", nargs="+", type=str,
help="NMR spectra (NMRPipe format)")
input_group.add_argument("-delay", required=True, dest="delays",
metavar="DELAY", nargs="+", type=str,
help="delays (ms); number of delays must match number of infiles")
action_group.add_argument("-synthetics", required=False,
dest="n_synth_datasets", default=100, type=int,
help="number of synthetic datasets to use to calculate error")
error_method = action_group.add_mutually_exclusive_group()
error_method.add_argument("--rmse", action="store_const", const="rmse",
default="rmse", dest="error_method",
help="use root mean square error to generate synthetic datasets")
error_method.add_argument("--mae", action="store_const", const="mae",
default="rmse", dest="error_method",
help="use mean absolute error to generate synthetic datasets")
output_group.add_argument("-outfile", required=True, type=str,
help="text file to which processed data will be output")
# Prepare hetnoe subparser
hetnoe_subparser = subparsers.add_parser(name="hetnoe",
help="Process experimental heteronuclear NOE relaxation data")
hetnoe_subparser.set_defaults(function=process_hetnoe)
input_group = hetnoe_subparser.add_argument_group("input")
action_group = hetnoe_subparser.add_argument_group("action")
output_group = hetnoe_subparser.add_argument_group("output")
input_group.add_argument("-peaklist", required=True, type=str,
help="peak list (exported from ccpnmr)")
input_group.add_argument("-infile", required=True, dest="infiles",
metavar="INFILE", nargs=2, type=str, help="NMR spectra (NMRPipe format)")
output_group.add_argument("-outfile", required=True, type=str,
help="text file to which processed data will be output")
# Prepare pre subparser
pre_subparser = subparsers.add_parser(name="pre",
help="Process experimental heteronuclear NOE relaxation data")
pre_subparser.set_defaults(function=process_pre)
input_group = pre_subparser.add_argument_group("input")
action_group = pre_subparser.add_argument_group("action")
output_group = pre_subparser.add_argument_group("output")
input_group.add_argument("-dia", required=True, dest="dia_infile",
metavar="DIA_INFILE", type=str, help="Diamagnetic relaxation rates")
input_group.add_argument("-para", required=True, dest="para_infile",
metavar="PARA_INFILE", type=str, help="Paramagnetic relaxation rates")
output_group.add_argument("-outfile", required=True, type=str,
help="text file to which processed data will be output")
# Verbosity
for p in subparsers.choices.values():
verbosity = p.add_mutually_exclusive_group()
verbosity.add_argument("-v", "--verbose", action="count", default=1,
help="enable verbose output, may be specified more than once")
verbosity.add_argument("-q", "--quiet", action="store_const", const=0,
default=1, dest="verbose", help="disable verbose output")
# Parse arguments and run selected function
kwargs = vars(parser.parse_args())
kwargs.pop("function")(**kwargs)
| 2.859375
| 3
|
src/comms/imc2lib/imc2_trackers.py
|
abbacode/avaloria
| 0
|
12777514
|
"""
Certain periodic packets are sent by connected MUDs (is-alive, user-cache,
etc). The IMC2 protocol assumes that each connected MUD will capture these and
populate/maintain their own lists of other servers connected. This module
contains stuff like this.
"""
from time import time
class IMC2Mud(object):
"""
Stores information about other games connected to our current IMC2 network.
"""
def __init__(self, packet):
self.name = packet.origin
self.versionid = packet.optional_data.get('versionid', None)
self.networkname = packet.optional_data.get('networkname', None)
self.url = packet.optional_data.get('url', None)
self.host = packet.optional_data.get('host', None)
self.port = packet.optional_data.get('port', None)
self.sha256 = packet.optional_data.get('sha256', None)
# This is used to determine when a Mud has fallen into inactive status.
self.last_updated = time()
class IMC2MudList(object):
"""
Keeps track of other MUDs connected to the IMC network.
"""
def __init__(self):
# Mud list is stored in a dict, key being the IMC Mud name.
self.mud_list = {}
def get_mud_list(self):
"""
Returns a sorted list of connected Muds.
"""
muds = self.mud_list.items()
muds.sort()
return [value for key, value in muds]
def update_mud_from_packet(self, packet):
"""
This grabs relevant info from the packet and stuffs it in the
Mud list for later retrieval.
"""
mud = IMC2Mud(packet)
self.mud_list[mud.name] = mud
def remove_mud_from_packet(self, packet):
"""
Removes a mud from the Mud list when given a packet.
"""
mud = IMC2Mud(packet)
try:
del self.mud_list[mud.name]
except KeyError:
# No matching entry, no big deal.
pass
class IMC2Channel(object):
"""
Stores information about channels available on the network.
"""
def __init__(self, packet):
self.localname = packet.optional_data.get('localname', None)
self.name = packet.optional_data.get('channel', None)
self.level = packet.optional_data.get('level', None)
self.owner = packet.optional_data.get('owner', None)
self.policy = packet.optional_data.get('policy', None)
self.last_updated = time()
class IMC2ChanList(object):
"""
    Keeps track of channels available on the IMC network.
"""
def __init__(self):
        # Chan list is stored in a dict, key being the IMC channel name.
self.chan_list = {}
def get_channel_list(self):
"""
Returns a sorted list of cached channels.
"""
channels = self.chan_list.items()
channels.sort()
return [value for key, value in channels]
def update_channel_from_packet(self, packet):
"""
This grabs relevant info from the packet and stuffs it in the
channel list for later retrieval.
"""
channel = IMC2Channel(packet)
self.chan_list[channel.name] = channel
def remove_channel_from_packet(self, packet):
"""
Removes a channel from the Channel list when given a packet.
"""
channel = IMC2Channel(packet)
try:
del self.chan_list[channel.name]
except KeyError:
# No matching entry, no big deal.
pass
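# Usage sketch (the packet object below is a hypothetical stand-in for the
# packets produced by the IMC2 protocol layer elsewhere in this package):
#
#     class FakePacket(object):
#         origin = "OtherMUD"
#         optional_data = {"versionid": "1.0", "host": "mud.example.com",
#                          "port": "5500"}
#
#     mud_list = IMC2MudList()
#     mud_list.update_mud_from_packet(FakePacket())
#     [mud.name for mud in mud_list.get_mud_list()]  # -> ["OtherMUD"]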
| 3.3125
| 3
|
Y2018/day2/python/day2.py
|
Khranovskiy/advent-of-code
| 0
|
12777515
|
<filename>Y2018/day2/python/day2.py
import itertools
print(*[''.join(a for a,b in zip(this,that) if a == b) for this,that in itertools.combinations(open('inp', 'r').readlines(),2) if len([a for a,b in zip(this,that) if a != b]) == 1])
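# An equivalent step-by-step version of the one-liner above (a sketch; it
# assumes the same 'inp' input file and prints the letters common to the two
# box IDs that differ in exactly one position):
#
#     import itertools
#
#     lines = open('inp', 'r').readlines()
#     for this, that in itertools.combinations(lines, 2):
#         if sum(a != b for a, b in zip(this, that)) == 1:
#             print(''.join(a for a, b in zip(this, that) if a == b))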
| 3.25
| 3
|
1.Study/2. with computer/4.Programming/2.Python/8. Python_intermediate/p_chapter02_01.py
|
jskim0406/Study
| 0
|
12777516
|
# -*- coding: utf-8 -*-#
# chapter 02-01
# Object-oriented programming (OOP) (vs. procedural) advantages: code reuse, no code duplication, easier maintenance, easier management of large projects
# Large projects used to be coded around functions (functions calling functions gets complicated) -> harder to improve as the data grows (complex structure)
# Class-centric -> managed through objects
# Plain (non-OOP) coding
# Car 1
car_company1 = 'Ferrari'
car_detail1 = [
{'color' : 'white'},
{'horse_power' : 400}
]
# Car 2
car_company2 = 'BMW'
car_detail2 = [
{'color' : 'black'},
{'horse_power' : 270}
]
# Car 3
car_company3 = 'Audi'
car_detail3 = [
{'color' : 'orange'},
{'horse_power' : 350}
]
# List structure
# Awkward to manage; items must be accessed by index (a dictionary can be looked up by key)
car_company_list = ['Ferrari','BMW','Audi']
car_detail_list = [
{'color' : 'white', 'horse_power' : 400},
{'color' : 'black', 'horse_power' : 270},
{'color' : 'orange', 'horse_power' : 350}
]
# del car_company_list[1]
# del car_detail_list[1]
#
# print(car_company_list, car_detail_list)
# Dictionary structure
car_dicts = [
{'car_company' : 'Ferrari', 'car_detail' : {'color' : 'white', 'horse_power' : 400}},
{'car_company' : 'BMW', 'car_detail' : {'color' : 'black', 'horse_power' : 270}},
{'car_company' : 'Audi', 'car_detail' : {'color' : 'orange', 'horse_power' : 350}}
]
print(car_dicts[0]['car_company'],car_dicts[0]['car_detail'])
print()
print()
# Class structure
# After designing the structure: better reusability, minimal code repetition, methods available
class Car():
def __init__(self,company,detail):
self._company = company
self._detail = detail
    # Return value when the object is printed, e.g. print(car1)
def __str__(self):
return 'str : {} - {}'.format(self._company,self._detail)
    # Return value when the object itself is evaluated (repr), e.g. car1
def __repr__(self):
return 'repr : {} - {}'.format(self._company,self._detail)
car1 = Car('Ferrari',{'color' : 'white','horse_power' : 400})
car2 = Car('BMW',{'color' : 'black','horse_power' : 270})
car3 = Car('Audi',{'color' : 'orange','horse_power' : 350})
# obj.__dict__ : shows the object's attributes and their values
print(car1.__dict__)
print(car2.__dict__)
print(car3.__dict__)
print()
print()
# The object's meta information (magic methods) can be inspected
print(dir(car1))
print()
print()
car_list = []
car_list.append(car1)
car_list.append(car2)
car_list.append(car3)
# The __repr__ value will be printed for each object
print(car_list)
print()
print()
for x in car_list:
print(x)
print()
print()
| 2.515625
| 3
|
VoigtFit/VoigtFit_example.py
|
InspectorDidi/VoigtFit
| 2
|
12777517
|
import numpy as np
import matplotlib.pyplot as plt
import VoigtFit
import pickle
### Fit DLA towards quasar Q1313+1441
### Observed in X-shooter P089.A-0068
z_DLA = 1.7941
logNHI = 21.3, 0.1 # value, uncertainty
# If log(NHI) is not known use:
#logNHI = None
#### Load UVB and VIS data:
UVB_fname = 'data/test_UVB_1d.spec'
res_UVB = 8000
VIS_fname = 'data/test_VIS_1d.spec'
res_VIS = 11800
wl_uvb, spec_uvb, err_uvb = np.loadtxt(UVB_fname, unpack=True)
wl_vis, spec_vis, err_vis = np.loadtxt(VIS_fname, unpack=True)
dataset = VoigtFit.DataSet(z_DLA)
dataset.add_data(wl_uvb, spec_uvb, 299792./res_UVB, err=err_uvb, normalized=False)
dataset.add_data(wl_vis, spec_vis, 299792./res_VIS, err=err_vis, normalized=False)
### Define absorption lines:
dataset.add_line('FeII_2374')
dataset.add_line('FeII_2260')
dataset.add_line('CrII_2056')
dataset.add_line('CrII_2066')
dataset.add_line('CrII_2026')
dataset.add_line('ZnII_2026')
dataset.add_line('MgI_2026')
dataset.add_line('MgI_2852')
### This command prepares the line regions:
# First the data are interactively normalized
# Then regions which should not be fitted are masked interactively too
dataset.prepare_dataset()
# Save the dataset so you don't have to normalize and mask every time:
VoigtFit.SaveDataSet('test.dataset', dataset)
### The dataset which was defined above can be loaded like this:
# In this case, comment out lines 18-41
#dataset = VoigtFit.LoadDataSet('test.dataset')
### If a line has been defined, and you don't want to fit it
### it can either be removed from the dataset completely:
#dataset.remove_line('CrII_2056')
### or deactivated:
#dataset.deactivate_line('FeII_2374')
dataset.reset_components()
### Add velocity components for each ion:
# ion z b logN
dataset.add_component('FeII', 1.793532, 20, 14.3, var_z=1)
dataset.add_component('FeII', 1.794060, 20, 15.0, var_z=1)
dataset.add_component('FeII', 1.794282, 20, 14.3, var_z=1)
dataset.add_component('FeII', 1.794722, 20, 14.3, var_z=1)
dataset.add_component('FeII', 1.795121, 15, 14.5, var_z=1, var_b=1)
#
# Options for the components:
# var_z=1/0 vary redshift for this component
# var_b=1/0 vary b-parameter for this component
# var_N=1/0 vary column density for this component
#
# Redshift and b-parameters can be tied.
# passing the option 'tie_z=z0_FeII' ties the redshift to the first component of FeII
# passing the option 'tie_b=b2_SiII' ties the b-parameter to the third component of SiII
#
# NOTE - the ion must be defined and the component index starts with 0
#
# The entire velocity structure can be copied from one ion to another:
dataset.copy_components('ZnII', 'FeII', logN=12.9, ref_comp=1)
# This copies the five components defined for FeII to ZnII and keeps
# the same pattern of initial guesses for column density.
# By giving ref_comp and logN, this initial guess pattern is scaled such
# that the second component has logN=12.9
#
# Individual components which are not observed for weaker lines can be removed:
#dataset.delete_component('ZnII', 4) # the index '4' refers to the fifth component
#dataset.delete_component('ZnII', 3)
#dataset.delete_component('ZnII', 2)
#dataset.delete_component('ZnII', 1)
#dataset.delete_component('ZnII', 0)
# NOTE - components should be deleted from last component to first component
# not the other way around as that messes up the component numbering.
dataset.copy_components('CrII', 'FeII', logN=13.6, ref_comp=1)
dataset.copy_components('MgI', 'FeII', logN=12.4, ref_comp=1)
dataset.prepare_dataset()
popt, chi2 = dataset.fit(verbose=True)
dataset.plot_fit()
if logNHI:
dataset.print_metallicity(*logNHI)
dataset.print_abundance()
#### Remove parameter links
#### The links may result in errors when loading the parameters later.
for par in popt.params.values():
par.expr = None
for par in dataset.pars.values():
par.expr = None
with open('example_best_fit.pars', 'wb') as output:
    pickle.dump(popt.params, output)
VoigtFit.SaveDataSet('example_fit.dataset', dataset)
| 2.1875
| 2
|
heat/common/utils.py
|
devcamcar/heat
| 1
|
12777518
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
System-level utilities and helper functions.
"""
import datetime
import logging
import sys
import uuid
from eventlet import event
from eventlet import greenthread
from eventlet import semaphore
from eventlet.green import subprocess
from heat.openstack.common import exception
from heat.openstack.common import timeutils

LOG = logging.getLogger(__name__)

PERFECT_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%f"
def chunkreadable(iter, chunk_size=65536):
"""
Wrap a readable iterator with a reader yielding chunks of
a preferred size, otherwise leave iterator unchanged.
:param iter: an iter which may also be readable
:param chunk_size: maximum size of chunk
"""
return chunkiter(iter, chunk_size) if hasattr(iter, 'read') else iter
def chunkiter(fp, chunk_size=65536):
"""
Return an iterator to a file-like obj which yields fixed size chunks
:param fp: a file-like object
:param chunk_size: maximum size of chunk
"""
while True:
chunk = fp.read(chunk_size)
if chunk:
yield chunk
else:
break
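# Usage sketch (hypothetical file path and consumer): stream a file in
# fixed-size chunks instead of reading it into memory at once.
#
#     with open("/tmp/blob", "rb") as fp:
#         for chunk in chunkiter(fp, chunk_size=4096):
#             handle(chunk)  # 'handle' is a hypothetical consumer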
def generate_uuid():
return str(uuid.uuid4())
def gen_uuid():
return uuid.uuid4()
def strtime(at=None, fmt=PERFECT_TIME_FORMAT):
"""Returns formatted utcnow."""
if not at:
at = timeutils.utcnow()
return at.strftime(fmt)
def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):
"""Turn a formatted time back into a datetime."""
return datetime.datetime.strptime(timestr, fmt)
class LoopingCallDone(Exception):
"""Exception to break out and stop a LoopingCall.
The poll-function passed to LoopingCall can raise this exception to
break out of the loop normally. This is somewhat analogous to
StopIteration.
An optional return-value can be included as the argument to the exception;
this return-value will be returned by LoopingCall.wait()
"""
def __init__(self, retvalue=True):
""":param retvalue: Value that LoopingCall.wait() should return."""
self.retvalue = retvalue
class LoopingCall(object):
def __init__(self, f=None, *args, **kw):
self.args = args
self.kw = kw
self.f = f
self._running = False
def start(self, interval, now=True):
self._running = True
done = event.Event()
def _inner():
if not now:
greenthread.sleep(interval)
try:
while self._running:
self.f(*self.args, **self.kw)
if not self._running:
break
greenthread.sleep(interval)
            except LoopingCallDone as e:
                self.stop()
                done.send(e.retvalue)
            except Exception:
                LOG.exception('in looping call')
                done.send_exception(*sys.exc_info())
return
else:
done.send(True)
self.done = done
greenthread.spawn(_inner)
return self.done
def stop(self):
self._running = False
def wait(self):
return self.done.wait()
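# Usage sketch (hypothetical poll function): raising LoopingCallDone inside
# the polled function stops the loop, and its retvalue is what wait() returns.
#
#     def poll():
#         if job_finished():  # hypothetical predicate
#             raise LoopingCallDone(retvalue="finished")
#
#     call = LoopingCall(poll)
#     call.start(interval=1.0)
#     result = call.wait()  # -> "finished"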
| 2.03125
| 2
|
polecat/data/examples/helloworld/helloworld/project.py
|
furious-luke/polecat
| 4
|
12777519
|
<reponame>furious-luke/polecat
from polecat.project import Project
class HelloWorldProject(Project):
bundle = 'bundle.js'
| 1.289063
| 1
|
books/python-3-oop-packt/Chapter10/10_10_decorator_syntax.py
|
phiratio/lpthw
| 73
|
12777520
|
@log_calls
def test1(a,b,c):
print("\ttest1 called")
| 1.210938
| 1
|
tests/utils/test_application.py
|
SpiNNakerManchester/nengo_spinnaker
| 13
|
12777521
|
import mock
import pytest
from nengo_spinnaker.utils import application
@pytest.mark.parametrize("app_name", ["Arthur", "Robin"])
def test_get_application(app_name):
with mock.patch.object(application, "pkg_resources") as pkg_resources:
pkg_resources.resource_filename.return_value = "Camelot"
# Get the application filename
assert application.get_application(app_name) == "Camelot"
pkg_resources.resource_filename.assert_called_once_with(
"nengo_spinnaker", "binaries/nengo_{}.aplx".format(app_name)
)
| 2.40625
| 2
|
config.py
|
namaggarwal/transaction-reminder
| 0
|
12777522
|
<reponame>namaggarwal/transaction-reminder
FLASK_SECRET_KEY = 'namana'
DATABASE_URI = 'sqlite:///test.db'
DEBUG = True
SQLALCHEMY_TRACK_MODIFICATIONS = False
GOOGLE_CLIENT_ID = ''
GOOGLE_CLIENT_SECRET = ''
WUNDERLIST_CLIENT_ID = ''
WUNDERLIST_CLIENT_SECRET = ''
WUNDERLIST_NAME = 'Splitwise'
APPLICATION_ROOT = None
GOOGLE_ANALYTIC_ENABLED = False
GOOGLE_ANALYTIC_ID = ''
BEHIND_PROXY = False
| 1.234375
| 1
|
envs/mujoco/humanoid_env.py
|
artberryx/LSD
| 7
|
12777523
|
<filename>envs/mujoco/humanoid_env.py
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from collections import defaultdict
from gym import utils
import numpy as np
from gym.envs.mujoco import mujoco_env
from envs.mujoco.mujoco_utils import MujocoTrait
def mass_center(sim):
mass = np.expand_dims(sim.model.body_mass, 1)
xpos = sim.data.xipos
return (np.sum(mass * xpos, 0) / np.sum(mass))[0]
# pylint: disable=missing-docstring
class HumanoidEnv(MujocoTrait, mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self,
expose_obs_idxs=None,
expose_all_qpos=True,
model_path=None,
task='forward',
goal=None,
fixed_initial_state=False,
num_action_repeats=None,
done_allowing_step_unit=None,
fixed_mpl=None,
original_env=False,
render_hw=100,
):
utils.EzPickle.__init__(**locals())
if model_path is None:
model_path = 'humanoid.xml'
self._task = task
self._goal = goal
if self._task == "follow_goals":
self._goal_list = [
np.array([3.0, -0.5]),
np.array([6.0, 8.0]),
np.array([12.0, 12.0]),
]
self._goal = self._goal_list[0]
print("Following a trajectory of goals:", self._goal_list)
self._expose_obs_idxs = expose_obs_idxs
self._expose_all_qpos = expose_all_qpos
self.fixed_initial_state = fixed_initial_state
self._num_action_repeats = num_action_repeats
self._done_allowing_step_unit = done_allowing_step_unit
self._fixed_mpl = fixed_mpl
self._original_env = original_env
self.render_hw = render_hw
xml_path = "envs/mujoco/assets/"
model_path = os.path.abspath(os.path.join(xml_path, model_path))
mujoco_env.MujocoEnv.__init__(self, model_path, 5)
def _get_obs(self):
data = self.sim.data
if self._original_env:
return np.concatenate([data.qpos.flat[2:],
data.qvel.flat,
data.cinert.flat,
data.cvel.flat,
data.qfrc_actuator.flat,
data.cfrc_ext.flat])
data = self.sim.data
if self._expose_all_qpos:
obs = np.concatenate([
data.qpos.flat, data.qvel.flat,
# data.cinert.flat, data.cvel.flat,
# data.qfrc_actuator.flat, data.cfrc_ext.flat
])
else:
obs = np.concatenate([
data.qpos.flat[2:], data.qvel.flat, data.cinert.flat, data.cvel.flat,
data.qfrc_actuator.flat, data.cfrc_ext.flat
])
if self._expose_obs_idxs is not None:
obs = obs[self._expose_obs_idxs]
return obs
# def compute_reward(self, ob, next_ob, action=None):
# xposbefore = ob[:, 0]
# yposbefore = ob[:, 1]
# xposafter = next_ob[:, 0]
# yposafter = next_ob[:, 1]
#
# forward_reward = (xposafter - xposbefore) / self.dt
# sideward_reward = (yposafter - yposbefore) / self.dt
#
# if action is not None:
# ctrl_cost = .5 * np.square(action).sum(axis=1)
# survive_reward = 1.0
# if self._task == "forward":
# reward = forward_reward - ctrl_cost + survive_reward
# elif self._task == "backward":
# reward = -forward_reward - ctrl_cost + survive_reward
# elif self._task == "left":
# reward = sideward_reward - ctrl_cost + survive_reward
# elif self._task == "right":
# reward = -sideward_reward - ctrl_cost + survive_reward
# elif self._task in ["goal", "follow_goals"]:
# reward = -np.linalg.norm(
# np.array([xposafter, yposafter]).T - self._goal, axis=1)
# elif self._task in ["sparse_goal"]:
# reward = (-np.linalg.norm(
# np.array([xposafter, yposafter]).T - self._goal, axis=1) >
# -0.3).astype(np.float32)
# return reward
def compute_reward(self, **kwargs):
return None
def step(self, a, render=False):
if hasattr(self, '_step_count'):
self._step_count += 1
obsbefore = self._get_obs()
pos_before = mass_center(self.sim)
xposbefore = self.sim.data.qpos.flat[0]
yposbefore = self.sim.data.qpos.flat[1]
if self._num_action_repeats is None:
self.do_simulation(a, self.frame_skip)
else:
for i in range(self._num_action_repeats):
self.do_simulation(a, self.frame_skip)
obsafter = self._get_obs()
pos_after = mass_center(self.sim)
xposafter = self.sim.data.qpos.flat[0]
yposafter = self.sim.data.qpos.flat[1]
def _get_dads_humanoid_reward():
alive_bonus = 5.0
data = self.sim.data
lin_vel_cost = 0.25 * (
pos_after - pos_before) / self.sim.model.opt.timestep
quad_ctrl_cost = 0.1 * np.square(data.ctrl).sum()
quad_impact_cost = .5e-6 * np.square(data.cfrc_ext).sum()
quad_impact_cost = min(quad_impact_cost, 10)
reward = lin_vel_cost - quad_ctrl_cost - quad_impact_cost + alive_bonus
return reward
def _get_gym_humanoid_reward():
# gym/envs/mujoco/humanoid.py
alive_bonus = 5.0
data = self.sim.data
lin_vel_cost = 1.25 * (pos_after - pos_before) / self.dt
quad_ctrl_cost = 0.1 * np.square(data.ctrl).sum()
quad_impact_cost = .5e-6 * np.square(data.cfrc_ext).sum()
quad_impact_cost = min(quad_impact_cost, 10)
reward = lin_vel_cost - quad_ctrl_cost - quad_impact_cost + alive_bonus
return reward
qpos = self.sim.data.qpos
if hasattr(self, '_done_internally') and self._done_allowing_step_unit is not None:
self._done_internally = (self._done_internally or bool((qpos[2] < 1.0) or (qpos[2] > 2.0)))
done = (self._done_internally and self._step_count % self._done_allowing_step_unit == 0)
else:
done = bool((qpos[2] < 1.0) or (qpos[2] > 2.0))
reward = self.compute_reward(xposbefore=xposbefore, yposbefore=yposbefore, xposafter=xposafter, yposafter=yposafter, cur_done=done)
if reward is None:
reward = _get_gym_humanoid_reward()
if self._task == "follow_goals":
xposafter = self.sim.data.qpos.flat[0]
yposafter = self.sim.data.qpos.flat[1]
reward = -np.linalg.norm(np.array([xposafter, yposafter]).T - self._goal)
# update goal
if np.abs(reward) < 0.5:
self._goal = self._goal_list[0]
self._goal_list = self._goal_list[1:]
print("Goal Updated:", self._goal)
elif self._task == "goal":
xposafter = self.sim.data.qpos.flat[0]
yposafter = self.sim.data.qpos.flat[1]
reward = -np.linalg.norm(np.array([xposafter, yposafter]).T - self._goal)
ob = self._get_obs()
info = dict(
#reward_linvel=lin_vel_cost,
#reward_quadctrl=-quad_ctrl_cost,
#reward_alive=alive_bonus,
#reward_impact=-quad_impact_cost,
coordinates=np.array([xposbefore, yposbefore]),
next_coordinates=np.array([xposafter, yposafter]),
ori_obs=obsbefore,
next_ori_obs=obsafter,
)
if render:
info['render'] = self.render(mode='rgb_array').transpose(2, 0, 1)
return ob, reward, done, info
def reset_model(self):
self._step_count = 0
self._done_internally = False
c = 0.01
if self.fixed_initial_state:
self.set_state(
self.init_qpos,
self.init_qvel)
else:
self.set_state(
self.init_qpos + np.random.uniform(
low=-c, high=c, size=self.sim.model.nq),
self.init_qvel + np.random.uniform(
low=-c,
high=c,
size=self.sim.model.nv,
))
if self._task == "follow_goals":
self._goal = self._goal_list[0]
self._goal_list = self._goal_list[1:]
print("Current goal:", self._goal)
return self._get_obs()
def viewer_setup(self):
self.viewer.cam.distance = self.model.stat.extent * 2.0
def calc_eval_metrics(self, trajectories, is_option_trajectories, num_coord_dims=2):
eval_metrics = super().calc_eval_metrics(trajectories, is_option_trajectories, num_coord_dims)
return eval_metrics
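# Hedged usage sketch (not part of the original file): the gym-style reward in
# step() combines a forward-velocity term, quadratic control and impact
# penalties, and an alive bonus. The standalone helper below restates it with
# plain numpy arrays; pos_before, pos_after, dt, ctrl, and cfrc_ext are
# hypothetical stand-ins for the simulator fields read inside step().
def _gym_humanoid_reward_sketch(pos_before, pos_after, dt, ctrl, cfrc_ext):
    alive_bonus = 5.0
    lin_vel_cost = 1.25 * (pos_after - pos_before) / dt
    quad_ctrl_cost = 0.1 * np.square(ctrl).sum()
    quad_impact_cost = min(.5e-6 * np.square(cfrc_ext).sum(), 10)
    return lin_vel_cost - quad_ctrl_cost - quad_impact_cost + alive_bonus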
| 2.046875
| 2
|
data_cleaning/bad_parallel_fixes.py
|
sharad461/nepali-translator
| 29
|
12777524
|
from functions import _read, write_lines
import re
a, b = _read("1.en"), _read("1.ne")
# For English
# Joins an incomplete line to the line above
i = 1
while i < len(a):
    if re.match(r"^([a-z0-9])+[^0-9i\.\)]", a[i]):
        a[i-1] = a[i-1].strip() + ' ' + a[i].strip()
        del a[i]
    else:
        i += 1
# Joins a numeral line to the next line
i = 0
while i < len(a)-1:
    if len(a[i]) < 3 and re.match(r"^([a-z0-9]){1,2}[\.\)]\s*", a[i]):
        a[i] = a[i].strip() + ' ' + a[i+1].strip()
        del a[i+1]
    i += 1
write_lines(a, "1_bpf.en")
# For Nepali
# Removes lines with only purnabiraams; only advance i when nothing was
# deleted, otherwise the element that shifts into position i gets skipped
i = 0
while i < len(b):
    if re.match(r"^।", b[i]):
        del b[i]
    else:
        i += 1
# Joins a numeral line to the next line
i = 0
while i < len(b)-1:
    if len(b[i]) < 3 and re.match(r"^([a-z0-9]){1,2}[\.\)]\s*", b[i]):
        b[i] = b[i].strip() + ' ' + b[i+1].strip()
        del b[i+1]
    i += 1
write_lines(b, "1_bpf.ne")
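# Hedged mini-demo (not part of the original script): how the English-side
# regexes above classify lines before joining; `samples` is a made-up input.
if __name__ == "__main__":
    samples = ["continues the previous line", "2."]
    print(bool(re.match(r"^([a-z0-9])+[^0-9i\.\)]", samples[0])))    # True: continuation
    print(bool(re.match(r"^([a-z0-9]){1,2}[\.\)]\s*", samples[1])))  # True: numeral line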
| 2.890625
| 3
|
python/experiments/SVGD/goodwin12.py
|
DrawZeroPoint/VIPS
| 12
|
12777525
|
from time import time
import os
import numpy as np
from scipy.stats import multivariate_normal
from experiments.lnpdfs.create_target_lnpfs import build_Goodwin_grad
from sampler.SVGD.python.svgd import SVGD as SVGD
unknown_params = [1, 2] + np.arange(4, 12).tolist()
num_dimensions = len(unknown_params)
seed = 1
target_lnpdf = build_Goodwin_grad(unknown_params, seed=seed, sigma=np.sqrt(0.2),
parameters=np.array([10., 1.97, 0.46, 0.53,
0.02878028, 0.13585575, 1.57070286, 0.75737477,
0.28929913, 1.52671658, 1.26995194, 1.89562767]))
def dlnpdf(theta):
    # count target evaluations on the function object itself; also avoid
    # shadowing the built-in `input`
    theta = np.atleast_2d(theta)
    dlnpdf.counter += len(theta)
    return target_lnpdf(theta)[1]
dlnpdf.counter = 0
def sample(n_samps, n_iter, epsilon, path):
if path is not None:
dirname = os.path.dirname(path)
if not os.path.exists(dirname):
os.makedirs(dirname)
prior = multivariate_normal(np.zeros((num_dimensions)), np.eye(num_dimensions))
x0 = prior.rvs(n_samps)
start = time()
samples = SVGD().update(x0, dlnpdf, n_iter=n_iter, stepsize=epsilon, path=path)
end = time()
np.savez(path, samples=samples, wallclocktime=end-start, nfevals=dlnpdf.counter)
print("done")
if __name__ == '__main__':
sample(100, 100, 1e-2, "/tmp/svgd_frisk_test")
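    # Hedged post-run check (not in the original script): dlnpdf.counter holds
    # the total number of target-gradient evaluations, saved above as nfevals.
    print("gradient evaluations:", dlnpdf.counter)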
| 2.0625
| 2
|
datasets/metadataset.py
|
luukschagen/Thesis_code
| 0
|
12777526
|
<gh_stars>0
import torch.utils.data as data
from math import pi
import torch
class MetaDataset(data.Dataset):
def __init__(self, task_num, k_shot, k_query, n_way=None):
super(MetaDataset, self).__init__()
self.task_num = task_num
self.k_shot = k_shot
self.k_query = k_query
self.n_way = n_way
self.x_s, self.x_q, self.y_s, self.y_q = (None for _ in range(4))
def __len__(self):
return self.task_num
def __getitem__(self, item):
return self.x_s[item], self.x_q[item], self.y_s[item], self.y_q[item]
class Sinusoid(MetaDataset):
def __init__(self, task_num, k_shot, k_query, amp_range=(0.1, 5),
phase_range=(0, 2 * pi), freq_range=(1, 1), noise=0.3):
super(Sinusoid, self).__init__(task_num=task_num, k_shot=k_shot, k_query=k_query)
self.amp_range = amp_range
self.phase_range = phase_range
self.freq_range = freq_range
self.noise = noise
def __getitem__(self, item):
if item >= self.task_num:
            raise IndexError(item)
x_s = torch.rand((1, self.k_shot)) * 10 - 5
x_q = torch.rand((1, self.k_query)) * 10 - 5
amp = (torch.rand(1) * (self.amp_range[1] - self.amp_range[0]) + self.amp_range[0]).view(-1, 1)
phase = (torch.rand(1) * (self.phase_range[1] - self.phase_range[0]) + self.phase_range[0]).view(-1, 1)
freq = (torch.rand(1) * (self.freq_range[1] - self.freq_range[0]) + self.freq_range[0]).view(-1, 1)
e_s = torch.distributions.Normal(0, torch.Tensor([self.noise for _ in range(1)])).sample(
[self.k_shot]).view(self.k_shot, 1).transpose(0, 1)
e_q = torch.distributions.Normal(0, torch.Tensor([self.noise for _ in range(1)])).sample(
[self.k_query]).view(self.k_query, 1).transpose(0, 1)
y_s = (amp * torch.sin(freq * x_s + phase)) + e_s
y_q = (amp * torch.sin(freq * x_q + phase)) + e_q
x_s = x_s.view(self.k_shot, 1)
x_q = x_q.view(self.k_query, 1)
y_s = y_s.view(self.k_shot, 1)
y_q = y_q.view(self.k_query, 1)
return x_s, x_q, y_s, y_q
class Linear(MetaDataset):
def __init__(self, task_num, k_shot, k_query, alpha_range=(-3, 3), beta_range=(-3, 3), noise=0.3):
super(Linear, self).__init__(task_num, k_shot, k_query)
self.alpha_range = alpha_range
self.beta_range = beta_range
self.noise = noise
def __getitem__(self, item):
if item >= self.task_num:
            raise IndexError(item)
x_s = torch.rand((1, self.k_shot)) * 10 - 5
x_q = torch.rand((1, self.k_query)) * 10 - 5
alpha = (torch.rand(1) * (self.alpha_range[1] - self.alpha_range[0]) + self.alpha_range[0]).view(-1, 1)
beta = (torch.rand(1) * (self.beta_range[1] - self.beta_range[0]) + self.beta_range[0]).view(-1, 1)
e_s = torch.distributions.Normal(0, torch.Tensor([self.noise for _ in range(1)])).sample(
[self.k_shot]).view(self.k_shot, 1).transpose(0, 1)
e_q = torch.distributions.Normal(0, torch.Tensor([self.noise for _ in range(1)])).sample(
[self.k_query]).view(self.k_query, 1).transpose(0, 1)
y_s = alpha * x_s + beta + e_s
y_q = alpha * x_q + beta + e_q
x_s = x_s.view(self.k_shot, 1)
x_q = x_q.view(self.k_query, 1)
y_s = y_s.view(self.k_shot, 1)
y_q = y_q.view(self.k_query, 1)
return x_s, x_q, y_s, y_q
class Quadratic(MetaDataset):
def __init__(self, task_num, k_shot, k_query, alpha_range=(0.02, 0.15),
beta_range=(-3, 3), c_range=(-3, 3), noise=0.3):
super(Quadratic, self).__init__(task_num, k_shot, k_query)
self.alpha_range = alpha_range
self.beta_range = beta_range
self.c_range = c_range
self.noise = noise
def __getitem__(self, item):
if item >= self.task_num:
            raise IndexError(item)
x_s = torch.rand((1, self.k_shot)) * 10 - 5
x_q = torch.rand((1, self.k_query)) * 10 - 5
alpha = (torch.rand(1) * (self.alpha_range[1] - self.alpha_range[0]) + self.alpha_range[0]).view(-1, 1)
        sign = -1 if torch.randint(2, (1,)).item() == 0 else 1
        alpha = alpha * sign
beta = (torch.rand(1) * (self.beta_range[1] - self.beta_range[0]) + self.beta_range[0]).view(-1, 1)
c = (torch.rand(1) * (self.c_range[1] - self.c_range[0]) + self.c_range[0]).view(-1, 1)
e_s = torch.distributions.Normal(0, torch.Tensor([self.noise for _ in range(1)])).sample(
[self.k_shot]).view(self.k_shot, 1).transpose(0, 1)
e_q = torch.distributions.Normal(0, torch.Tensor([self.noise for _ in range(1)])).sample(
[self.k_query]).view(self.k_query, 1).transpose(0, 1)
y_s = alpha * (x_s - c) ** 2 + beta + e_s
y_q = alpha * (x_q - c) ** 2 + beta + e_q
x_s = x_s.view(self.k_shot, 1)
x_q = x_q.view(self.k_query, 1)
y_s = y_s.view(self.k_shot, 1)
y_q = y_q.view(self.k_query, 1)
return x_s, x_q, y_s, y_q
class L1Norm(MetaDataset):
def __init__(self, task_num, k_shot, k_query, alpha_range=(-3, 3),
beta_range=(-3, 3), c_range=(-3, 3), noise=0.3):
super(L1Norm, self).__init__(task_num, k_shot, k_query)
self.alpha_range = alpha_range
self.beta_range = beta_range
self.c_range = c_range
self.noise = noise
def __getitem__(self, item):
if item >= self.task_num:
            raise IndexError(item)
x_s = torch.rand((1, self.k_shot)) * 10 - 5
x_q = torch.rand((1, self.k_query)) * 10 - 5
alpha = (torch.rand(1) * (self.alpha_range[1] - self.alpha_range[0]) + self.alpha_range[0]).view(-1, 1)
beta = (torch.rand(1) * (self.beta_range[1] - self.beta_range[0]) + self.beta_range[0]).view(-1, 1)
c = (torch.rand(1) * (self.c_range[1] - self.c_range[0]) + self.c_range[0]).view(-1, 1)
e_s = torch.distributions.Normal(0, torch.Tensor([self.noise for _ in range(1)])).sample(
[self.k_shot]).view(self.k_shot, 1).transpose(0, 1)
e_q = torch.distributions.Normal(0, torch.Tensor([self.noise for _ in range(1)])).sample(
[self.k_query]).view(self.k_query, 1).transpose(0, 1)
y_s = alpha * torch.abs(x_s - c) + beta + e_s
y_q = alpha * torch.abs(x_q - c) + beta + e_q
x_s = x_s.view(self.k_shot, 1)
x_q = x_q.view(self.k_query, 1)
y_s = y_s.view(self.k_shot, 1)
y_q = y_q.view(self.k_query, 1)
return x_s, x_q, y_s, y_q
class Tanh(MetaDataset):
def __init__(self, task_num, k_shot, k_query, alpha_range=(-3, 3),
beta_range=(-3, 3), c_range=(-3, 3), noise=0.3):
super(Tanh, self).__init__(task_num, k_shot, k_query)
self.alpha_range = alpha_range
self.beta_range = beta_range
self.c_range = c_range
self.noise = noise
def __getitem__(self, item):
if item >= self.task_num:
            raise IndexError(item)
x_s = torch.rand((1, self.k_shot)) * 10 - 5
x_q = torch.rand((1, self.k_query)) * 10 - 5
alpha = (torch.rand(1) * (self.alpha_range[1] - self.alpha_range[0]) + self.alpha_range[0]).view(-1, 1)
beta = (torch.rand(1) * (self.beta_range[1] - self.beta_range[0]) + self.beta_range[0]).view(-1, 1)
c = (torch.rand(1) * (self.c_range[1] - self.c_range[0]) + self.c_range[0]).view(-1, 1)
e_s = torch.distributions.Normal(0, torch.Tensor([self.noise for _ in range(1)])).sample(
[self.k_shot]).view(self.k_shot, 1).transpose(0, 1)
e_q = torch.distributions.Normal(0, torch.Tensor([self.noise for _ in range(1)])).sample(
[self.k_query]).view(self.k_query, 1).transpose(0, 1)
y_s = alpha * torch.tanh(x_s - c) + beta + e_s
y_q = alpha * torch.tanh(x_q - c) + beta + e_q
x_s = x_s.view(self.k_shot, 1)
x_q = x_q.view(self.k_query, 1)
y_s = y_s.view(self.k_shot, 1)
y_q = y_q.view(self.k_query, 1)
return x_s, x_q, y_s, y_q
class MultiModal(MetaDataset):
def __init__(self, task_num, k_shot, k_query, modes=5):
super(MultiModal, self).__init__(task_num, k_shot, k_query)
self.modes = modes
self._counter = 0
tasks_per_mode = task_num//modes
self.datasets = [Sinusoid(tasks_per_mode, k_shot, k_query), Linear(tasks_per_mode, k_shot, k_query)]
if modes >= 3:
self.datasets.append(Quadratic(tasks_per_mode, k_shot, k_query))
if modes == 5:
self.datasets.append(L1Norm(tasks_per_mode, k_shot, k_query))
self.datasets.append(Tanh(tasks_per_mode, k_shot, k_query))
if modes == 4:
raise NotImplementedError("4 modes is not part of the experiments")
if modes > 5:
raise NotImplementedError("5 modes is the maximum")
if modes == 2:
self.datasets = [Tanh(tasks_per_mode, k_shot, k_query), L1Norm(tasks_per_mode, k_shot, k_query)]
    def __getitem__(self, item):
        if item >= self.task_num:
            raise IndexError(item)
        # cycle through the modes; indexing the sub-dataset at 0 is fine
        # because each sub-dataset samples a fresh random task on every access
        index = self._counter % self.modes
        self._counter += 1
        return self.datasets[index][0]
if __name__ == '__main__':
pass
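    # Hedged usage demo (not part of the original module): draw one task from
    # the sinusoid generator; shapes follow __getitem__ above.
    ds = Sinusoid(task_num=4, k_shot=5, k_query=10)
    x_s, x_q, y_s, y_q = ds[0]
    print(x_s.shape, y_s.shape)  # torch.Size([5, 1]) torch.Size([5, 1])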
| 1.992188
| 2
|
sdk/python/approzium/_postgres/scram.py
|
UpGado/approzium
| 59
|
12777527
|
<reponame>UpGado/approzium<filename>sdk/python/approzium/_postgres/scram.py
import base64
import re
# Try to import the secrets library (Python 3.6+) as the cryptographic token
# generator for SCRAM nonces; otherwise fall back on os.urandom.
try:
from secrets import token_bytes as generate_token_bytes
except ImportError:
from os import urandom as generate_token_bytes
class SCRAMAuthentication:
AUTHENTICATION_METHODS = [b"SCRAM-SHA-256"]
DEFAULT_CLIENT_NONCE_BYTES = 16 # 24
REQUIREMENTS_CLIENT_FINAL_MESSAGE = ["client_channel_binding", "server_nonce"]
    REQUIREMENTS_CLIENT_PROOF = [
        "password_salt",
        "password_iterations",
        "server_first_message",
        "server_nonce",
    ]
def __init__(self, authentication_method):
self.authentication_method = authentication_method
self.authorization_message = None
# channel binding is turned off for the time being
self.client_channel_binding = b"n,,"
self.client_first_message_bare = None
self.client_nonce = None
self.client_proof = None
self.password_salt = None
        self.password_iterations = None
self.server_first_message = None
self.server_key = None
self.server_nonce = None
def create_client_first_message(self, username):
"""Create the initial client message for SCRAM authentication"""
self.client_nonce = self._generate_client_nonce(self.DEFAULT_CLIENT_NONCE_BYTES)
# set the client first message bare here, as it's used in a later step
self.client_first_message_bare = (
b"n=" + username.encode("utf-8") + b",r=" + self.client_nonce
)
# put together the full message here
msg = bytes()
msg += self.authentication_method + b"\0"
client_first_message = (
self.client_channel_binding + self.client_first_message_bare
)
msg += (len(client_first_message)).to_bytes(
4, byteorder="big"
) + client_first_message
return msg
def create_client_final_message(self, client_proof):
"""Create the final client message as part of SCRAM authentication"""
if any(
[
getattr(self, val) is None
for val in self.REQUIREMENTS_CLIENT_FINAL_MESSAGE
]
):
raise Exception("you need values from server to generate a client proof")
# generate the client proof
msg = bytes()
msg += (
b"c="
+ base64.b64encode(self.client_channel_binding)
+ b",r="
+ self.server_nonce
+ b",p="
+ client_proof.encode("ascii")
)
return msg
def parse_server_first_message(self, server_response):
"""Parse the response from the first message from the server"""
self.server_first_message = server_response
try:
self.server_nonce = re.search(
b"r=([^,]+),", self.server_first_message
).group(1)
        except (AttributeError, IndexError):
raise Exception("could not get nonce")
if not self.server_nonce.startswith(self.client_nonce):
raise Exception("invalid nonce")
try:
self.password_salt = re.search(
b"s=([^,]+),", self.server_first_message
).group(1)
        except (AttributeError, IndexError):
raise Exception("could not get salt")
        try:
            self.password_iterations = int(
                re.search(rb"i=(\d+),?", self.server_first_message).group(1)
            )
        except (AttributeError, IndexError, TypeError, ValueError):
            raise Exception("could not get iterations")
def verify_server_final_message(self, server_final_message):
"""Verify the final message from the server"""
try:
server_signature = re.search(b"v=([^,]+)", server_final_message).group(1)
        except (AttributeError, IndexError):
raise Exception("could not get server signature")
return server_signature == self.server_signature.encode("ascii")
def _generate_client_nonce(self, num_bytes):
token = generate_token_bytes(num_bytes)
return base64.b64encode(token)
def _generate_auth_msg(self):
self.authorization_message = (
self.client_first_message_bare
+ b","
+ self.server_first_message
+ b",c="
+ base64.b64encode(self.client_channel_binding)
+ b",r="
+ self.server_nonce
)
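# Hedged usage sketch (not part of the original module): the expected SCRAM
# call order for this class. server_first, server_final, and proof are
# hypothetical placeholders; deriving the proof (HMAC over the salted
# password) and setting self.server_signature happen elsewhere in the SDK.
def _example_exchange(server_first, server_final, proof):
    auth = SCRAMAuthentication(b"SCRAM-SHA-256")
    first = auth.create_client_first_message("approzium_user")  # -> server
    auth.parse_server_first_message(server_first)  # salt, nonce, iterations
    final = auth.create_client_final_message(proof)  # -> server
    return first, final, auth.verify_server_final_message(server_final)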
| 2.578125
| 3
|
pipeline/data/Zhang/_source/helper.py
|
Voineagulab/NeuroCirc
| 0
|
12777528
|
<reponame>Voineagulab/NeuroCirc
import csv, re, os, math
if __name__ == '__main__':
    write_file1 = csv.writer(open("zhang.csv", 'w', newline=''), delimiter=',', quotechar='\"', quoting=csv.QUOTE_NONNUMERIC)
    write_file1.writerow(["id", "symbol", "ensembl"])
    write_file2 = csv.writer(open("zhang_cpm.csv", 'w', newline=''), delimiter=',', quotechar='\"', quoting=csv.QUOTE_NONNUMERIC)
    write_file2.writerow(["id", "FBN", "H9_ESC"])
    reader = csv.reader(open("./zhang_full.csv", 'r'), delimiter=',')
    next(reader)  # skip the header row
    for line in reader:
        write_file1.writerow([line[0], line[2], ""])
        write_file2.writerow([line[0], line[3], line[4]])
| 2.40625
| 2
|
Ago-Dic-2017/Enrique Castillo/Práctica1/Agencia.py
|
Andremm303/DAS_Sistemas
| 0
|
12777529
|
<gh_stars>0
class Agencia:
def __init__(self, nomAgencia, direccion):
self.nomAgencia = nomAgencia
self.direccion = direccion
def getNomAgencia(self):
return self.nomAgencia
    def setNomAgencia(self, nomAgencia):
        self.nomAgencia = nomAgencia
def getDireccion(self):
return self.direccion
def setDireccion(self, direccion):
self.direccion = direccion
def atribAgencia(self):
return "Agencia: {}\nDirección: {}\n".format(self.nomAgencia, self.direccion)
| 2.875
| 3
|
aerisweather/responses/AerisLocation.py
|
jkoelndorfer/aerisweather-python-sdk
| 5
|
12777530
|
<filename>aerisweather/responses/AerisLocation.py
class AerisLocation:
""" Defines an object for the Aeris API loc data returned in an Aeris API responses. """
def __init__(self, json_data=None):
""" Constructor """
self.data = json_data
@property
    def long(self) -> float:
""" Returns the longitude of the location as a float. """
return self.data["long"]
@property
def lat(self) -> float:
""" Returns the latitude of the location as a float. """
return self.data["lat"]
| 2.96875
| 3
|
src/Expired_Filter/ChromeDriver.py
|
brianfong96/Experiment_Web_Scraping
| 1
|
12777531
|
<reponame>brianfong96/Experiment_Web_Scraping
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
def CreateDriver(extra_arguments = ["--start-maximized"]):
arguments = ['--ignore-certificate-errors', '--incognito', '--headless']
arguments += extra_arguments
    # Use selenium and open the webdriver
options = webdriver.ChromeOptions()
for arg in arguments:
options.add_argument(arg)
driver = webdriver.Chrome(ChromeDriverManager().install(), options=options)
return driver
def SetupDriver():
driver = CreateDriver()
if driver:
driver.quit()
return
if __name__ == "__main__":
driver = CreateDriver()
pass
| 3.171875
| 3
|
python/frequency_calc.py
|
amojarro/carrierseq
| 5
|
12777532
|
<reponame>amojarro/carrierseq<filename>python/frequency_calc.py
import sys
channel_out = open(sys.argv[1], 'r')
channel_list = channel_out.readlines()
xcrit_value_txt = open(sys.argv[2], 'r')
xcrit_value = xcrit_value_txt.read()
xcrit = float(xcrit_value)
# channel_list: A list containing strings of each channel with newline characters
# new_channel_list: A list containing strings of each channel
# channel_list_num: A list containing integers of each channel
# First we strip the newline characters
new_channel_list = [line.rstrip('\n') for line in channel_list]
# Next we convert each element to an integer
channel_list_num = [int(line) for line in new_channel_list]
# Next we create a dictionary where each element is in the format of "channel: frequency"
channel_freq = {x:channel_list_num.count(x) for x in channel_list_num}
# print channel_freq
target_channels = dict()
hqnr_channels = dict()
for channel in channel_freq:
if channel_freq[channel] <= xcrit:
target_channels[channel] = channel_freq[channel]
else:
hqnr_channels[channel] = channel_freq[channel]
# Save roi channel frequency dictionary
with open(sys.argv[3], 'w') as f:
    print(channel_freq, file=f)
# Save hqnr channel dictionary
with open(sys.argv[4], 'w') as f:
    print(hqnr_channels, file=f)
# Save target reads channel dictionary
with open(sys.argv[5], 'w') as f:
    print(target_channels, file=f)
# Print only target channels used for sorting
with open(sys.argv[6], 'w') as f:
    for item in target_channels.keys():
        print(item, file=f)
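# Hedged helper (not part of the original script): reproduces the target/hqnr
# split above on an in-memory list of channel reads, for quick sanity checks.
def _demo_split(reads, xcrit):
    freq = {x: reads.count(x) for x in reads}
    targets = {c: n for c, n in freq.items() if n <= xcrit}
    hqnr = {c: n for c, n in freq.items() if n > xcrit}
    return targets, hqnr
# _demo_split([7, 3, 3, 5, 5, 5], 2) -> ({7: 1, 3: 2}, {5: 3})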
| 3.515625
| 4
|
test_test1.py
|
scottohalloran/python-sample-vscode-flask-tutorial
| 0
|
12777533
|
def func(a):
return a - 1
def test_testmethod():
    assert func(6) == 5
| 2.390625
| 2
|
sim_correlation.py
|
Renata1995/Topic-Distance-and-Coherence
| 5
|
12777534
|
from scipy import stats
import sys
import utils.name_convention as name
from similarity.SimTopicLists import SimTopicLists
if len(sys.argv) <= 1:
src = "pp_reuters"
else:
src = sys.argv[1]
stl = SimTopicLists()
distance_list, rank_list = [], []
jtotal, ktotal, cos_total, kl_total, bha_total = [], [], [], [], []
jtotal_rank, ktotal_rank, costotal_rank, kltotal_rank, bhatotal_rank = [], [], [], [], []
for corpus_type in ["tfidf", "bow", "binary"]:
    for topics_count in [10, 20, 30, 40, 50]:
dname = name.get_output_dir(corpus_type, topics_count, src)
ofile = open(dname + "/sim_jaccard.txt", "r")
jlist = stl.read_distance_list(ofile)
jtotal.extend(jlist)
ofile = open(dname + "/sim_kendall.txt", "r")
klist = stl.read_distance_list(ofile)
ktotal.extend(klist)
ofile = open(dname + "/sim_cosine.txt", "r")
cos_list = stl.read_distance_list(ofile)
cos_total.extend(cos_list)
ofile = open(dname + "/sim_kl.txt", "r")
kl_list = stl.read_distance_list(ofile)
kl_total.extend(kl_list)
ofile = open(dname + "/sim_bha.txt", "r")
bha_list = stl.read_distance_list(ofile)
bha_total.extend(bha_list)
jrank = stl.give_dist_names(jlist, topics_count, corpus_type)
jtotal_rank.extend(jrank)
krank = stl.give_dist_names(klist, topics_count, corpus_type)
ktotal_rank.extend(krank)
cos_rank = stl.give_dist_names(cos_list, topics_count, corpus_type)
costotal_rank.extend(cos_rank)
kl_rank = stl.give_dist_names(kl_list, topics_count, corpus_type)
kltotal_rank.extend(kl_rank)
bha_rank = stl.give_dist_names(bha_list, topics_count, corpus_type)
bhatotal_rank.extend(bha_rank)
distance_list.append(("jaccard", jtotal))
distance_list.append(("kendall", ktotal))
distance_list.append(("cos", cos_total))
distance_list.append(("kl", kl_total))
distance_list.append(("bha", bha_total))
jtotal_rank = [v[0] for v in sorted(jtotal_rank, key=lambda x: x[1])]
ktotal_rank = [v[0] for v in sorted(ktotal_rank, key=lambda x: x[1])]
costotal_rank = [v[0] for v in sorted(costotal_rank, key=lambda x: x[1])]
kltotal_rank = [v[0] for v in sorted(kltotal_rank, key=lambda x: x[1])]
bhatotal_rank = [v[0] for v in sorted(bhatotal_rank, key=lambda x: x[1])]
rank_list.append(("jaccard", jtotal_rank))
rank_list.append(("kendall", ktotal_rank))
rank_list.append(("cos", costotal_rank))
rank_list.append(("kl", kltotal_rank))
rank_list.append(("bha", bhatotal_rank))
ofile = open("sim_correlation.txt", "w")
for index, list1 in enumerate(distance_list[1:]):
for list2 in distance_list[:index+1]:
sim_values1 = list1[1]
sim_values2 = list2[1]
ofile.write(list1[0]+" " + list2[0]+" : ")
ofile.write(str(stats.pearsonr(sim_values1, sim_values2))+"\n")
ofile = open("sim_rank.txt","w")
for index, list1 in enumerate(rank_list[1:]):
for list2 in rank_list[:index+1]:
sim_values1 = list1[1]
sim_values2 = list2[1]
ofile.write(list1[0]+" " + list2[0]+" : ")
ofile.write(str(stats.kendalltau(sim_values1, sim_values2))+"\n")
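# Hedged mini-example (not part of the original script): the two statistics
# used above, applied to hypothetical toy distance lists.
def _demo_correlations():
    d1 = [0.1, 0.4, 0.9, 0.3]
    d2 = [0.2, 0.5, 0.8, 0.4]
    print(stats.pearsonr(d1, d2))    # (Pearson r, p-value)
    print(stats.kendalltau(d1, d2))  # (Kendall tau, p-value)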
| 2.15625
| 2
|
cisco-ios-xr/ydk/models/cisco_ios_xr/_meta/_Cisco_IOS_XR_policy_repository_oper.py
|
tkamata-test/ydk-py
| 0
|
12777535
|
<filename>cisco-ios-xr/ydk/models/cisco_ios_xr/_meta/_Cisco_IOS_XR_policy_repository_oper.py
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'GroupEnum' : _MetaInfoEnum('GroupEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper',
{
'address-family-group':'address_family_group',
'session-group':'session_group',
'neighbor-group':'neighbor_group',
'neighbor':'neighbor',
'error-group':'error_group',
}, 'Cisco-IOS-XR-policy-repository-oper', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper']),
'AttachPointDirectionEnum' : _MetaInfoEnum('AttachPointDirectionEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper',
{
'in':'in_',
'out':'out',
}, 'Cisco-IOS-XR-policy-repository-oper', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper']),
'SubAddressFamilyEnum' : _MetaInfoEnum('SubAddressFamilyEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper',
{
'unicast':'unicast',
'multicast':'multicast',
'label':'label',
'tunnel':'tunnel',
'vpn':'vpn',
'mdt':'mdt',
'vpls':'vpls',
'rt-constraint':'rt_constraint',
'mvpn':'mvpn',
'flow':'flow',
'vpn-mcast':'vpn_mcast',
'saf-none':'saf_none',
'saf-unknown':'saf_unknown',
}, 'Cisco-IOS-XR-policy-repository-oper', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper']),
'AddressFamilyEnum' : _MetaInfoEnum('AddressFamilyEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper',
{
'ipv4':'ipv4',
'ipv6':'ipv6',
'l2vpn':'l2vpn',
'ls':'ls',
'af-none':'af_none',
'af-unknown':'af_unknown',
}, 'Cisco-IOS-XR-policy-repository-oper', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper']),
'ObjectStatusEnum' : _MetaInfoEnum('ObjectStatusEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper',
{
'active':'active',
'inactive':'inactive',
'unused':'unused',
}, 'Cisco-IOS-XR-policy-repository-oper', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper']),
'RoutingPolicy.Limits' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Limits',
False,
[
_MetaInfoClassMember('compiled-policies-length', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total compiled length of all policies
''',
'compiled_policies_length',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('current-lines-of-policy-limit', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of lines of configuration for
policies/sets currently allowed
''',
'current_lines_of_policy_limit',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('current-lines-of-policy-used', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Current number of lines configured for all
policies and sets
''',
'current_lines_of_policy_used',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('current-number-of-policies-limit', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of policies currently allowed
''',
'current_number_of_policies_limit',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('current-number-of-policies-used', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Current number of policies configured
''',
'current_number_of_policies_used',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('maximum-lines-of-policy', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Maximum lines of configuration allowable for all
policies and sets
''',
'maximum_lines_of_policy',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('maximum-number-of-policies', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Maximum number of policies allowable
''',
'maximum_number_of_policies',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'limits',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.DirectlyUsedPolicies' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.DirectlyUsedPolicies',
False,
[
_MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None,
[], [],
''' Policy objects
''',
'object',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'directly-used-policies',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.AllUsedSets.Sets' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.AllUsedSets.Sets',
False,
[
_MetaInfoClassMember('set-domain', ATTRIBUTE, 'str' , None, None,
[], [],
''' Domain of sets
''',
'set_domain',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('set-name', REFERENCE_LEAFLIST, 'str' , None, None,
[], [],
''' Names of sets in this domain
''',
'set_name',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'sets',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.AllUsedSets' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.AllUsedSets',
False,
[
_MetaInfoClassMember('sets', REFERENCE_LIST, 'Sets' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.AllUsedSets.Sets',
[], [],
''' List of sets in several domains
''',
'sets',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'all-used-sets',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.DirectlyUsedSets.Sets' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.DirectlyUsedSets.Sets',
False,
[
_MetaInfoClassMember('set-domain', ATTRIBUTE, 'str' , None, None,
[], [],
''' Domain of sets
''',
'set_domain',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('set-name', REFERENCE_LEAFLIST, 'str' , None, None,
[], [],
''' Names of sets in this domain
''',
'set_name',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'sets',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.DirectlyUsedSets' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.DirectlyUsedSets',
False,
[
_MetaInfoClassMember('sets', REFERENCE_LIST, 'Sets' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.DirectlyUsedSets.Sets',
[], [],
''' List of sets in several domains
''',
'sets',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'directly-used-sets',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.AllUsedPolicies' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.AllUsedPolicies',
False,
[
_MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None,
[], [],
''' Policy objects
''',
'object',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'all-used-policies',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses',
False,
[
_MetaInfoClassMember('all-used-policies', REFERENCE_CLASS, 'AllUsedPolicies' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.AllUsedPolicies',
[], [],
''' Policies used by this policy, or by policies
that it uses
''',
'all_used_policies',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('all-used-sets', REFERENCE_CLASS, 'AllUsedSets' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.AllUsedSets',
[], [],
''' Sets used by this policy, or by policies
that it uses
''',
'all_used_sets',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('directly-used-policies', REFERENCE_CLASS, 'DirectlyUsedPolicies' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.DirectlyUsedPolicies',
[], [],
''' Policies that this policy uses directly
''',
'directly_used_policies',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('directly-used-sets', REFERENCE_CLASS, 'DirectlyUsedSets' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.DirectlyUsedSets',
[], [],
''' Sets that this policy uses directly
''',
'directly_used_sets',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'policy-uses',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.UsedBy.Reference' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Policies.RoutePolicies.RoutePolicy.UsedBy.Reference',
False,
[
_MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Name of policy
''',
'route_policy_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('status', REFERENCE_ENUM_CLASS, 'ObjectStatusEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'ObjectStatusEnum',
[], [],
''' Active, Inactive, or Unused
''',
'status',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('used-directly', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Whether the policy uses this object directly or
indirectly
''',
'used_directly',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'reference',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.UsedBy' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Policies.RoutePolicies.RoutePolicy.UsedBy',
False,
[
_MetaInfoClassMember('reference', REFERENCE_LIST, 'Reference' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.UsedBy.Reference',
[], [],
''' Information about policies referring to this
object
''',
'reference',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'used-by',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.Attached.Binding' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Policies.RoutePolicies.RoutePolicy.Attached.Binding',
False,
[
_MetaInfoClassMember('af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum',
[], [],
''' Address Family Identifier
''',
'af_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('aggregate-network-address', ATTRIBUTE, 'str' , None, None,
[], [],
''' Aggregate IP address or Network IP Address
in IPv4 or IPv6 Format
''',
'aggregate_network_address',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('area-id', ATTRIBUTE, 'str' , None, None,
[], [],
''' OSPF Area ID in Decimal Integer Format
''',
'area_id',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('attach-point', ATTRIBUTE, 'str' , None, None,
[], [],
''' Name of attach point where policy is attached
''',
'attach_point',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('attached-policy', ATTRIBUTE, 'str' , None, None,
[], [],
''' The attached policy that (maybe indirectly) uses
the object in question
''',
'attached_policy',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('direction', REFERENCE_ENUM_CLASS, 'AttachPointDirectionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AttachPointDirectionEnum',
[], [],
''' Direction In or Out
''',
'direction',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'GroupEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'GroupEnum',
[], [],
''' Neighbor Group
''',
'group',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('group-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Neighbor Group Name
''',
'group_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('instance', ATTRIBUTE, 'str' , None, None,
[], [],
''' Instance
''',
'instance',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Interface Name
''',
'interface_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('neighbor-address', ATTRIBUTE, 'str' , None, None,
[], [],
''' Neighbor IP Address
''',
'neighbor_address',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('neighbor-af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum',
[], [],
''' Neighbor IP Address Family
''',
'neighbor_af_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('propogate-from', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' ISIS Propogate From Level
''',
'propogate_from',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('propogate-to', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' ISIS Propogate To Level
''',
'propogate_to',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('proto-instance', ATTRIBUTE, 'str' , None, None,
[], [],
''' Protocol instance
''',
'proto_instance',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('protocol', ATTRIBUTE, 'str' , None, None,
[], [],
''' Protocol to which policy attached
''',
'protocol',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Policy that uses object in question
''',
'route_policy_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('saf-name', REFERENCE_ENUM_CLASS, 'SubAddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'SubAddressFamilyEnum',
[], [],
''' Subsequent Address Family Identifier
''',
'saf_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('source-protocol', ATTRIBUTE, 'str' , None, None,
[], [],
''' Source Protocol to redistribute,
Source Protocol can be one of the following
values {all,
connected, local, static, bgp, rip, isis, ospf,
ospfv3, eigrp, unknown }
''',
'source_protocol',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' VRF name
''',
'vrf_name',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'binding',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.Attached' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Policies.RoutePolicies.RoutePolicy.Attached',
False,
[
_MetaInfoClassMember('binding', REFERENCE_LIST, 'Binding' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.Attached.Binding',
[], [],
''' bindings list
''',
'binding',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'attached',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Policies.RoutePolicies.RoutePolicy' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Policies.RoutePolicies.RoutePolicy',
False,
[
_MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None,
[], ['[\\w\\-\\.:,_@#%$\\+=\\|;]+'],
''' Route policy name
''',
'route_policy_name',
'Cisco-IOS-XR-policy-repository-oper', True),
_MetaInfoClassMember('attached', REFERENCE_CLASS, 'Attached' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.Attached',
[], [],
''' Information about where this policy or set is
attached
''',
'attached',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('policy-uses', REFERENCE_CLASS, 'PolicyUses' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses',
[], [],
''' Information about which policies and sets
this policy uses
''',
'policy_uses',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('used-by', REFERENCE_CLASS, 'UsedBy' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.UsedBy',
[], [],
''' Policies that use this object, directly or
indirectly
''',
'used_by',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'route-policy',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Policies.RoutePolicies' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Policies.RoutePolicies',
False,
[
_MetaInfoClassMember('route-policy', REFERENCE_LIST, 'RoutePolicy' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy',
[], [],
''' Information about an individual policy
''',
'route_policy',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'route-policies',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Policies.Unused' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Policies.Unused',
False,
[
_MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None,
[], [],
''' Policy objects
''',
'object',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'unused',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Policies.Inactive' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Policies.Inactive',
False,
[
_MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None,
[], [],
''' Policy objects
''',
'object',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'inactive',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Policies.Active' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Policies.Active',
False,
[
_MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None,
[], [],
''' Policy objects
''',
'object',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'active',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Policies' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Policies',
False,
[
_MetaInfoClassMember('active', REFERENCE_CLASS, 'Active' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Policies.Active',
[], [],
''' All objects of a given type that are attached to
a protocol
''',
'active',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('inactive', REFERENCE_CLASS, 'Inactive' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Policies.Inactive',
[], [],
''' All objects of a given type that are not
attached to a protocol
''',
'inactive',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('route-policies', REFERENCE_CLASS, 'RoutePolicies' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Policies.RoutePolicies',
[], [],
''' Information about individual policies
''',
'route_policies',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('unused', REFERENCE_CLASS, 'Unused' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Policies.Unused',
[], [],
''' All objects of a given type that are not
referenced at all
''',
'unused',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'policies',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.OspfArea.Sets_.Set.UsedBy.Reference' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.OspfArea.Sets_.Set.UsedBy.Reference',
False,
[
_MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Name of policy
''',
'route_policy_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('status', REFERENCE_ENUM_CLASS, 'ObjectStatusEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'ObjectStatusEnum',
[], [],
''' Active, Inactive, or Unused
''',
'status',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('used-directly', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Whether the policy uses this object directly or
indirectly
''',
'used_directly',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'reference',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.OspfArea.Sets_.Set.UsedBy' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.OspfArea.Sets_.Set.UsedBy',
False,
[
_MetaInfoClassMember('reference', REFERENCE_LIST, 'Reference' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.OspfArea.Sets_.Set.UsedBy.Reference',
[], [],
''' Information about policies referring to this
object
''',
'reference',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'used-by',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.OspfArea.Sets_.Set.Attached.Binding' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.OspfArea.Sets_.Set.Attached.Binding',
False,
[
_MetaInfoClassMember('af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum',
[], [],
''' Address Family Identifier
''',
'af_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('aggregate-network-address', ATTRIBUTE, 'str' , None, None,
[], [],
''' Aggregate IP address or Network IP Address
in IPv4 or IPv6 Format
''',
'aggregate_network_address',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('area-id', ATTRIBUTE, 'str' , None, None,
[], [],
''' OSPF Area ID in Decimal Integer Format
''',
'area_id',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('attach-point', ATTRIBUTE, 'str' , None, None,
[], [],
''' Name of attach point where policy is attached
''',
'attach_point',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('attached-policy', ATTRIBUTE, 'str' , None, None,
[], [],
''' The attached policy that (maybe indirectly) uses
the object in question
''',
'attached_policy',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('direction', REFERENCE_ENUM_CLASS, 'AttachPointDirectionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AttachPointDirectionEnum',
[], [],
''' Direction In or Out
''',
'direction',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'GroupEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'GroupEnum',
[], [],
''' Neighbor Group
''',
'group',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('group-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Neighbor Group Name
''',
'group_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('instance', ATTRIBUTE, 'str' , None, None,
[], [],
''' Instance
''',
'instance',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Interface Name
''',
'interface_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('neighbor-address', ATTRIBUTE, 'str' , None, None,
[], [],
''' Neighbor IP Address
''',
'neighbor_address',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('neighbor-af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum',
[], [],
''' Neighbor IP Address Family
''',
'neighbor_af_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('propogate-from', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' ISIS Propogate From Level
''',
'propogate_from',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('propogate-to', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' ISIS Propogate To Level
''',
'propogate_to',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('proto-instance', ATTRIBUTE, 'str' , None, None,
[], [],
''' Protocol instance
''',
'proto_instance',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('protocol', ATTRIBUTE, 'str' , None, None,
[], [],
''' Protocol to which policy attached
''',
'protocol',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Policy that uses object in question
''',
'route_policy_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('saf-name', REFERENCE_ENUM_CLASS, 'SubAddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'SubAddressFamilyEnum',
[], [],
''' Subsequent Address Family Identifier
''',
'saf_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('source-protocol', ATTRIBUTE, 'str' , None, None,
[], [],
''' Source Protocol to redistribute,
Source Protocol can be one of the following
values {all,
connected, local, static, bgp, rip, isis, ospf,
ospfv3, eigrp, unknown }
''',
'source_protocol',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' VRF name
''',
'vrf_name',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'binding',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.OspfArea.Sets_.Set.Attached' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.OspfArea.Sets_.Set.Attached',
False,
[
_MetaInfoClassMember('binding', REFERENCE_LIST, 'Binding' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.OspfArea.Sets_.Set.Attached.Binding',
[], [],
''' bindings list
''',
'binding',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'attached',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.OspfArea.Sets_.Set' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.OspfArea.Sets_.Set',
False,
[
_MetaInfoClassMember('set-name', ATTRIBUTE, 'str' , None, None,
[], ['[\\w\\-\\.:,_@#%$\\+=\\|;]+'],
''' Set name
''',
'set_name',
'Cisco-IOS-XR-policy-repository-oper', True),
_MetaInfoClassMember('attached', REFERENCE_CLASS, 'Attached' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.OspfArea.Sets_.Set.Attached',
[], [],
''' Information about where this policy or set is
attached
''',
'attached',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('used-by', REFERENCE_CLASS, 'UsedBy' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.OspfArea.Sets_.Set.UsedBy',
[], [],
''' Policies that use this object, directly or
indirectly
''',
'used_by',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'set',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.OspfArea.Sets_' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.OspfArea.Sets_',
False,
[
_MetaInfoClassMember('set', REFERENCE_LIST, 'Set' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.OspfArea.Sets_.Set',
[], [],
''' Information about an individual set
''',
'set',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'sets',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.OspfArea.Unused' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.OspfArea.Unused',
False,
[
_MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None,
[], [],
''' Policy objects
''',
'object',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'unused',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.OspfArea.Inactive' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.OspfArea.Inactive',
False,
[
_MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None,
[], [],
''' Policy objects
''',
'object',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'inactive',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.OspfArea.Active' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.OspfArea.Active',
False,
[
_MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None,
[], [],
''' Policy objects
''',
'object',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'active',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.OspfArea' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.OspfArea',
False,
[
_MetaInfoClassMember('active', REFERENCE_CLASS, 'Active' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.OspfArea.Active',
[], [],
''' All objects of a given type that are attached to
a protocol
''',
'active',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('inactive', REFERENCE_CLASS, 'Inactive' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.OspfArea.Inactive',
[], [],
''' All objects of a given type that are not
attached to a protocol
''',
'inactive',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('sets', REFERENCE_CLASS, 'Sets_' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.OspfArea.Sets_',
[], [],
''' Information about individual sets
''',
'sets',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('unused', REFERENCE_CLASS, 'Unused' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.OspfArea.Unused',
[], [],
''' All objects of a given type that are not
referenced at all
''',
'unused',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'ospf-area',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets_.Set.UsedBy.Reference' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets_.Set.UsedBy.Reference',
False,
[
_MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Name of policy
''',
'route_policy_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('status', REFERENCE_ENUM_CLASS, 'ObjectStatusEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'ObjectStatusEnum',
[], [],
''' Active, Inactive, or Unused
''',
'status',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('used-directly', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Whether the policy uses this object directly or
indirectly
''',
'used_directly',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'reference',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets_.Set.UsedBy' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets_.Set.UsedBy',
False,
[
_MetaInfoClassMember('reference', REFERENCE_LIST, 'Reference' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets_.Set.UsedBy.Reference',
[], [],
''' Information about policies referring to this
object
''',
'reference',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'used-by',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets_.Set.Attached.Binding' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets_.Set.Attached.Binding',
False,
[
_MetaInfoClassMember('af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum',
[], [],
''' Address Family Identifier
''',
'af_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('aggregate-network-address', ATTRIBUTE, 'str' , None, None,
[], [],
''' Aggregate IP address or Network IP Address
in IPv4 or IPv6 Format
''',
'aggregate_network_address',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('area-id', ATTRIBUTE, 'str' , None, None,
[], [],
''' OSPF Area ID in Decimal Integer Format
''',
'area_id',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('attach-point', ATTRIBUTE, 'str' , None, None,
[], [],
''' Name of attach point where policy is attached
''',
'attach_point',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('attached-policy', ATTRIBUTE, 'str' , None, None,
[], [],
                ''' The attached policy that (possibly indirectly) uses
the object in question
''',
'attached_policy',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('direction', REFERENCE_ENUM_CLASS, 'AttachPointDirectionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AttachPointDirectionEnum',
[], [],
''' Direction In or Out
''',
'direction',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'GroupEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'GroupEnum',
[], [],
''' Neighbor Group
''',
'group',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('group-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Neighbor Group Name
''',
'group_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('instance', ATTRIBUTE, 'str' , None, None,
[], [],
''' Instance
''',
'instance',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Interface Name
''',
'interface_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('neighbor-address', ATTRIBUTE, 'str' , None, None,
[], [],
''' Neighbor IP Address
''',
'neighbor_address',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('neighbor-af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum',
[], [],
''' Neighbor IP Address Family
''',
'neighbor_af_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('propogate-from', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
                ''' ISIS Propagate From Level
''',
'propogate_from',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('propogate-to', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
                ''' ISIS Propagate To Level
''',
'propogate_to',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('proto-instance', ATTRIBUTE, 'str' , None, None,
[], [],
''' Protocol instance
''',
'proto_instance',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('protocol', ATTRIBUTE, 'str' , None, None,
[], [],
                ''' Protocol to which the policy is attached
''',
'protocol',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None,
[], [],
                ''' Policy that uses the object in question
''',
'route_policy_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('saf-name', REFERENCE_ENUM_CLASS, 'SubAddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'SubAddressFamilyEnum',
[], [],
''' Subsequent Address Family Identifier
''',
'saf_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('source-protocol', ATTRIBUTE, 'str' , None, None,
[], [],
                ''' Source Protocol to redistribute. Source
                Protocol can be one of the following values:
                {all, connected, local, static, bgp, rip,
                isis, ospf, ospfv3, eigrp, unknown}
''',
'source_protocol',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' VRF name
''',
'vrf_name',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'binding',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
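    # The propogate-from/propogate-to members above carry their YANG int32
    # bounds as a list of string tuples. A minimal sketch (hypothetical helper,
    # not part of YDK) of how such a range list can be interpreted:
    #
    #     def in_range(value, ranges):
    #         # ranges look like [('-2147483648', '2147483647')]
    #         return any(int(lo) <= value <= int(hi) for lo, hi in ranges)
    #
    #     assert in_range(0, [('-2147483648', '2147483647')])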
'RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets_.Set.Attached' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets_.Set.Attached',
False,
[
_MetaInfoClassMember('binding', REFERENCE_LIST, 'Binding' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets_.Set.Attached.Binding',
[], [],
                ''' Bindings list
''',
'binding',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'attached',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets_.Set' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets_.Set',
False,
[
_MetaInfoClassMember('set-name', ATTRIBUTE, 'str' , None, None,
[], ['[\\w\\-\\.:,_@#%$\\+=\\|;]+'],
''' Set name
''',
'set_name',
'Cisco-IOS-XR-policy-repository-oper', True),
_MetaInfoClassMember('attached', REFERENCE_CLASS, 'Attached' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets_.Set.Attached',
[], [],
''' Information about where this policy or set is
attached
''',
'attached',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('used-by', REFERENCE_CLASS, 'UsedBy' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets_.Set.UsedBy',
[], [],
''' Policies that use this object, directly or
indirectly
''',
'used_by',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'set',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
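    # In the Set entry above, 'set-name' is the only member created with the
    # trailing flag set to True, consistent with it being the key of the YANG
    # 'set' list; its pattern restriction limits names to word characters plus
    # a small set of punctuation.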
'RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets_' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets_',
False,
[
_MetaInfoClassMember('set', REFERENCE_LIST, 'Set' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets_.Set',
[], [],
''' Information about an individual set
''',
'set',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'sets',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.ExtendedCommunityOpaque.Unused' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityOpaque.Unused',
False,
[
_MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None,
[], [],
''' Policy objects
''',
'object',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'unused',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.ExtendedCommunityOpaque.Inactive' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityOpaque.Inactive',
False,
[
_MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None,
[], [],
''' Policy objects
''',
'object',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'inactive',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.ExtendedCommunityOpaque.Active' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityOpaque.Active',
False,
[
_MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None,
[], [],
''' Policy objects
''',
'object',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'active',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.ExtendedCommunityOpaque' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityOpaque',
False,
[
_MetaInfoClassMember('active', REFERENCE_CLASS, 'Active' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityOpaque.Active',
[], [],
''' All objects of a given type that are attached to
a protocol
''',
'active',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('inactive', REFERENCE_CLASS, 'Inactive' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityOpaque.Inactive',
[], [],
''' All objects of a given type that are not
attached to a protocol
''',
'inactive',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('sets', REFERENCE_CLASS, 'Sets_' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets_',
[], [],
''' Information about individual sets
''',
'sets',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('unused', REFERENCE_CLASS, 'Unused' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityOpaque.Unused',
[], [],
''' All objects of a given type that are not
referenced at all
''',
'unused',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'extended-community-opaque',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
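    # A minimal usage sketch for the classes this metadata describes, assuming
    # the YDK runtime and a reachable device (the address and credentials are
    # placeholders):
    #
    #     from ydk.services import CRUDService
    #     from ydk.providers import NetconfServiceProvider
    #     from ydk.models.cisco_ios_xr import Cisco_IOS_XR_policy_repository_oper as repo_oper
    #
    #     provider = NetconfServiceProvider(address='192.0.2.1',
    #                                       username='admin', password='admin')
    #     crud = CRUDService()
    #     policy = crud.read(provider, repo_oper.RoutingPolicy())
    #     for name in policy.sets.extended_community_opaque.unused.object:
    #         print(name)  # opaque ext-community sets referenced nowhere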
'RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets_.Set.UsedBy.Reference' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets_.Set.UsedBy.Reference',
False,
[
_MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Name of policy
''',
'route_policy_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('status', REFERENCE_ENUM_CLASS, 'ObjectStatusEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'ObjectStatusEnum',
[], [],
''' Active, Inactive, or Unused
''',
'status',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('used-directly', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Whether the policy uses this object directly or
indirectly
''',
'used_directly',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'reference',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets_.Set.UsedBy' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets_.Set.UsedBy',
False,
[
_MetaInfoClassMember('reference', REFERENCE_LIST, 'Reference' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets_.Set.UsedBy.Reference',
[], [],
''' Information about policies referring to this
object
''',
'reference',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'used-by',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets_.Set.Attached.Binding' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets_.Set.Attached.Binding',
False,
[
_MetaInfoClassMember('af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum',
[], [],
''' Address Family Identifier
''',
'af_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('aggregate-network-address', ATTRIBUTE, 'str' , None, None,
[], [],
''' Aggregate IP address or Network IP Address
in IPv4 or IPv6 Format
''',
'aggregate_network_address',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('area-id', ATTRIBUTE, 'str' , None, None,
[], [],
''' OSPF Area ID in Decimal Integer Format
''',
'area_id',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('attach-point', ATTRIBUTE, 'str' , None, None,
[], [],
''' Name of attach point where policy is attached
''',
'attach_point',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('attached-policy', ATTRIBUTE, 'str' , None, None,
[], [],
                ''' The attached policy that (possibly indirectly) uses
the object in question
''',
'attached_policy',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('direction', REFERENCE_ENUM_CLASS, 'AttachPointDirectionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AttachPointDirectionEnum',
[], [],
''' Direction In or Out
''',
'direction',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'GroupEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'GroupEnum',
[], [],
''' Neighbor Group
''',
'group',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('group-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Neighbor Group Name
''',
'group_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('instance', ATTRIBUTE, 'str' , None, None,
[], [],
''' Instance
''',
'instance',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Interface Name
''',
'interface_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('neighbor-address', ATTRIBUTE, 'str' , None, None,
[], [],
''' Neighbor IP Address
''',
'neighbor_address',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('neighbor-af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum',
[], [],
''' Neighbor IP Address Family
''',
'neighbor_af_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('propogate-from', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
                ''' ISIS Propagate From Level
''',
'propogate_from',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('propogate-to', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
                ''' ISIS Propagate To Level
''',
'propogate_to',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('proto-instance', ATTRIBUTE, 'str' , None, None,
[], [],
''' Protocol instance
''',
'proto_instance',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('protocol', ATTRIBUTE, 'str' , None, None,
[], [],
                ''' Protocol to which the policy is attached
''',
'protocol',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None,
[], [],
                ''' Policy that uses the object in question
''',
'route_policy_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('saf-name', REFERENCE_ENUM_CLASS, 'SubAddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'SubAddressFamilyEnum',
[], [],
''' Subsequent Address Family Identifier
''',
'saf_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('source-protocol', ATTRIBUTE, 'str' , None, None,
[], [],
                ''' Source Protocol to redistribute. Source
                Protocol can be one of the following values:
                {all, connected, local, static, bgp, rip,
                isis, ospf, ospfv3, eigrp, unknown}
''',
'source_protocol',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' VRF name
''',
'vrf_name',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'binding',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets_.Set.Attached' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets_.Set.Attached',
False,
[
_MetaInfoClassMember('binding', REFERENCE_LIST, 'Binding' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets_.Set.Attached.Binding',
[], [],
                ''' Bindings list
''',
'binding',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'attached',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets_.Set' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets_.Set',
False,
[
_MetaInfoClassMember('set-name', ATTRIBUTE, 'str' , None, None,
[], ['[\\w\\-\\.:,_@#%$\\+=\\|;]+'],
''' Set name
''',
'set_name',
'Cisco-IOS-XR-policy-repository-oper', True),
_MetaInfoClassMember('attached', REFERENCE_CLASS, 'Attached' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets_.Set.Attached',
[], [],
''' Information about where this policy or set is
attached
''',
'attached',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('used-by', REFERENCE_CLASS, 'UsedBy' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets_.Set.UsedBy',
[], [],
''' Policies that use this object, directly or
indirectly
''',
'used_by',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'set',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets_' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets_',
False,
[
_MetaInfoClassMember('set', REFERENCE_LIST, 'Set' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets_.Set',
[], [],
''' Information about an individual set
''',
'set',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'sets',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.ExtendedCommunitySegNh.Unused' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySegNh.Unused',
False,
[
_MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None,
[], [],
''' Policy objects
''',
'object',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'unused',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.ExtendedCommunitySegNh.Inactive' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySegNh.Inactive',
False,
[
_MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None,
[], [],
''' Policy objects
''',
'object',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'inactive',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.ExtendedCommunitySegNh.Active' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySegNh.Active',
False,
[
_MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None,
[], [],
''' Policy objects
''',
'object',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'active',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.ExtendedCommunitySegNh' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySegNh',
False,
[
_MetaInfoClassMember('active', REFERENCE_CLASS, 'Active' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySegNh.Active',
[], [],
''' All objects of a given type that are attached to
a protocol
''',
'active',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('inactive', REFERENCE_CLASS, 'Inactive' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySegNh.Inactive',
[], [],
''' All objects of a given type that are not
attached to a protocol
''',
'inactive',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('sets', REFERENCE_CLASS, 'Sets_' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets_',
[], [],
''' Information about individual sets
''',
'sets',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('unused', REFERENCE_CLASS, 'Unused' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySegNh.Unused',
[], [],
''' All objects of a given type that are not
referenced at all
''',
'unused',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'extended-community-seg-nh',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
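    # ExtendedCommunitySegNh above repeats the ExtendedCommunityOpaque
    # structure member for member, as do the Soo, Tag, and Prefix variants that
    # follow; this duplication is what one would expect from code generated out
    # of a shared YANG grouping instantiated once per set type.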
'RoutingPolicy.Sets.ExtendedCommunitySoo.Sets_.Set.UsedBy.Reference' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySoo.Sets_.Set.UsedBy.Reference',
False,
[
_MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Name of policy
''',
'route_policy_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('status', REFERENCE_ENUM_CLASS, 'ObjectStatusEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'ObjectStatusEnum',
[], [],
''' Active, Inactive, or Unused
''',
'status',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('used-directly', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Whether the policy uses this object directly or
indirectly
''',
'used_directly',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'reference',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.ExtendedCommunitySoo.Sets_.Set.UsedBy' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySoo.Sets_.Set.UsedBy',
False,
[
_MetaInfoClassMember('reference', REFERENCE_LIST, 'Reference' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySoo.Sets_.Set.UsedBy.Reference',
[], [],
''' Information about policies referring to this
object
''',
'reference',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'used-by',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.ExtendedCommunitySoo.Sets_.Set.Attached.Binding' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySoo.Sets_.Set.Attached.Binding',
False,
[
_MetaInfoClassMember('af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum',
[], [],
''' Address Family Identifier
''',
'af_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('aggregate-network-address', ATTRIBUTE, 'str' , None, None,
[], [],
''' Aggregate IP address or Network IP Address
in IPv4 or IPv6 Format
''',
'aggregate_network_address',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('area-id', ATTRIBUTE, 'str' , None, None,
[], [],
''' OSPF Area ID in Decimal Integer Format
''',
'area_id',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('attach-point', ATTRIBUTE, 'str' , None, None,
[], [],
''' Name of attach point where policy is attached
''',
'attach_point',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('attached-policy', ATTRIBUTE, 'str' , None, None,
[], [],
                ''' The attached policy that (possibly indirectly) uses
the object in question
''',
'attached_policy',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('direction', REFERENCE_ENUM_CLASS, 'AttachPointDirectionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AttachPointDirectionEnum',
[], [],
''' Direction In or Out
''',
'direction',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'GroupEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'GroupEnum',
[], [],
''' Neighbor Group
''',
'group',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('group-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Neighbor Group Name
''',
'group_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('instance', ATTRIBUTE, 'str' , None, None,
[], [],
''' Instance
''',
'instance',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Interface Name
''',
'interface_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('neighbor-address', ATTRIBUTE, 'str' , None, None,
[], [],
''' Neighbor IP Address
''',
'neighbor_address',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('neighbor-af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum',
[], [],
''' Neighbor IP Address Family
''',
'neighbor_af_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('propogate-from', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
                ''' ISIS Propagate From Level
''',
'propogate_from',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('propogate-to', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
                ''' ISIS Propagate To Level
''',
'propogate_to',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('proto-instance', ATTRIBUTE, 'str' , None, None,
[], [],
''' Protocol instance
''',
'proto_instance',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('protocol', ATTRIBUTE, 'str' , None, None,
[], [],
                ''' Protocol to which the policy is attached
''',
'protocol',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None,
[], [],
                ''' Policy that uses the object in question
''',
'route_policy_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('saf-name', REFERENCE_ENUM_CLASS, 'SubAddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'SubAddressFamilyEnum',
[], [],
''' Subsequent Address Family Identifier
''',
'saf_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('source-protocol', ATTRIBUTE, 'str' , None, None,
[], [],
                ''' Source Protocol to redistribute. Source
                Protocol can be one of the following values:
                {all, connected, local, static, bgp, rip,
                isis, ospf, ospfv3, eigrp, unknown}
''',
'source_protocol',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' VRF name
''',
'vrf_name',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'binding',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.ExtendedCommunitySoo.Sets_.Set.Attached' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySoo.Sets_.Set.Attached',
False,
[
_MetaInfoClassMember('binding', REFERENCE_LIST, 'Binding' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySoo.Sets_.Set.Attached.Binding',
[], [],
                ''' Bindings list
''',
'binding',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'attached',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.ExtendedCommunitySoo.Sets_.Set' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySoo.Sets_.Set',
False,
[
_MetaInfoClassMember('set-name', ATTRIBUTE, 'str' , None, None,
[], ['[\\w\\-\\.:,_@#%$\\+=\\|;]+'],
''' Set name
''',
'set_name',
'Cisco-IOS-XR-policy-repository-oper', True),
_MetaInfoClassMember('attached', REFERENCE_CLASS, 'Attached' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySoo.Sets_.Set.Attached',
[], [],
''' Information about where this policy or set is
attached
''',
'attached',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('used-by', REFERENCE_CLASS, 'UsedBy' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySoo.Sets_.Set.UsedBy',
[], [],
''' Policies that use this object, directly or
indirectly
''',
'used_by',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'set',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.ExtendedCommunitySoo.Sets_' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySoo.Sets_',
False,
[
_MetaInfoClassMember('set', REFERENCE_LIST, 'Set' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySoo.Sets_.Set',
[], [],
''' Information about an individual set
''',
'set',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'sets',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.ExtendedCommunitySoo.Unused' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySoo.Unused',
False,
[
_MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None,
[], [],
''' Policy objects
''',
'object',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'unused',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.ExtendedCommunitySoo.Inactive' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySoo.Inactive',
False,
[
_MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None,
[], [],
''' Policy objects
''',
'object',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'inactive',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.ExtendedCommunitySoo.Active' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySoo.Active',
False,
[
_MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None,
[], [],
''' Policy objects
''',
'object',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'active',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.ExtendedCommunitySoo' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySoo',
False,
[
_MetaInfoClassMember('active', REFERENCE_CLASS, 'Active' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySoo.Active',
[], [],
''' All objects of a given type that are attached to
a protocol
''',
'active',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('inactive', REFERENCE_CLASS, 'Inactive' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySoo.Inactive',
[], [],
''' All objects of a given type that are not
attached to a protocol
''',
'inactive',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('sets', REFERENCE_CLASS, 'Sets_' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySoo.Sets_',
[], [],
''' Information about individual sets
''',
'sets',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('unused', REFERENCE_CLASS, 'Unused' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySoo.Unused',
[], [],
''' All objects of a given type that are not
referenced at all
''',
'unused',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'extended-community-soo',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.Tag.Sets_.Set.UsedBy.Reference' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Tag.Sets_.Set.UsedBy.Reference',
False,
[
_MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Name of policy
''',
'route_policy_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('status', REFERENCE_ENUM_CLASS, 'ObjectStatusEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'ObjectStatusEnum',
[], [],
''' Active, Inactive, or Unused
''',
'status',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('used-directly', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Whether the policy uses this object directly or
indirectly
''',
'used_directly',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'reference',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.Tag.Sets_.Set.UsedBy' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Tag.Sets_.Set.UsedBy',
False,
[
_MetaInfoClassMember('reference', REFERENCE_LIST, 'Reference' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Tag.Sets_.Set.UsedBy.Reference',
[], [],
''' Information about policies referring to this
object
''',
'reference',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'used-by',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.Tag.Sets_.Set.Attached.Binding' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Tag.Sets_.Set.Attached.Binding',
False,
[
_MetaInfoClassMember('af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum',
[], [],
''' Address Family Identifier
''',
'af_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('aggregate-network-address', ATTRIBUTE, 'str' , None, None,
[], [],
''' Aggregate IP address or Network IP Address
in IPv4 or IPv6 Format
''',
'aggregate_network_address',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('area-id', ATTRIBUTE, 'str' , None, None,
[], [],
''' OSPF Area ID in Decimal Integer Format
''',
'area_id',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('attach-point', ATTRIBUTE, 'str' , None, None,
[], [],
''' Name of attach point where policy is attached
''',
'attach_point',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('attached-policy', ATTRIBUTE, 'str' , None, None,
[], [],
                ''' The attached policy that (possibly indirectly) uses
the object in question
''',
'attached_policy',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('direction', REFERENCE_ENUM_CLASS, 'AttachPointDirectionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AttachPointDirectionEnum',
[], [],
''' Direction In or Out
''',
'direction',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'GroupEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'GroupEnum',
[], [],
''' Neighbor Group
''',
'group',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('group-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Neighbor Group Name
''',
'group_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('instance', ATTRIBUTE, 'str' , None, None,
[], [],
''' Instance
''',
'instance',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Interface Name
''',
'interface_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('neighbor-address', ATTRIBUTE, 'str' , None, None,
[], [],
''' Neighbor IP Address
''',
'neighbor_address',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('neighbor-af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum',
[], [],
''' Neighbor IP Address Family
''',
'neighbor_af_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('propogate-from', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
                ''' ISIS Propagate From Level
''',
'propogate_from',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('propogate-to', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
                ''' ISIS Propagate To Level
''',
'propogate_to',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('proto-instance', ATTRIBUTE, 'str' , None, None,
[], [],
''' Protocol instance
''',
'proto_instance',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('protocol', ATTRIBUTE, 'str' , None, None,
[], [],
                ''' Protocol to which the policy is attached
''',
'protocol',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None,
[], [],
                ''' Policy that uses the object in question
''',
'route_policy_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('saf-name', REFERENCE_ENUM_CLASS, 'SubAddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'SubAddressFamilyEnum',
[], [],
''' Subsequent Address Family Identifier
''',
'saf_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('source-protocol', ATTRIBUTE, 'str' , None, None,
[], [],
                ''' Source Protocol to redistribute. Source
                Protocol can be one of the following values:
                {all, connected, local, static, bgp, rip,
                isis, ospf, ospfv3, eigrp, unknown}
''',
'source_protocol',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' VRF name
''',
'vrf_name',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'binding',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.Tag.Sets_.Set.Attached' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Tag.Sets_.Set.Attached',
False,
[
_MetaInfoClassMember('binding', REFERENCE_LIST, 'Binding' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Tag.Sets_.Set.Attached.Binding',
[], [],
                ''' Bindings list
''',
'binding',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'attached',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.Tag.Sets_.Set' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Tag.Sets_.Set',
False,
[
_MetaInfoClassMember('set-name', ATTRIBUTE, 'str' , None, None,
[], ['[\\w\\-\\.:,_@#%$\\+=\\|;]+'],
''' Set name
''',
'set_name',
'Cisco-IOS-XR-policy-repository-oper', True),
_MetaInfoClassMember('attached', REFERENCE_CLASS, 'Attached' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Tag.Sets_.Set.Attached',
[], [],
''' Information about where this policy or set is
attached
''',
'attached',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('used-by', REFERENCE_CLASS, 'UsedBy' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Tag.Sets_.Set.UsedBy',
[], [],
''' Policies that use this object, directly or
indirectly
''',
'used_by',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'set',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.Tag.Sets_' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Tag.Sets_',
False,
[
_MetaInfoClassMember('set', REFERENCE_LIST, 'Set' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Tag.Sets_.Set',
[], [],
''' Information about an individual set
''',
'set',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'sets',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.Tag.Unused' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Tag.Unused',
False,
[
_MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None,
[], [],
''' Policy objects
''',
'object',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'unused',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.Tag.Inactive' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Tag.Inactive',
False,
[
_MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None,
[], [],
''' Policy objects
''',
'object',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'inactive',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.Tag.Active' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Tag.Active',
False,
[
_MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None,
[], [],
''' Policy objects
''',
'object',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'active',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.Tag' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Tag',
False,
[
_MetaInfoClassMember('active', REFERENCE_CLASS, 'Active' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Tag.Active',
[], [],
''' All objects of a given type that are attached to
a protocol
''',
'active',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('inactive', REFERENCE_CLASS, 'Inactive' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Tag.Inactive',
[], [],
''' All objects of a given type that are not
attached to a protocol
''',
'inactive',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('sets', REFERENCE_CLASS, 'Sets_' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Tag.Sets_',
[], [],
''' Information about individual sets
''',
'sets',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('unused', REFERENCE_CLASS, 'Unused' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Tag.Unused',
[], [],
''' All objects of a given type that are not
referenced at all
''',
'unused',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'tag',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.Prefix.Sets_.Set.UsedBy.Reference' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Prefix.Sets_.Set.UsedBy.Reference',
False,
[
_MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Name of policy
''',
'route_policy_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('status', REFERENCE_ENUM_CLASS, 'ObjectStatusEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'ObjectStatusEnum',
[], [],
''' Active, Inactive, or Unused
''',
'status',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('used-directly', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Whether the policy uses this object directly or
indirectly
''',
'used_directly',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'reference',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.Prefix.Sets_.Set.UsedBy' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Prefix.Sets_.Set.UsedBy',
False,
[
_MetaInfoClassMember('reference', REFERENCE_LIST, 'Reference' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Prefix.Sets_.Set.UsedBy.Reference',
[], [],
''' Information about policies referring to this
object
''',
'reference',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'used-by',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.Prefix.Sets_.Set.Attached.Binding' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Prefix.Sets_.Set.Attached.Binding',
False,
[
_MetaInfoClassMember('af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum',
[], [],
''' Address Family Identifier
''',
'af_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('aggregate-network-address', ATTRIBUTE, 'str' , None, None,
[], [],
''' Aggregate IP address or Network IP Address
in IPv4 or IPv6 Format
''',
'aggregate_network_address',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('area-id', ATTRIBUTE, 'str' , None, None,
[], [],
''' OSPF Area ID in Decimal Integer Format
''',
'area_id',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('attach-point', ATTRIBUTE, 'str' , None, None,
[], [],
''' Name of attach point where policy is attached
''',
'attach_point',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('attached-policy', ATTRIBUTE, 'str' , None, None,
[], [],
                ''' The attached policy that (possibly indirectly) uses
the object in question
''',
'attached_policy',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('direction', REFERENCE_ENUM_CLASS, 'AttachPointDirectionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AttachPointDirectionEnum',
[], [],
''' Direction In or Out
''',
'direction',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'GroupEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'GroupEnum',
[], [],
''' Neighbor Group
''',
'group',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('group-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Neighbor Group Name
''',
'group_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('instance', ATTRIBUTE, 'str' , None, None,
[], [],
''' Instance
''',
'instance',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Interface Name
''',
'interface_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('neighbor-address', ATTRIBUTE, 'str' , None, None,
[], [],
''' Neighbor IP Address
''',
'neighbor_address',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('neighbor-af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum',
[], [],
''' Neighbor IP Address Family
''',
'neighbor_af_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('propogate-from', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
                ''' ISIS Propagate From Level
''',
'propogate_from',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('propogate-to', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
                ''' ISIS Propagate To Level
''',
'propogate_to',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('proto-instance', ATTRIBUTE, 'str' , None, None,
[], [],
''' Protocol instance
''',
'proto_instance',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('protocol', ATTRIBUTE, 'str' , None, None,
[], [],
                ''' Protocol to which the policy is attached
''',
'protocol',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None,
[], [],
                ''' Policy that uses the object in question
''',
'route_policy_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('saf-name', REFERENCE_ENUM_CLASS, 'SubAddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'SubAddressFamilyEnum',
[], [],
''' Subsequent Address Family Identifier
''',
'saf_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('source-protocol', ATTRIBUTE, 'str' , None, None,
[], [],
                ''' Source Protocol to redistribute. Source
                Protocol can be one of the following values:
                {all, connected, local, static, bgp, rip,
                isis, ospf, ospfv3, eigrp, unknown}
''',
'source_protocol',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' VRF name
''',
'vrf_name',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'binding',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.Prefix.Sets_.Set.Attached' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Prefix.Sets_.Set.Attached',
False,
[
_MetaInfoClassMember('binding', REFERENCE_LIST, 'Binding' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Prefix.Sets_.Set.Attached.Binding',
[], [],
                ''' Bindings list
''',
'binding',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'attached',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.Prefix.Sets_.Set' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Prefix.Sets_.Set',
False,
[
_MetaInfoClassMember('set-name', ATTRIBUTE, 'str' , None, None,
[], ['[\\w\\-\\.:,_@#%$\\+=\\|;]+'],
''' Set name
''',
'set_name',
'Cisco-IOS-XR-policy-repository-oper', True),
_MetaInfoClassMember('attached', REFERENCE_CLASS, 'Attached' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Prefix.Sets_.Set.Attached',
[], [],
''' Information about where this policy or set is
attached
''',
'attached',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('used-by', REFERENCE_CLASS, 'UsedBy' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Prefix.Sets_.Set.UsedBy',
[], [],
''' Policies that use this object, directly or
indirectly
''',
'used_by',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'set',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.Prefix.Sets_' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Prefix.Sets_',
False,
[
_MetaInfoClassMember('set', REFERENCE_LIST, 'Set' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Prefix.Sets_.Set',
[], [],
''' Information about an individual set
''',
'set',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'sets',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.Prefix.Unused' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Prefix.Unused',
False,
[
_MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None,
[], [],
''' Policy objects
''',
'object',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'unused',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.Prefix.Inactive' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Prefix.Inactive',
False,
[
_MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None,
[], [],
''' Policy objects
''',
'object',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'inactive',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.Prefix.Active' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Prefix.Active',
False,
[
_MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None,
[], [],
''' Policy objects
''',
'object',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'active',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.Prefix' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Prefix',
False,
[
_MetaInfoClassMember('active', REFERENCE_CLASS, 'Active' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Prefix.Active',
[], [],
''' All objects of a given type that are attached to
a protocol
''',
'active',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('inactive', REFERENCE_CLASS, 'Inactive' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Prefix.Inactive',
[], [],
''' All objects of a given type that are not
attached to a protocol
''',
'inactive',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('sets', REFERENCE_CLASS, 'Sets_' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Prefix.Sets_',
[], [],
''' Information about individual sets
''',
'sets',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('unused', REFERENCE_CLASS, 'Unused' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Prefix.Unused',
[], [],
''' All objects of a given type that are not
referenced at all
''',
'unused',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'prefix',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
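    # The Active/Inactive/Unused containers each hold a plain leaf-list of
    # strings (REFERENCE_LEAFLIST above), so, continuing the hypothetical
    # 'policy' object, the unused prefix sets are simply:
    #
    #     for name in policy.sets.prefix.unused.object:
    #         print(name)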
'RoutingPolicy.Sets.Community.Sets_.Set.UsedBy.Reference' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Community.Sets_.Set.UsedBy.Reference',
False,
[
_MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Name of policy
''',
'route_policy_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('status', REFERENCE_ENUM_CLASS, 'ObjectStatusEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'ObjectStatusEnum',
[], [],
''' Active, Inactive, or Unused
''',
'status',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('used-directly', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Whether the policy uses this object directly or
indirectly
''',
'used_directly',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'reference',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.Community.Sets_.Set.UsedBy' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Community.Sets_.Set.UsedBy',
False,
[
_MetaInfoClassMember('reference', REFERENCE_LIST, 'Reference' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Community.Sets_.Set.UsedBy.Reference',
[], [],
''' Information about policies referring to this
object
''',
'reference',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'used-by',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.Community.Sets_.Set.Attached.Binding' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Community.Sets_.Set.Attached.Binding',
False,
[
_MetaInfoClassMember('af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum',
[], [],
''' Address Family Identifier
''',
'af_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('aggregate-network-address', ATTRIBUTE, 'str' , None, None,
[], [],
''' Aggregate IP address or Network IP Address
in IPv4 or IPv6 Format
''',
'aggregate_network_address',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('area-id', ATTRIBUTE, 'str' , None, None,
[], [],
''' OSPF Area ID in Decimal Integer Format
''',
'area_id',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('attach-point', ATTRIBUTE, 'str' , None, None,
[], [],
''' Name of attach point where policy is attached
''',
'attach_point',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('attached-policy', ATTRIBUTE, 'str' , None, None,
[], [],
                ''' The attached policy that (possibly indirectly) uses
the object in question
''',
'attached_policy',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('direction', REFERENCE_ENUM_CLASS, 'AttachPointDirectionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AttachPointDirectionEnum',
[], [],
''' Direction In or Out
''',
'direction',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'GroupEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'GroupEnum',
[], [],
''' Neighbor Group
''',
'group',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('group-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Neighbor Group Name
''',
'group_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('instance', ATTRIBUTE, 'str' , None, None,
[], [],
''' Instance
''',
'instance',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Interface Name
''',
'interface_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('neighbor-address', ATTRIBUTE, 'str' , None, None,
[], [],
''' Neighbor IP Address
''',
'neighbor_address',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('neighbor-af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum',
[], [],
''' Neighbor IP Address Family
''',
'neighbor_af_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('propogate-from', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
                ''' ISIS Propagate From Level
''',
'propogate_from',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('propogate-to', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
                ''' ISIS Propagate To Level
''',
'propogate_to',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('proto-instance', ATTRIBUTE, 'str' , None, None,
[], [],
''' Protocol instance
''',
'proto_instance',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('protocol', ATTRIBUTE, 'str' , None, None,
[], [],
                ''' Protocol to which the policy is attached
''',
'protocol',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Policy that uses object in question
''',
'route_policy_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('saf-name', REFERENCE_ENUM_CLASS, 'SubAddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'SubAddressFamilyEnum',
[], [],
''' Subsequent Address Family Identifier
''',
'saf_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('source-protocol', ATTRIBUTE, 'str' , None, None,
[], [],
                ''' Source protocol to redistribute. One of:
                all, connected, local, static, bgp, rip, isis,
                ospf, ospfv3, eigrp, unknown
''',
'source_protocol',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' VRF name
''',
'vrf_name',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'binding',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.Community.Sets_.Set.Attached' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Community.Sets_.Set.Attached',
False,
[
_MetaInfoClassMember('binding', REFERENCE_LIST, 'Binding' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Community.Sets_.Set.Attached.Binding',
[], [],
                ''' List of bindings
''',
'binding',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'attached',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.Community.Sets_.Set' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Community.Sets_.Set',
False,
[
_MetaInfoClassMember('set-name', ATTRIBUTE, 'str' , None, None,
[], ['[\\w\\-\\.:,_@#%$\\+=\\|;]+'],
''' Set name
''',
'set_name',
'Cisco-IOS-XR-policy-repository-oper', True),
_MetaInfoClassMember('attached', REFERENCE_CLASS, 'Attached' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Community.Sets_.Set.Attached',
[], [],
''' Information about where this policy or set is
attached
''',
'attached',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('used-by', REFERENCE_CLASS, 'UsedBy' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Community.Sets_.Set.UsedBy',
[], [],
''' Policies that use this object, directly or
indirectly
''',
'used_by',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'set',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.Community.Sets_' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Community.Sets_',
False,
[
_MetaInfoClassMember('set', REFERENCE_LIST, 'Set' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Community.Sets_.Set',
[], [],
''' Information about an individual set
''',
'set',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'sets',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.Community.Unused' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Community.Unused',
False,
[
_MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None,
[], [],
''' Policy objects
''',
'object',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'unused',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.Community.Inactive' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Community.Inactive',
False,
[
_MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None,
[], [],
''' Policy objects
''',
'object',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'inactive',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.Community.Active' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Community.Active',
False,
[
_MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None,
[], [],
''' Policy objects
''',
'object',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'active',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.Community' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Community',
False,
[
_MetaInfoClassMember('active', REFERENCE_CLASS, 'Active' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Community.Active',
[], [],
''' All objects of a given type that are attached to
a protocol
''',
'active',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('inactive', REFERENCE_CLASS, 'Inactive' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Community.Inactive',
[], [],
''' All objects of a given type that are not
attached to a protocol
''',
'inactive',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('sets', REFERENCE_CLASS, 'Sets_' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Community.Sets_',
[], [],
''' Information about individual sets
''',
'sets',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('unused', REFERENCE_CLASS, 'Unused' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Community.Unused',
[], [],
''' All objects of a given type that are not
referenced at all
''',
'unused',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'community',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
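
# A minimal usage sketch for the model this metadata describes, assuming a
# reachable NETCONF device and the classic (0.x) ydk-py API; the address and
# credentials are placeholders:
#
#   from ydk.providers import NetconfServiceProvider
#   from ydk.services import CRUDService
#   from ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper \
#       import RoutingPolicy
#
#   provider = NetconfServiceProvider(address='192.0.2.1',
#                                     username='admin', password='admin')
#   crud = CRUDService()
#   routing_policy = crud.read(provider, RoutingPolicy())
#   for name in routing_policy.sets.community.active.object:
#       print(name)   # community sets currently attached to a protocol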
'RoutingPolicy.Sets.AsPath.Sets_.Set.UsedBy.Reference' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.AsPath.Sets_.Set.UsedBy.Reference',
False,
[
_MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Name of policy
''',
'route_policy_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('status', REFERENCE_ENUM_CLASS, 'ObjectStatusEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'ObjectStatusEnum',
[], [],
''' Active, Inactive, or Unused
''',
'status',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('used-directly', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Whether the policy uses this object directly or
indirectly
''',
'used_directly',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'reference',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.AsPath.Sets_.Set.UsedBy' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.AsPath.Sets_.Set.UsedBy',
False,
[
_MetaInfoClassMember('reference', REFERENCE_LIST, 'Reference' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.AsPath.Sets_.Set.UsedBy.Reference',
[], [],
''' Information about policies referring to this
object
''',
'reference',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'used-by',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.AsPath.Sets_.Set.Attached.Binding' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.AsPath.Sets_.Set.Attached.Binding',
False,
[
_MetaInfoClassMember('af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum',
[], [],
''' Address Family Identifier
''',
'af_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('aggregate-network-address', ATTRIBUTE, 'str' , None, None,
[], [],
''' Aggregate IP address or Network IP Address
in IPv4 or IPv6 Format
''',
'aggregate_network_address',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('area-id', ATTRIBUTE, 'str' , None, None,
[], [],
''' OSPF Area ID in Decimal Integer Format
''',
'area_id',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('attach-point', ATTRIBUTE, 'str' , None, None,
[], [],
''' Name of attach point where policy is attached
''',
'attach_point',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('attached-policy', ATTRIBUTE, 'str' , None, None,
[], [],
                ''' The attached policy that (possibly indirectly) uses
the object in question
''',
'attached_policy',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('direction', REFERENCE_ENUM_CLASS, 'AttachPointDirectionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AttachPointDirectionEnum',
[], [],
''' Direction In or Out
''',
'direction',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'GroupEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'GroupEnum',
[], [],
''' Neighbor Group
''',
'group',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('group-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Neighbor Group Name
''',
'group_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('instance', ATTRIBUTE, 'str' , None, None,
[], [],
''' Instance
''',
'instance',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Interface Name
''',
'interface_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('neighbor-address', ATTRIBUTE, 'str' , None, None,
[], [],
''' Neighbor IP Address
''',
'neighbor_address',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('neighbor-af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum',
[], [],
''' Neighbor IP Address Family
''',
'neighbor_af_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('propogate-from', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
                ''' ISIS Propagate From Level
''',
'propogate_from',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('propogate-to', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
                ''' ISIS Propagate To Level
''',
'propogate_to',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('proto-instance', ATTRIBUTE, 'str' , None, None,
[], [],
''' Protocol instance
''',
'proto_instance',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('protocol', ATTRIBUTE, 'str' , None, None,
[], [],
                ''' Protocol to which the policy is attached
''',
'protocol',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Policy that uses object in question
''',
'route_policy_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('saf-name', REFERENCE_ENUM_CLASS, 'SubAddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'SubAddressFamilyEnum',
[], [],
''' Subsequent Address Family Identifier
''',
'saf_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('source-protocol', ATTRIBUTE, 'str' , None, None,
[], [],
                ''' Source protocol to redistribute. One of:
                all, connected, local, static, bgp, rip, isis,
                ospf, ospfv3, eigrp, unknown
''',
'source_protocol',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' VRF name
''',
'vrf_name',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'binding',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.AsPath.Sets_.Set.Attached' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.AsPath.Sets_.Set.Attached',
False,
[
_MetaInfoClassMember('binding', REFERENCE_LIST, 'Binding' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.AsPath.Sets_.Set.Attached.Binding',
[], [],
                ''' List of bindings
''',
'binding',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'attached',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.AsPath.Sets_.Set' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.AsPath.Sets_.Set',
False,
[
_MetaInfoClassMember('set-name', ATTRIBUTE, 'str' , None, None,
[], ['[\\w\\-\\.:,_@#%$\\+=\\|;]+'],
''' Set name
''',
'set_name',
'Cisco-IOS-XR-policy-repository-oper', True),
_MetaInfoClassMember('attached', REFERENCE_CLASS, 'Attached' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.AsPath.Sets_.Set.Attached',
[], [],
''' Information about where this policy or set is
attached
''',
'attached',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('used-by', REFERENCE_CLASS, 'UsedBy' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.AsPath.Sets_.Set.UsedBy',
[], [],
''' Policies that use this object, directly or
indirectly
''',
'used_by',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'set',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.AsPath.Sets_' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.AsPath.Sets_',
False,
[
_MetaInfoClassMember('set', REFERENCE_LIST, 'Set' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.AsPath.Sets_.Set',
[], [],
''' Information about an individual set
''',
'set',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'sets',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.AsPath.Unused' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.AsPath.Unused',
False,
[
_MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None,
[], [],
''' Policy objects
''',
'object',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'unused',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.AsPath.Inactive' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.AsPath.Inactive',
False,
[
_MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None,
[], [],
''' Policy objects
''',
'object',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'inactive',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.AsPath.Active' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.AsPath.Active',
False,
[
_MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None,
[], [],
''' Policy objects
''',
'object',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'active',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.AsPath' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.AsPath',
False,
[
_MetaInfoClassMember('active', REFERENCE_CLASS, 'Active' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.AsPath.Active',
[], [],
''' All objects of a given type that are attached to
a protocol
''',
'active',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('inactive', REFERENCE_CLASS, 'Inactive' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.AsPath.Inactive',
[], [],
''' All objects of a given type that are not
attached to a protocol
''',
'inactive',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('sets', REFERENCE_CLASS, 'Sets_' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.AsPath.Sets_',
[], [],
''' Information about individual sets
''',
'sets',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('unused', REFERENCE_CLASS, 'Unused' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.AsPath.Unused',
[], [],
''' All objects of a given type that are not
referenced at all
''',
'unused',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'as-path',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
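
# Sketch (same provider/crud assumptions as above) for listing the policies
# that reference each as-path set; 'used_by', 'reference',
# 'route_policy_name' and 'used_directly' are the Python attribute names
# declared above, while 'as_path' is inferred from the YANG name 'as-path':
#
#   for s in routing_policy.sets.as_path.sets.set:
#       for ref in s.used_by.reference:
#           print(s.set_name, ref.route_policy_name, ref.used_directly)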
'RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets_.Set.UsedBy.Reference' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets_.Set.UsedBy.Reference',
False,
[
_MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Name of policy
''',
'route_policy_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('status', REFERENCE_ENUM_CLASS, 'ObjectStatusEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'ObjectStatusEnum',
[], [],
''' Active, Inactive, or Unused
''',
'status',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('used-directly', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Whether the policy uses this object directly or
indirectly
''',
'used_directly',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'reference',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets_.Set.UsedBy' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets_.Set.UsedBy',
False,
[
_MetaInfoClassMember('reference', REFERENCE_LIST, 'Reference' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets_.Set.UsedBy.Reference',
[], [],
''' Information about policies referring to this
object
''',
'reference',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'used-by',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets_.Set.Attached.Binding' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets_.Set.Attached.Binding',
False,
[
_MetaInfoClassMember('af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum',
[], [],
''' Address Family Identifier
''',
'af_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('aggregate-network-address', ATTRIBUTE, 'str' , None, None,
[], [],
''' Aggregate IP address or Network IP Address
in IPv4 or IPv6 Format
''',
'aggregate_network_address',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('area-id', ATTRIBUTE, 'str' , None, None,
[], [],
''' OSPF Area ID in Decimal Integer Format
''',
'area_id',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('attach-point', ATTRIBUTE, 'str' , None, None,
[], [],
''' Name of attach point where policy is attached
''',
'attach_point',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('attached-policy', ATTRIBUTE, 'str' , None, None,
[], [],
                ''' The attached policy that (possibly indirectly) uses
the object in question
''',
'attached_policy',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('direction', REFERENCE_ENUM_CLASS, 'AttachPointDirectionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AttachPointDirectionEnum',
[], [],
''' Direction In or Out
''',
'direction',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'GroupEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'GroupEnum',
[], [],
''' Neighbor Group
''',
'group',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('group-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Neighbor Group Name
''',
'group_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('instance', ATTRIBUTE, 'str' , None, None,
[], [],
''' Instance
''',
'instance',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Interface Name
''',
'interface_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('neighbor-address', ATTRIBUTE, 'str' , None, None,
[], [],
''' Neighbor IP Address
''',
'neighbor_address',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('neighbor-af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum',
[], [],
''' Neighbor IP Address Family
''',
'neighbor_af_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('propogate-from', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
                ''' ISIS Propagate From Level
''',
'propogate_from',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('propogate-to', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
                ''' ISIS Propagate To Level
''',
'propogate_to',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('proto-instance', ATTRIBUTE, 'str' , None, None,
[], [],
''' Protocol instance
''',
'proto_instance',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('protocol', ATTRIBUTE, 'str' , None, None,
[], [],
                ''' Protocol to which the policy is attached
''',
'protocol',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Policy that uses object in question
''',
'route_policy_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('saf-name', REFERENCE_ENUM_CLASS, 'SubAddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'SubAddressFamilyEnum',
[], [],
''' Subsequent Address Family Identifier
''',
'saf_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('source-protocol', ATTRIBUTE, 'str' , None, None,
[], [],
                ''' Source protocol to redistribute. One of:
                all, connected, local, static, bgp, rip, isis,
                ospf, ospfv3, eigrp, unknown
''',
'source_protocol',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' VRF name
''',
'vrf_name',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'binding',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets_.Set.Attached' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets_.Set.Attached',
False,
[
_MetaInfoClassMember('binding', REFERENCE_LIST, 'Binding' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets_.Set.Attached.Binding',
[], [],
                ''' List of bindings
''',
'binding',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'attached',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets_.Set' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets_.Set',
False,
[
_MetaInfoClassMember('set-name', ATTRIBUTE, 'str' , None, None,
[], ['[\\w\\-\\.:,_@#%$\\+=\\|;]+'],
''' Set name
''',
'set_name',
'Cisco-IOS-XR-policy-repository-oper', True),
_MetaInfoClassMember('attached', REFERENCE_CLASS, 'Attached' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets_.Set.Attached',
[], [],
''' Information about where this policy or set is
attached
''',
'attached',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('used-by', REFERENCE_CLASS, 'UsedBy' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets_.Set.UsedBy',
[], [],
''' Policies that use this object, directly or
indirectly
''',
'used_by',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'set',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets_' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets_',
False,
[
_MetaInfoClassMember('set', REFERENCE_LIST, 'Set' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets_.Set',
[], [],
''' Information about an individual set
''',
'set',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'sets',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.ExtendedCommunityBandwidth.Unused' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityBandwidth.Unused',
False,
[
_MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None,
[], [],
''' Policy objects
''',
'object',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'unused',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.ExtendedCommunityBandwidth.Inactive' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityBandwidth.Inactive',
False,
[
_MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None,
[], [],
''' Policy objects
''',
'object',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'inactive',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.ExtendedCommunityBandwidth' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityBandwidth',
False,
[
_MetaInfoClassMember('inactive', REFERENCE_CLASS, 'Inactive' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityBandwidth.Inactive',
[], [],
''' All objects of a given type that are not
attached to a protocol
''',
'inactive',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('sets', REFERENCE_CLASS, 'Sets_' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets_',
[], [],
''' Information about individual sets
''',
'sets',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('unused', REFERENCE_CLASS, 'Unused' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityBandwidth.Unused',
[], [],
''' All objects of a given type that are not
referenced at all
''',
'unused',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'extended-community-bandwidth',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
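
# Note: unlike the other set types in this table, the
# extended-community-bandwidth container above declares no 'active' member
# in this revision of the model; only 'inactive', 'sets' and 'unused' are
# present.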
'RoutingPolicy.Sets.ExtendedCommunityRt.Sets_.Set.UsedBy.Reference' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityRt.Sets_.Set.UsedBy.Reference',
False,
[
_MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Name of policy
''',
'route_policy_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('status', REFERENCE_ENUM_CLASS, 'ObjectStatusEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'ObjectStatusEnum',
[], [],
''' Active, Inactive, or Unused
''',
'status',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('used-directly', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Whether the policy uses this object directly or
indirectly
''',
'used_directly',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'reference',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.ExtendedCommunityRt.Sets_.Set.UsedBy' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityRt.Sets_.Set.UsedBy',
False,
[
_MetaInfoClassMember('reference', REFERENCE_LIST, 'Reference' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityRt.Sets_.Set.UsedBy.Reference',
[], [],
''' Information about policies referring to this
object
''',
'reference',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'used-by',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.ExtendedCommunityRt.Sets_.Set.Attached.Binding' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityRt.Sets_.Set.Attached.Binding',
False,
[
_MetaInfoClassMember('af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum',
[], [],
''' Address Family Identifier
''',
'af_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('aggregate-network-address', ATTRIBUTE, 'str' , None, None,
[], [],
''' Aggregate IP address or Network IP Address
in IPv4 or IPv6 Format
''',
'aggregate_network_address',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('area-id', ATTRIBUTE, 'str' , None, None,
[], [],
''' OSPF Area ID in Decimal Integer Format
''',
'area_id',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('attach-point', ATTRIBUTE, 'str' , None, None,
[], [],
''' Name of attach point where policy is attached
''',
'attach_point',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('attached-policy', ATTRIBUTE, 'str' , None, None,
[], [],
                ''' The attached policy that (possibly indirectly) uses
the object in question
''',
'attached_policy',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('direction', REFERENCE_ENUM_CLASS, 'AttachPointDirectionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AttachPointDirectionEnum',
[], [],
''' Direction In or Out
''',
'direction',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'GroupEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'GroupEnum',
[], [],
''' Neighbor Group
''',
'group',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('group-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Neighbor Group Name
''',
'group_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('instance', ATTRIBUTE, 'str' , None, None,
[], [],
''' Instance
''',
'instance',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Interface Name
''',
'interface_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('neighbor-address', ATTRIBUTE, 'str' , None, None,
[], [],
''' Neighbor IP Address
''',
'neighbor_address',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('neighbor-af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum',
[], [],
''' Neighbor IP Address Family
''',
'neighbor_af_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('propogate-from', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
                ''' ISIS Propagate From Level
''',
'propogate_from',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('propogate-to', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
                ''' ISIS Propagate To Level
''',
'propogate_to',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('proto-instance', ATTRIBUTE, 'str' , None, None,
[], [],
''' Protocol instance
''',
'proto_instance',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('protocol', ATTRIBUTE, 'str' , None, None,
[], [],
                ''' Protocol to which the policy is attached
''',
'protocol',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Policy that uses object in question
''',
'route_policy_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('saf-name', REFERENCE_ENUM_CLASS, 'SubAddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'SubAddressFamilyEnum',
[], [],
''' Subsequent Address Family Identifier
''',
'saf_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('source-protocol', ATTRIBUTE, 'str' , None, None,
[], [],
                ''' Source protocol to redistribute. One of:
                all, connected, local, static, bgp, rip, isis,
                ospf, ospfv3, eigrp, unknown
''',
'source_protocol',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' VRF name
''',
'vrf_name',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'binding',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.ExtendedCommunityRt.Sets_.Set.Attached' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityRt.Sets_.Set.Attached',
False,
[
_MetaInfoClassMember('binding', REFERENCE_LIST, 'Binding' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityRt.Sets_.Set.Attached.Binding',
[], [],
                ''' List of bindings
''',
'binding',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'attached',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.ExtendedCommunityRt.Sets_.Set' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityRt.Sets_.Set',
False,
[
_MetaInfoClassMember('set-name', ATTRIBUTE, 'str' , None, None,
[], ['[\\w\\-\\.:,_@#%$\\+=\\|;]+'],
''' Set name
''',
'set_name',
'Cisco-IOS-XR-policy-repository-oper', True),
_MetaInfoClassMember('attached', REFERENCE_CLASS, 'Attached' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityRt.Sets_.Set.Attached',
[], [],
''' Information about where this policy or set is
attached
''',
'attached',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('used-by', REFERENCE_CLASS, 'UsedBy' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityRt.Sets_.Set.UsedBy',
[], [],
''' Policies that use this object, directly or
indirectly
''',
'used_by',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'set',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.ExtendedCommunityRt.Sets_' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityRt.Sets_',
False,
[
_MetaInfoClassMember('set', REFERENCE_LIST, 'Set' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityRt.Sets_.Set',
[], [],
''' Information about an individual set
''',
'set',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'sets',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.ExtendedCommunityRt.Unused' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityRt.Unused',
False,
[
_MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None,
[], [],
''' Policy objects
''',
'object',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'unused',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.ExtendedCommunityRt.Inactive' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityRt.Inactive',
False,
[
_MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None,
[], [],
''' Policy objects
''',
'object',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'inactive',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.ExtendedCommunityRt.Active' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityRt.Active',
False,
[
_MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None,
[], [],
''' Policy objects
''',
'object',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'active',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.ExtendedCommunityRt' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityRt',
False,
[
_MetaInfoClassMember('active', REFERENCE_CLASS, 'Active' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityRt.Active',
[], [],
''' All objects of a given type that are attached to
a protocol
''',
'active',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('inactive', REFERENCE_CLASS, 'Inactive' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityRt.Inactive',
[], [],
''' All objects of a given type that are not
attached to a protocol
''',
'inactive',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('sets', REFERENCE_CLASS, 'Sets_' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityRt.Sets_',
[], [],
''' Information about individual sets
''',
'sets',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('unused', REFERENCE_CLASS, 'Unused' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityRt.Unused',
[], [],
''' All objects of a given type that are not
referenced at all
''',
'unused',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'extended-community-rt',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.Rd.Sets_.Set.UsedBy.Reference' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Rd.Sets_.Set.UsedBy.Reference',
False,
[
_MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Name of policy
''',
'route_policy_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('status', REFERENCE_ENUM_CLASS, 'ObjectStatusEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'ObjectStatusEnum',
[], [],
''' Active, Inactive, or Unused
''',
'status',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('used-directly', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Whether the policy uses this object directly or
indirectly
''',
'used_directly',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'reference',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.Rd.Sets_.Set.UsedBy' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Rd.Sets_.Set.UsedBy',
False,
[
_MetaInfoClassMember('reference', REFERENCE_LIST, 'Reference' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Rd.Sets_.Set.UsedBy.Reference',
[], [],
''' Information about policies referring to this
object
''',
'reference',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'used-by',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.Rd.Sets_.Set.Attached.Binding' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Rd.Sets_.Set.Attached.Binding',
False,
[
_MetaInfoClassMember('af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum',
[], [],
''' Address Family Identifier
''',
'af_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('aggregate-network-address', ATTRIBUTE, 'str' , None, None,
[], [],
''' Aggregate IP address or Network IP Address
in IPv4 or IPv6 Format
''',
'aggregate_network_address',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('area-id', ATTRIBUTE, 'str' , None, None,
[], [],
''' OSPF Area ID in Decimal Integer Format
''',
'area_id',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('attach-point', ATTRIBUTE, 'str' , None, None,
[], [],
''' Name of attach point where policy is attached
''',
'attach_point',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('attached-policy', ATTRIBUTE, 'str' , None, None,
[], [],
                ''' The attached policy that (possibly indirectly) uses
the object in question
''',
'attached_policy',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('direction', REFERENCE_ENUM_CLASS, 'AttachPointDirectionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AttachPointDirectionEnum',
[], [],
''' Direction In or Out
''',
'direction',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'GroupEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'GroupEnum',
[], [],
''' Neighbor Group
''',
'group',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('group-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Neighbor Group Name
''',
'group_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('instance', ATTRIBUTE, 'str' , None, None,
[], [],
''' Instance
''',
'instance',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Interface Name
''',
'interface_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('neighbor-address', ATTRIBUTE, 'str' , None, None,
[], [],
''' Neighbor IP Address
''',
'neighbor_address',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('neighbor-af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum',
[], [],
''' Neighbor IP Address Family
''',
'neighbor_af_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('propogate-from', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
                ''' ISIS Propagate From Level
''',
'propogate_from',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('propogate-to', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
                ''' ISIS Propagate To Level
''',
'propogate_to',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('proto-instance', ATTRIBUTE, 'str' , None, None,
[], [],
''' Protocol instance
''',
'proto_instance',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('protocol', ATTRIBUTE, 'str' , None, None,
[], [],
                ''' Protocol to which the policy is attached
''',
'protocol',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Policy that uses object in question
''',
'route_policy_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('saf-name', REFERENCE_ENUM_CLASS, 'SubAddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'SubAddressFamilyEnum',
[], [],
''' Subsequent Address Family Identifier
''',
'saf_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('source-protocol', ATTRIBUTE, 'str' , None, None,
[], [],
                ''' Source protocol to redistribute. One of:
                all, connected, local, static, bgp, rip, isis,
                ospf, ospfv3, eigrp, unknown
''',
'source_protocol',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' VRF name
''',
'vrf_name',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'binding',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.Rd.Sets_.Set.Attached' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Rd.Sets_.Set.Attached',
False,
[
_MetaInfoClassMember('binding', REFERENCE_LIST, 'Binding' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Rd.Sets_.Set.Attached.Binding',
[], [],
                ''' List of bindings
''',
'binding',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'attached',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.Rd.Sets_.Set' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Rd.Sets_.Set',
False,
[
_MetaInfoClassMember('set-name', ATTRIBUTE, 'str' , None, None,
[], ['[\\w\\-\\.:,_@#%$\\+=\\|;]+'],
''' Set name
''',
'set_name',
'Cisco-IOS-XR-policy-repository-oper', True),
_MetaInfoClassMember('attached', REFERENCE_CLASS, 'Attached' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Rd.Sets_.Set.Attached',
[], [],
''' Information about where this policy or set is
attached
''',
'attached',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('used-by', REFERENCE_CLASS, 'UsedBy' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Rd.Sets_.Set.UsedBy',
[], [],
''' Policies that use this object, directly or
indirectly
''',
'used_by',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'set',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.Rd.Sets_' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Rd.Sets_',
False,
[
_MetaInfoClassMember('set', REFERENCE_LIST, 'Set' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Rd.Sets_.Set',
[], [],
''' Information about an individual set
''',
'set',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'sets',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.Rd.Unused' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Rd.Unused',
False,
[
_MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None,
[], [],
''' Policy objects
''',
'object',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'unused',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.Rd.Inactive' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Rd.Inactive',
False,
[
_MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None,
[], [],
''' Policy objects
''',
'object',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'inactive',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.Rd.Active' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Rd.Active',
False,
[
_MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None,
[], [],
''' Policy objects
''',
'object',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'active',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.Rd' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Rd',
False,
[
_MetaInfoClassMember('active', REFERENCE_CLASS, 'Active' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Rd.Active',
[], [],
''' All objects of a given type that are attached to
a protocol
''',
'active',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('inactive', REFERENCE_CLASS, 'Inactive' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Rd.Inactive',
[], [],
''' All objects of a given type that are not
attached to a protocol
''',
'inactive',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('sets', REFERENCE_CLASS, 'Sets_' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Rd.Sets_',
[], [],
''' Information about individual sets
''',
'sets',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('unused', REFERENCE_CLASS, 'Unused' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Rd.Unused',
[], [],
''' All objects of a given type that are not
referenced at all
''',
'unused',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'rd',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
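
# Sketch (same assumptions as the sketches above) for walking where each RD
# set is attached; 'attached', 'binding', 'protocol', 'attach_point' and
# 'direction' are the attribute names declared above:
#
#   for s in routing_policy.sets.rd.sets.set:
#       for b in s.attached.binding:
#           print(s.set_name, b.protocol, b.attach_point, b.direction)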
'RoutingPolicy.Sets.ExtendedCommunityCost.Sets_.Set.UsedBy.Reference' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityCost.Sets_.Set.UsedBy.Reference',
False,
[
_MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Name of policy
''',
'route_policy_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('status', REFERENCE_ENUM_CLASS, 'ObjectStatusEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'ObjectStatusEnum',
[], [],
''' Active, Inactive, or Unused
''',
'status',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('used-directly', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Whether the policy uses this object directly or
indirectly
''',
'used_directly',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'reference',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.ExtendedCommunityCost.Sets_.Set.UsedBy' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityCost.Sets_.Set.UsedBy',
False,
[
_MetaInfoClassMember('reference', REFERENCE_LIST, 'Reference' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityCost.Sets_.Set.UsedBy.Reference',
[], [],
''' Information about policies referring to this
object
''',
'reference',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'used-by',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.ExtendedCommunityCost.Sets_.Set.Attached.Binding' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityCost.Sets_.Set.Attached.Binding',
False,
[
_MetaInfoClassMember('af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum',
[], [],
''' Address Family Identifier
''',
'af_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('aggregate-network-address', ATTRIBUTE, 'str' , None, None,
[], [],
''' Aggregate IP address or Network IP Address
in IPv4 or IPv6 Format
''',
'aggregate_network_address',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('area-id', ATTRIBUTE, 'str' , None, None,
[], [],
''' OSPF Area ID in Decimal Integer Format
''',
'area_id',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('attach-point', ATTRIBUTE, 'str' , None, None,
[], [],
''' Name of attach point where policy is attached
''',
'attach_point',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('attached-policy', ATTRIBUTE, 'str' , None, None,
[], [],
''' The attached policy that (maybe indirectly) uses
the object in question
''',
'attached_policy',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('direction', REFERENCE_ENUM_CLASS, 'AttachPointDirectionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AttachPointDirectionEnum',
[], [],
''' Direction In or Out
''',
'direction',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'GroupEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'GroupEnum',
[], [],
''' Neighbor Group
''',
'group',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('group-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Neighbor Group Name
''',
'group_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('instance', ATTRIBUTE, 'str' , None, None,
[], [],
''' Instance
''',
'instance',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Interface Name
''',
'interface_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('neighbor-address', ATTRIBUTE, 'str' , None, None,
[], [],
''' Neighbor IP Address
''',
'neighbor_address',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('neighbor-af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum',
[], [],
''' Neighbor IP Address Family
''',
'neighbor_af_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('propogate-from', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
                ''' ISIS Propagate From Level
''',
'propogate_from',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('propogate-to', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
                ''' ISIS Propagate To Level
''',
'propogate_to',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('proto-instance', ATTRIBUTE, 'str' , None, None,
[], [],
''' Protocol instance
''',
'proto_instance',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('protocol', ATTRIBUTE, 'str' , None, None,
[], [],
''' Protocol to which policy attached
''',
'protocol',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Policy that uses object in question
''',
'route_policy_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('saf-name', REFERENCE_ENUM_CLASS, 'SubAddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'SubAddressFamilyEnum',
[], [],
''' Subsequent Address Family Identifier
''',
'saf_name',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('source-protocol', ATTRIBUTE, 'str' , None, None,
[], [],
''' Source Protocol to redistribute,
Source Protocol can be one of the following
values {all,
connected, local, static, bgp, rip, isis, ospf,
ospfv3, eigrp, unknown }
''',
'source_protocol',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' VRF name
''',
'vrf_name',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'binding',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.ExtendedCommunityCost.Sets_.Set.Attached' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityCost.Sets_.Set.Attached',
False,
[
_MetaInfoClassMember('binding', REFERENCE_LIST, 'Binding' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityCost.Sets_.Set.Attached.Binding',
[], [],
''' bindings list
''',
'binding',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'attached',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.ExtendedCommunityCost.Sets_.Set' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityCost.Sets_.Set',
False,
[
_MetaInfoClassMember('set-name', ATTRIBUTE, 'str' , None, None,
[], ['[\\w\\-\\.:,_@#%$\\+=\\|;]+'],
''' Set name
''',
'set_name',
'Cisco-IOS-XR-policy-repository-oper', True),
_MetaInfoClassMember('attached', REFERENCE_CLASS, 'Attached' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityCost.Sets_.Set.Attached',
[], [],
''' Information about where this policy or set is
attached
''',
'attached',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('used-by', REFERENCE_CLASS, 'UsedBy' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityCost.Sets_.Set.UsedBy',
[], [],
''' Policies that use this object, directly or
indirectly
''',
'used_by',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'set',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.ExtendedCommunityCost.Sets_' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityCost.Sets_',
False,
[
_MetaInfoClassMember('set', REFERENCE_LIST, 'Set' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityCost.Sets_.Set',
[], [],
''' Information about an individual set
''',
'set',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'sets',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.ExtendedCommunityCost.Unused' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityCost.Unused',
False,
[
_MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None,
[], [],
''' Policy objects
''',
'object',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'unused',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.ExtendedCommunityCost.Inactive' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityCost.Inactive',
False,
[
_MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None,
[], [],
''' Policy objects
''',
'object',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'inactive',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.ExtendedCommunityCost.Active' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityCost.Active',
False,
[
_MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None,
[], [],
''' Policy objects
''',
'object',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'active',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets.ExtendedCommunityCost' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityCost',
False,
[
_MetaInfoClassMember('active', REFERENCE_CLASS, 'Active' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityCost.Active',
[], [],
''' All objects of a given type that are attached to
a protocol
''',
'active',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('inactive', REFERENCE_CLASS, 'Inactive' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityCost.Inactive',
[], [],
''' All objects of a given type that are not
attached to a protocol
''',
'inactive',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('sets', REFERENCE_CLASS, 'Sets_' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityCost.Sets_',
[], [],
''' Information about individual sets
''',
'sets',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('unused', REFERENCE_CLASS, 'Unused' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityCost.Unused',
[], [],
''' All objects of a given type that are not
referenced at all
''',
'unused',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'extended-community-cost',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy.Sets' : {
'meta_info' : _MetaInfoClass('RoutingPolicy.Sets',
False,
[
_MetaInfoClassMember('as-path', REFERENCE_CLASS, 'AsPath' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.AsPath',
[], [],
''' Information about AS Path sets
''',
'as_path',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('community', REFERENCE_CLASS, 'Community' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Community',
[], [],
''' Information about Community sets
''',
'community',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('extended-community-bandwidth', REFERENCE_CLASS, 'ExtendedCommunityBandwidth' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityBandwidth',
[], [],
''' Information about Extended Community Bandwidth
sets
''',
'extended_community_bandwidth',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('extended-community-cost', REFERENCE_CLASS, 'ExtendedCommunityCost' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityCost',
[], [],
''' Information about Extended Community Cost sets
''',
'extended_community_cost',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('extended-community-opaque', REFERENCE_CLASS, 'ExtendedCommunityOpaque' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityOpaque',
[], [],
''' Information about Extended Community Opaque
sets
''',
'extended_community_opaque',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('extended-community-rt', REFERENCE_CLASS, 'ExtendedCommunityRt' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityRt',
[], [],
''' Information about Extended Community RT sets
''',
'extended_community_rt',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('extended-community-seg-nh', REFERENCE_CLASS, 'ExtendedCommunitySegNh' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySegNh',
[], [],
''' Information about Extended Community SegNH sets
''',
'extended_community_seg_nh',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('extended-community-soo', REFERENCE_CLASS, 'ExtendedCommunitySoo' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySoo',
[], [],
''' Information about Extended Community SOO sets
''',
'extended_community_soo',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('ospf-area', REFERENCE_CLASS, 'OspfArea' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.OspfArea',
[], [],
''' Information about OSPF Area sets
''',
'ospf_area',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('prefix', REFERENCE_CLASS, 'Prefix' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Prefix',
[], [],
                ''' Information about Prefix sets
''',
'prefix',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('rd', REFERENCE_CLASS, 'Rd' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Rd',
[], [],
''' Information about RD sets
''',
'rd',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('tag', REFERENCE_CLASS, 'Tag' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Tag',
[], [],
''' Information about Tag sets
''',
'tag',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'sets',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
'RoutingPolicy' : {
'meta_info' : _MetaInfoClass('RoutingPolicy',
False,
[
_MetaInfoClassMember('limits', REFERENCE_CLASS, 'Limits' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Limits',
[], [],
''' Information about configured limits and the
current values
''',
'limits',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('policies', REFERENCE_CLASS, 'Policies' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Policies',
[], [],
''' Information about configured route policies
''',
'policies',
'Cisco-IOS-XR-policy-repository-oper', False),
_MetaInfoClassMember('sets', REFERENCE_CLASS, 'Sets' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets',
[], [],
''' Information about configured sets
''',
'sets',
'Cisco-IOS-XR-policy-repository-oper', False),
],
'Cisco-IOS-XR-policy-repository-oper',
'routing-policy',
_yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper'
),
},
}
_meta_table['RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.AllUsedSets.Sets']['meta_info'].parent =_meta_table['RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.AllUsedSets']['meta_info']
_meta_table['RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.DirectlyUsedSets.Sets']['meta_info'].parent =_meta_table['RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.DirectlyUsedSets']['meta_info']
_meta_table['RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.DirectlyUsedPolicies']['meta_info'].parent =_meta_table['RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses']['meta_info']
_meta_table['RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.AllUsedSets']['meta_info'].parent =_meta_table['RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses']['meta_info']
_meta_table['RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.DirectlyUsedSets']['meta_info'].parent =_meta_table['RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses']['meta_info']
_meta_table['RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.AllUsedPolicies']['meta_info'].parent =_meta_table['RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses']['meta_info']
_meta_table['RoutingPolicy.Policies.RoutePolicies.RoutePolicy.UsedBy.Reference']['meta_info'].parent =_meta_table['RoutingPolicy.Policies.RoutePolicies.RoutePolicy.UsedBy']['meta_info']
_meta_table['RoutingPolicy.Policies.RoutePolicies.RoutePolicy.Attached.Binding']['meta_info'].parent =_meta_table['RoutingPolicy.Policies.RoutePolicies.RoutePolicy.Attached']['meta_info']
_meta_table['RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses']['meta_info'].parent =_meta_table['RoutingPolicy.Policies.RoutePolicies.RoutePolicy']['meta_info']
_meta_table['RoutingPolicy.Policies.RoutePolicies.RoutePolicy.UsedBy']['meta_info'].parent =_meta_table['RoutingPolicy.Policies.RoutePolicies.RoutePolicy']['meta_info']
_meta_table['RoutingPolicy.Policies.RoutePolicies.RoutePolicy.Attached']['meta_info'].parent =_meta_table['RoutingPolicy.Policies.RoutePolicies.RoutePolicy']['meta_info']
_meta_table['RoutingPolicy.Policies.RoutePolicies.RoutePolicy']['meta_info'].parent =_meta_table['RoutingPolicy.Policies.RoutePolicies']['meta_info']
_meta_table['RoutingPolicy.Policies.RoutePolicies']['meta_info'].parent =_meta_table['RoutingPolicy.Policies']['meta_info']
_meta_table['RoutingPolicy.Policies.Unused']['meta_info'].parent =_meta_table['RoutingPolicy.Policies']['meta_info']
_meta_table['RoutingPolicy.Policies.Inactive']['meta_info'].parent =_meta_table['RoutingPolicy.Policies']['meta_info']
_meta_table['RoutingPolicy.Policies.Active']['meta_info'].parent =_meta_table['RoutingPolicy.Policies']['meta_info']
_meta_table['RoutingPolicy.Sets.OspfArea.Sets_.Set.UsedBy.Reference']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.OspfArea.Sets_.Set.UsedBy']['meta_info']
_meta_table['RoutingPolicy.Sets.OspfArea.Sets_.Set.Attached.Binding']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.OspfArea.Sets_.Set.Attached']['meta_info']
_meta_table['RoutingPolicy.Sets.OspfArea.Sets_.Set.UsedBy']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.OspfArea.Sets_.Set']['meta_info']
_meta_table['RoutingPolicy.Sets.OspfArea.Sets_.Set.Attached']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.OspfArea.Sets_.Set']['meta_info']
_meta_table['RoutingPolicy.Sets.OspfArea.Sets_.Set']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.OspfArea.Sets_']['meta_info']
_meta_table['RoutingPolicy.Sets.OspfArea.Sets_']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.OspfArea']['meta_info']
_meta_table['RoutingPolicy.Sets.OspfArea.Unused']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.OspfArea']['meta_info']
_meta_table['RoutingPolicy.Sets.OspfArea.Inactive']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.OspfArea']['meta_info']
_meta_table['RoutingPolicy.Sets.OspfArea.Active']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.OspfArea']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets_.Set.UsedBy.Reference']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets_.Set.UsedBy']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets_.Set.Attached.Binding']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets_.Set.Attached']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets_.Set.UsedBy']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets_.Set']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets_.Set.Attached']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets_.Set']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets_.Set']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets_']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets_']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityOpaque']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunityOpaque.Unused']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityOpaque']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunityOpaque.Inactive']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityOpaque']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunityOpaque.Active']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityOpaque']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets_.Set.UsedBy.Reference']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets_.Set.UsedBy']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets_.Set.Attached.Binding']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets_.Set.Attached']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets_.Set.UsedBy']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets_.Set']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets_.Set.Attached']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets_.Set']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets_.Set']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets_']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets_']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunitySegNh']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunitySegNh.Unused']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunitySegNh']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunitySegNh.Inactive']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunitySegNh']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunitySegNh.Active']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunitySegNh']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunitySoo.Sets_.Set.UsedBy.Reference']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunitySoo.Sets_.Set.UsedBy']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunitySoo.Sets_.Set.Attached.Binding']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunitySoo.Sets_.Set.Attached']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunitySoo.Sets_.Set.UsedBy']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunitySoo.Sets_.Set']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunitySoo.Sets_.Set.Attached']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunitySoo.Sets_.Set']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunitySoo.Sets_.Set']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunitySoo.Sets_']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunitySoo.Sets_']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunitySoo']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunitySoo.Unused']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunitySoo']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunitySoo.Inactive']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunitySoo']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunitySoo.Active']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunitySoo']['meta_info']
_meta_table['RoutingPolicy.Sets.Tag.Sets_.Set.UsedBy.Reference']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Tag.Sets_.Set.UsedBy']['meta_info']
_meta_table['RoutingPolicy.Sets.Tag.Sets_.Set.Attached.Binding']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Tag.Sets_.Set.Attached']['meta_info']
_meta_table['RoutingPolicy.Sets.Tag.Sets_.Set.UsedBy']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Tag.Sets_.Set']['meta_info']
_meta_table['RoutingPolicy.Sets.Tag.Sets_.Set.Attached']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Tag.Sets_.Set']['meta_info']
_meta_table['RoutingPolicy.Sets.Tag.Sets_.Set']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Tag.Sets_']['meta_info']
_meta_table['RoutingPolicy.Sets.Tag.Sets_']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Tag']['meta_info']
_meta_table['RoutingPolicy.Sets.Tag.Unused']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Tag']['meta_info']
_meta_table['RoutingPolicy.Sets.Tag.Inactive']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Tag']['meta_info']
_meta_table['RoutingPolicy.Sets.Tag.Active']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Tag']['meta_info']
_meta_table['RoutingPolicy.Sets.Prefix.Sets_.Set.UsedBy.Reference']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Prefix.Sets_.Set.UsedBy']['meta_info']
_meta_table['RoutingPolicy.Sets.Prefix.Sets_.Set.Attached.Binding']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Prefix.Sets_.Set.Attached']['meta_info']
_meta_table['RoutingPolicy.Sets.Prefix.Sets_.Set.UsedBy']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Prefix.Sets_.Set']['meta_info']
_meta_table['RoutingPolicy.Sets.Prefix.Sets_.Set.Attached']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Prefix.Sets_.Set']['meta_info']
_meta_table['RoutingPolicy.Sets.Prefix.Sets_.Set']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Prefix.Sets_']['meta_info']
_meta_table['RoutingPolicy.Sets.Prefix.Sets_']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Prefix']['meta_info']
_meta_table['RoutingPolicy.Sets.Prefix.Unused']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Prefix']['meta_info']
_meta_table['RoutingPolicy.Sets.Prefix.Inactive']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Prefix']['meta_info']
_meta_table['RoutingPolicy.Sets.Prefix.Active']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Prefix']['meta_info']
_meta_table['RoutingPolicy.Sets.Community.Sets_.Set.UsedBy.Reference']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Community.Sets_.Set.UsedBy']['meta_info']
_meta_table['RoutingPolicy.Sets.Community.Sets_.Set.Attached.Binding']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Community.Sets_.Set.Attached']['meta_info']
_meta_table['RoutingPolicy.Sets.Community.Sets_.Set.UsedBy']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Community.Sets_.Set']['meta_info']
_meta_table['RoutingPolicy.Sets.Community.Sets_.Set.Attached']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Community.Sets_.Set']['meta_info']
_meta_table['RoutingPolicy.Sets.Community.Sets_.Set']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Community.Sets_']['meta_info']
_meta_table['RoutingPolicy.Sets.Community.Sets_']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Community']['meta_info']
_meta_table['RoutingPolicy.Sets.Community.Unused']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Community']['meta_info']
_meta_table['RoutingPolicy.Sets.Community.Inactive']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Community']['meta_info']
_meta_table['RoutingPolicy.Sets.Community.Active']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Community']['meta_info']
_meta_table['RoutingPolicy.Sets.AsPath.Sets_.Set.UsedBy.Reference']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.AsPath.Sets_.Set.UsedBy']['meta_info']
_meta_table['RoutingPolicy.Sets.AsPath.Sets_.Set.Attached.Binding']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.AsPath.Sets_.Set.Attached']['meta_info']
_meta_table['RoutingPolicy.Sets.AsPath.Sets_.Set.UsedBy']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.AsPath.Sets_.Set']['meta_info']
_meta_table['RoutingPolicy.Sets.AsPath.Sets_.Set.Attached']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.AsPath.Sets_.Set']['meta_info']
_meta_table['RoutingPolicy.Sets.AsPath.Sets_.Set']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.AsPath.Sets_']['meta_info']
_meta_table['RoutingPolicy.Sets.AsPath.Sets_']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.AsPath']['meta_info']
_meta_table['RoutingPolicy.Sets.AsPath.Unused']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.AsPath']['meta_info']
_meta_table['RoutingPolicy.Sets.AsPath.Inactive']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.AsPath']['meta_info']
_meta_table['RoutingPolicy.Sets.AsPath.Active']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.AsPath']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets_.Set.UsedBy.Reference']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets_.Set.UsedBy']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets_.Set.Attached.Binding']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets_.Set.Attached']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets_.Set.UsedBy']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets_.Set']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets_.Set.Attached']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets_.Set']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets_.Set']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets_']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets_']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityBandwidth']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunityBandwidth.Unused']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityBandwidth']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunityBandwidth.Inactive']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityBandwidth']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunityRt.Sets_.Set.UsedBy.Reference']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityRt.Sets_.Set.UsedBy']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunityRt.Sets_.Set.Attached.Binding']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityRt.Sets_.Set.Attached']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunityRt.Sets_.Set.UsedBy']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityRt.Sets_.Set']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunityRt.Sets_.Set.Attached']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityRt.Sets_.Set']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunityRt.Sets_.Set']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityRt.Sets_']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunityRt.Sets_']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityRt']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunityRt.Unused']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityRt']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunityRt.Inactive']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityRt']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunityRt.Active']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityRt']['meta_info']
_meta_table['RoutingPolicy.Sets.Rd.Sets_.Set.UsedBy.Reference']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Rd.Sets_.Set.UsedBy']['meta_info']
_meta_table['RoutingPolicy.Sets.Rd.Sets_.Set.Attached.Binding']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Rd.Sets_.Set.Attached']['meta_info']
_meta_table['RoutingPolicy.Sets.Rd.Sets_.Set.UsedBy']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Rd.Sets_.Set']['meta_info']
_meta_table['RoutingPolicy.Sets.Rd.Sets_.Set.Attached']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Rd.Sets_.Set']['meta_info']
_meta_table['RoutingPolicy.Sets.Rd.Sets_.Set']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Rd.Sets_']['meta_info']
_meta_table['RoutingPolicy.Sets.Rd.Sets_']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Rd']['meta_info']
_meta_table['RoutingPolicy.Sets.Rd.Unused']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Rd']['meta_info']
_meta_table['RoutingPolicy.Sets.Rd.Inactive']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Rd']['meta_info']
_meta_table['RoutingPolicy.Sets.Rd.Active']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Rd']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunityCost.Sets_.Set.UsedBy.Reference']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityCost.Sets_.Set.UsedBy']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunityCost.Sets_.Set.Attached.Binding']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityCost.Sets_.Set.Attached']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunityCost.Sets_.Set.UsedBy']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityCost.Sets_.Set']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunityCost.Sets_.Set.Attached']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityCost.Sets_.Set']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunityCost.Sets_.Set']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityCost.Sets_']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunityCost.Sets_']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityCost']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunityCost.Unused']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityCost']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunityCost.Inactive']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityCost']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunityCost.Active']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityCost']['meta_info']
_meta_table['RoutingPolicy.Sets.OspfArea']['meta_info'].parent =_meta_table['RoutingPolicy.Sets']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunityOpaque']['meta_info'].parent =_meta_table['RoutingPolicy.Sets']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunitySegNh']['meta_info'].parent =_meta_table['RoutingPolicy.Sets']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunitySoo']['meta_info'].parent =_meta_table['RoutingPolicy.Sets']['meta_info']
_meta_table['RoutingPolicy.Sets.Tag']['meta_info'].parent =_meta_table['RoutingPolicy.Sets']['meta_info']
_meta_table['RoutingPolicy.Sets.Prefix']['meta_info'].parent =_meta_table['RoutingPolicy.Sets']['meta_info']
_meta_table['RoutingPolicy.Sets.Community']['meta_info'].parent =_meta_table['RoutingPolicy.Sets']['meta_info']
_meta_table['RoutingPolicy.Sets.AsPath']['meta_info'].parent =_meta_table['RoutingPolicy.Sets']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunityBandwidth']['meta_info'].parent =_meta_table['RoutingPolicy.Sets']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunityRt']['meta_info'].parent =_meta_table['RoutingPolicy.Sets']['meta_info']
_meta_table['RoutingPolicy.Sets.Rd']['meta_info'].parent =_meta_table['RoutingPolicy.Sets']['meta_info']
_meta_table['RoutingPolicy.Sets.ExtendedCommunityCost']['meta_info'].parent =_meta_table['RoutingPolicy.Sets']['meta_info']
_meta_table['RoutingPolicy.Limits']['meta_info'].parent =_meta_table['RoutingPolicy']['meta_info']
_meta_table['RoutingPolicy.Policies']['meta_info'].parent =_meta_table['RoutingPolicy']['meta_info']
_meta_table['RoutingPolicy.Sets']['meta_info'].parent =_meta_table['RoutingPolicy']['meta_info']
| 1.453125
| 1
|
accelbyte_py_sdk/api/platform/models/currency_update.py
|
encyphered/accelbyte-python-sdk
| 0
|
12777536
|
<filename>accelbyte_py_sdk/api/platform/models/currency_update.py
# Auto-generated at 2021-09-27T17:12:36.265221+08:00
# from: Justice Platform Service (3.24.0)
# Copyright (c) 2018 - 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from ....core import Model
class CurrencyUpdate(Model):
"""A DTO for update currency
Properties:
localization_descriptions: (localizationDescriptions) OPTIONAL Dict[str, str]
max_amount_per_transaction: (maxAmountPerTransaction) OPTIONAL int
max_transaction_amount_per_day: (maxTransactionAmountPerDay) OPTIONAL int
max_balance_amount: (maxBalanceAmount) OPTIONAL int
"""
# region fields
localization_descriptions: Dict[str, str] # OPTIONAL
max_amount_per_transaction: int # OPTIONAL
max_transaction_amount_per_day: int # OPTIONAL
max_balance_amount: int # OPTIONAL
# endregion fields
# region with_x methods
def with_localization_descriptions(self, value: Dict[str, str]) -> CurrencyUpdate:
self.localization_descriptions = value
return self
def with_max_amount_per_transaction(self, value: int) -> CurrencyUpdate:
self.max_amount_per_transaction = value
return self
def with_max_transaction_amount_per_day(self, value: int) -> CurrencyUpdate:
self.max_transaction_amount_per_day = value
return self
def with_max_balance_amount(self, value: int) -> CurrencyUpdate:
self.max_balance_amount = value
return self
# endregion with_x methods
# region to methods
def to_dict(self, include_empty: bool = False) -> dict:
result = {}
if hasattr(self, "localization_descriptions") and self.localization_descriptions:
result["localizationDescriptions"] = {str(k0): str(v0) for k0, v0 in self.localization_descriptions.items()}
elif include_empty:
result["localizationDescriptions"] = {}
if hasattr(self, "max_amount_per_transaction") and self.max_amount_per_transaction:
result["maxAmountPerTransaction"] = int(self.max_amount_per_transaction)
elif include_empty:
result["maxAmountPerTransaction"] = int()
if hasattr(self, "max_transaction_amount_per_day") and self.max_transaction_amount_per_day:
result["maxTransactionAmountPerDay"] = int(self.max_transaction_amount_per_day)
elif include_empty:
result["maxTransactionAmountPerDay"] = int()
if hasattr(self, "max_balance_amount") and self.max_balance_amount:
result["maxBalanceAmount"] = int(self.max_balance_amount)
elif include_empty:
result["maxBalanceAmount"] = int()
return result
# endregion to methods
# region static methods
@classmethod
def create(
cls,
localization_descriptions: Optional[Dict[str, str]] = None,
max_amount_per_transaction: Optional[int] = None,
max_transaction_amount_per_day: Optional[int] = None,
max_balance_amount: Optional[int] = None,
) -> CurrencyUpdate:
instance = cls()
if localization_descriptions is not None:
instance.localization_descriptions = localization_descriptions
if max_amount_per_transaction is not None:
instance.max_amount_per_transaction = max_amount_per_transaction
if max_transaction_amount_per_day is not None:
instance.max_transaction_amount_per_day = max_transaction_amount_per_day
if max_balance_amount is not None:
instance.max_balance_amount = max_balance_amount
return instance
@classmethod
def create_from_dict(cls, dict_: dict, include_empty: bool = False) -> CurrencyUpdate:
instance = cls()
if not dict_:
return instance
if "localizationDescriptions" in dict_ and dict_["localizationDescriptions"] is not None:
instance.localization_descriptions = {str(k0): str(v0) for k0, v0 in dict_["localizationDescriptions"].items()}
elif include_empty:
instance.localization_descriptions = {}
if "maxAmountPerTransaction" in dict_ and dict_["maxAmountPerTransaction"] is not None:
instance.max_amount_per_transaction = int(dict_["maxAmountPerTransaction"])
elif include_empty:
instance.max_amount_per_transaction = int()
if "maxTransactionAmountPerDay" in dict_ and dict_["maxTransactionAmountPerDay"] is not None:
instance.max_transaction_amount_per_day = int(dict_["maxTransactionAmountPerDay"])
elif include_empty:
instance.max_transaction_amount_per_day = int()
if "maxBalanceAmount" in dict_ and dict_["maxBalanceAmount"] is not None:
instance.max_balance_amount = int(dict_["maxBalanceAmount"])
elif include_empty:
instance.max_balance_amount = int()
return instance
@staticmethod
def get_field_info() -> Dict[str, str]:
return {
"localizationDescriptions": "localization_descriptions",
"maxAmountPerTransaction": "max_amount_per_transaction",
"maxTransactionAmountPerDay": "max_transaction_amount_per_day",
"maxBalanceAmount": "max_balance_amount",
}
# endregion static methods
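    # Hedged usage sketch (not part of the generated SDK): build an update
    # payload with create()/with_x(), serialize it, and round-trip it through
    # the dict helpers. The field values below are illustrative only.
if __name__ == "__main__":
    update = CurrencyUpdate.create(
        localization_descriptions={"en": "Gold coins"},
        max_amount_per_transaction=1000,
    ).with_max_balance_amount(50000)
    payload = update.to_dict()
    # -> {'localizationDescriptions': {'en': 'Gold coins'},
    #     'maxAmountPerTransaction': 1000, 'maxBalanceAmount': 50000}
    assert CurrencyUpdate.create_from_dict(payload).to_dict() == payload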
| 1.617188
| 2
|
conformer/configs/model.py
|
dudgns0908/KoASR
| 0
|
12777537
|
<reponame>dudgns0908/KoASR<filename>conformer/configs/model.py<gh_stars>0
from dataclasses import dataclass
@dataclass
class ConformerLargeConfig:
encoder_dim: int = 512
num_encoder_layers: int = 17
num_attention_heads: int = 8
conv_kernel_size: int = 31
dropout_p: float = 0.1
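# Hedged usage sketch (assumed, not from the original repo): the dataclass can
# be instantiated with its defaults or with selective overrides.
if __name__ == "__main__":
    default_cfg = ConformerLargeConfig()
    small_cfg = ConformerLargeConfig(encoder_dim=256, num_encoder_layers=8)
    print(default_cfg.encoder_dim, small_cfg.num_encoder_layers)  # 512 8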
| 1.78125
| 2
|
fortytwocli/main.py
|
dhaiibfiukkiu/42cli
| 4
|
12777538
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import click
import fortytwocli.init as init_
import fortytwocli.status as status_
import fortytwocli.project as project
import fortytwocli.util as util
import fortytwocli.ipCalc as ip
@click.group()
def fourtyTwo():
pass
@fourtyTwo.command(help="initializes settings.")
def init():
init_.init()
@fourtyTwo.command(help="shows your status.")
def status():
try:
util.checkConfigExists()
status_.showStatus()
except Exception as e:
click.secho(str(e), fg='red')
@fourtyTwo.command(name="clone-project", help="clone project.")
def cloneProject():
try:
util.checkConfigExists()
project.cloneProject()
except Exception as e:
click.secho(str(e), fg='red')
@fourtyTwo.command(name="ip", help="launch ip address calculator.")
def ipCalc():
ip.calc()
def main():
    fourtyTwo()
if __name__ == '__main__':
    main()
| 2.546875
| 3
|
tictactoe/common.py
|
ephjos/ai
| 0
|
12777539
|
<reponame>ephjos/ai
#!/usr/bin/env python
from enum import Enum, auto
class Tile:
Empty = '-'
X = 'X'
O = 'O'
class Result(Enum):
Tie = auto()
X_Win = auto()
O_Win = auto()
def show_board(board):
for i in range(3):
i *= 3
print(f'{board[i]} {board[i+1]} {board[i+2]}')
print()
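# Small demo (illustrative, not part of the original module): render an
# in-progress board using the Tile constants above.
if __name__ == '__main__':
    board = [
        Tile.X, Tile.O, Tile.Empty,
        Tile.Empty, Tile.X, Tile.O,
        Tile.Empty, Tile.Empty, Tile.X,
    ]
    show_board(board)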
| 3.75
| 4
|
grid-navigation-paths-count/tests/test_string_permutations.py
|
dompuiu/puzzles
| 1
|
12777540
|
from unittest import TestCase
from grid_path.string_permutations import Permutations
class TestPermutations(TestCase):
def test_get_permutations_with_empty_string(self):
self.assertEqual(Permutations('').get_permutations(), set(['']))
def test_get_permutations_with_one_letter_word(self):
self.assertEqual(Permutations('A').get_permutations(), set(['A']))
def test_get_permutations_with_two_letters_word(self):
self.assertEqual(Permutations('AB').get_permutations(), set(['AB', 'BA']))
def test_get_permutations_with_three_letters_word(self):
self.assertEqual(Permutations('ABC').get_permutations(), set(['ABC', 'ACB', 'BAC', 'BCA', 'CAB', 'CBA']))
def test_get_permutations_with_same_letter_word(self):
self.assertEqual(Permutations('AA').get_permutations(), set(['AA']))
| 3.5
| 4
|
windbell/core/windfile.py
|
HawkinsZhao/windbell
| 4
|
12777541
|
<gh_stars>1-10
import os
import json
import yaml
import pystache
from windbell.core.exceptions import *
class WindfileConfig():
    def __init__(self, content):
        # safe_load avoids executing arbitrary YAML tags; plain yaml.load is
        # unsafe and requires an explicit Loader in modern PyYAML.
        self.value = yaml.safe_load(content)
def check_schema(self):
return True
def calc_env_deps(self):
def _fetch(config):
envs = []
for key in config.keys():
if type(config[key]) == dict:
if 'from_env' in config[key].keys():
envs += [config[key]['from_env']]
else:
envs += _fetch(config[key])
return envs
return _fetch(self.value)
def dump(self):
return yaml.dump(self.value, default_flow_style=False)
class WindfileTemplate():
def __init__(self, content):
self.value = content
def dump(self):
return self.value
class Windfile():
    def __init__(self, content):
        if '---' not in content:
raise WindfileDamangedError()
windfile = content.split('\n')
split_idx = windfile.index('---')
config = windfile[0:split_idx]
config = '\n'.join(config)
self._config = WindfileConfig(config)
template = windfile[split_idx + 1:]
template = '\n'.join(template).strip()
self._template = WindfileTemplate(template)
@property
def config(self):
return self._config
@config.setter
def config(self, value):
        self._config.value = yaml.safe_load(value)
@property
def template(self):
return self._template
@template.setter
def template(self, value):
self._template.value = value
def render(self, data_injected={}, env_injected={}):
def _render_config(config):
envs = {
**dict(os.environ),
**env_injected
}
def if_dict(element):
if 'from_env' in element:
return envs[element['from_env']]
else:
return _render_config(element)
type_map = {
dict: if_dict,
str: lambda x: x,
list: lambda x: x
}
return {
key: type_map[type(config[key])](config[key])
for key in config.keys()
}
config = _render_config(self.config.value)
data = json.loads(json.dumps(config['data']))
data = {**data, **data_injected}
dist = pystache.render(self.template.value, data)
return dist, config
def dist(self):
config = self.config.dump()
template = self.template.dump()
return config + '\n---\n' + template
def json(self):
return json.dumps({
'envs': self.config.calc_env_deps(),
'config': self.config.dump(),
'template': self.template.dump()
})
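# Hedged usage sketch (not from the original project): a Windfile is a YAML
# config block, a '---' separator, and a mustache template. Keys under 'data'
# feed the template; a {'from_env': NAME} mapping is resolved from os.environ
# or from env_injected. Note that _render_config only handles str, list and
# dict values, so keep config values in those types. WIND_OWNER is a made-up
# variable name for this example.
if __name__ == '__main__':
    sample = (
        'data:\n'
        '  app: demo\n'
        '  owner:\n'
        '    from_env: WIND_OWNER\n'
        '---\n'
        'Deploying {{app}} for {{owner}}'
    )
    wf = Windfile(sample)
    dist, config = wf.render(env_injected={'WIND_OWNER': 'alice'})
    print(dist)  # Deploying demo for alice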
| 2.140625
| 2
|
src/second_mininum_node_671.py
|
xiezhq-hermann/LeetCode-in-Python
| 3
|
12777542
|
<reponame>xiezhq-hermann/LeetCode-in-Python
#
# Given a non-empty special binary tree consisting of nodes with non-negative values, where each node in this tree has exactly two or zero sub-nodes. If the node has two sub-nodes, then this node's value is the smaller value among its two sub-nodes.
#
# Given such a binary tree, you need to output the second minimum value in the set made of all the nodes' value in the whole tree.
#
# If no such second minimum value exists, output -1 instead.
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def findSecondMinimumValue(self, root):
"""
:type root: TreeNode
:rtype: int
"""
minimum = -1
while root.left:
if root.val < root.left.val <= root.right.val:
return root.left.val
elif root.val < root.right.val <= root.left.val:
return root.right.val
elif root.val == root.right.val < root.left.val:
minimum = self.findSecondMinimumValue(root.right)
return min(minimum, root.left.val) if minimum != -1 else root.left.val
elif root.val == root.left.val < root.right.val:
minimum = self.findSecondMinimumValue(root.left)
return min(minimum, root.right.val) if minimum != -1 else root.right.val
else:
left, right = self.findSecondMinimumValue(root.left), self.findSecondMinimumValue(root.right)
return min(left, right) if min(left, right) != -1 else max(left, right)
return minimum
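# Quick sanity check (illustrative, not part of the original solution):
#         2
#        / \
#       2   5
#          / \
#         5   7
# The distinct values are {2, 5, 7}, so the second minimum is 5.
if __name__ == '__main__':
    root = TreeNode(2)
    root.left = TreeNode(2)
    root.right = TreeNode(5)
    root.right.left = TreeNode(5)
    root.right.right = TreeNode(7)
    print(Solution().findSecondMinimumValue(root))  # 5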
| 3.6875
| 4
|
deinkscape.py
|
Emoji-COLRv0/emojitwo
| 313
|
12777543
|
#!/usr/bin/env python3
# -*- mode: python; coding: utf-8 -*-
# By HarJIT in 2020. MIT/Expat licence.
import os, xml.dom.minidom, shutil, re, glob
svgpresattrs = ("alignment-baseline", "baseline-shift", "clip", "clip-path", "clip-rule", "color",
"color-interpolation", "color-interpolation-filters", "color-profile", "color-rendering", "cursor",
"direction", "display", "dominant-baseline", "enable-background", "fill", "fill-opacity",
"fill-rule", "filter", "flood-color", "flood-opacity", "font-family", "font-size",
"font-size-adjust", "font-stretch", "font-style", "font-variant", "font-weight",
"glyph-orientation-horizontal", "glyph-orientation-vertical", "image-rendering", "kerning",
"letter-spacing", "lighting-color", "marker-end", "marker-mid", "marker-start", "mask", "opacity",
"overflow", "pointer-events", "shape-rendering", "solid-color", "solid-opacity", "stop-color",
"stop-opacity", "stroke", "stroke-dasharray", "stroke-dashoffset", "stroke-linecap",
"stroke-linejoin", "stroke-miterlimit", "stroke-opacity", "stroke-width", "text-anchor",
"text-decoration", "text-rendering", "transform", "unicode-bidi", "vector-effect",
"visibility", "word-spacing", "writing-mode")
needlessline = re.compile(r"(?m)^\s*\n")
def has_real_dc(document):
if document.getElementsByTagName("cc:license"):
return True
elif document.getElementsByTagName("cc:License"):
return True
elif document.getElementsByTagName("dc:contributor"):
return True
elif document.getElementsByTagName("cc:Agent"):
return True
elif document.getElementsByTagName("cc:permits"):
return True
elif document.getElementsByTagName("cc:requires"):
return True
return False
for pn in glob.glob("**/*.svg", recursive=True):
i = os.path.basename(pn)
if "draft" in i.casefold():
continue
document = xml.dom.minidom.parse(pn)
changed = False
keep_metadata = has_real_dc(document)
retain_ns = ["xmlns:xlink"]
if keep_metadata:
retain_ns.extend(["xmlns:rdf", "xmlns:cc", "xmlns:dc"])
for element in document.getElementsByTagName("*"):
if element.nodeName == "metadata" and not keep_metadata:
print(i, "removing", element.nodeName)
changed = True
element.parentNode.removeChild(element)
elif element.nodeName == "defs":
if (not element.childNodes) or (len(element.childNodes) == 1 and
element.firstChild.nodeName == "#text" and
not element.firstChild.wholeText.strip()):
print(i, "removing", element.nodeName)
changed = True
element.parentNode.removeChild(element)
elif element.nodeName.startswith(("inkscape:", "sodipodi:")):
print(i, "removing", element.nodeName)
changed = True
element.parentNode.removeChild(element)
#
if element.hasAttribute("style"):
# Rip SVG pres. attributes out of inline CSS, replacing any overridden attributes
# Note: this will bork on quoted ; in values, which I don't expect to occur.
stylelist = element.getAttribute("style").strip(";").split(";")
styleout = ""
for style in stylelist:
if ":" not in style:
continue # nvm
name, val = style.split(":", 1)
if name in svgpresattrs:
print(i, "attributising", name)
changed = True
element.setAttribute(name.strip(), val.strip())
elif "inkscape" in name:
print(i, "removing", name)
changed = True
else:
print(i, "retaining", name)
changed = True
styleout += style + ";"
if not styleout:
element.removeAttribute("style")
else:
element.setAttribute("style", styleout)
for attr in list(element.attributes.keys())[:]:
if attr.startswith("stroke-") and not element.hasAttribute("stroke") and not (element.nodeName == "g"):
print(i, "removing", attr)
changed = True
element.removeAttribute(attr)
elif attr.startswith("inkscape:") or attr.startswith("sodipodi:"):
print(i, "removing", attr)
changed = True
element.removeAttribute(attr)
elif attr.startswith("xmlns:") and attr not in retain_ns:
print(i, "removing", attr)
changed = True
element.removeAttribute(attr)
elif (element.nodeName == "svg") and (attr == "version"):
print(i, "removing", attr)
changed = True
element.removeAttribute("version")
elif attr == "fill-opacity" and element.getAttribute("fill-opacity") == "1":
print(i, "removing", attr)
changed = True
element.removeAttribute("fill-opacity")
if element.hasAttribute("stroke"):
print(i, "has stroke")
if element.hasAttribute("id") and ((not element.parentNode) or
element.parentNode.nodeName != "defs"):
# Autogenerated ID rubbish
if re.compile(r"^{}\d+$".format(element.nodeName)).match(element.getAttribute("id")):
print(i, "removing ID", element.getAttribute("id"))
changed = True
element.removeAttribute("id")
if changed:
shutil.move(pn, pn + "~")
with open(pn, "w") as f:
x = document.toxml().replace("<?xml version=\"1.0\" ?>", "")
f.write("".join(needlessline.split(x)))
os.unlink(pn + "~")
| 1.929688
| 2
|
snapboard/forms.py
|
SarathkumarJ/snapboard
| 0
|
12777544
|
from django import forms
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
from django.forms import widgets, ValidationError
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext
from snapboard.models import Category, UserSettings
class PostForm(forms.Form):
post = forms.CharField(
label = '',
widget=forms.Textarea(attrs={
'rows':'8',
'cols':'120',
}),
)
private = forms.CharField(
label=_("Recipients"),
max_length=150,
widget=forms.TextInput(),
required=False,
)
def clean_private(self):
recipients = self.cleaned_data['private']
if len(recipients.strip()) < 1:
return []
recipients = filter(lambda x: len(x.strip()) > 0, recipients.split(','))
        recipients = set(x.strip() for x in recipients) # set of usernames
u = User.objects.filter(username__in=recipients).order_by('username')
if len(u) != len(recipients):
            u_set = set(str(x.username) for x in u)
u_diff = recipients.difference(u_set)
raise ValidationError(ungettext(
"The following is not a valid user:", "The following are not valid user(s): ",
len(u_diff)) + ' '.join(u_diff))
return u
class ThreadForm(forms.Form):
# def __init__( self, *args, **kwargs ):
# super( ThreadForm, self ).__init__( *args, **kwargs )
# self.fields['category'] = forms.ChoiceField(
# label = _('Category'),
# choices = [(str(x.id), x.label) for x in Category.objects.all()]
# )
# # this is here to set the order
# category = forms.CharField(label=_('Category'))
subject = forms.CharField(max_length=80,
label=_('Subject'),
widget=forms.TextInput(
attrs={
'size': '80',
})
)
post = forms.CharField(widget=forms.Textarea(
attrs={
'rows':'8',
'cols': '80',
}),
label=_('Message')
)
# def clean_category(self):
# id = int(self.cleaned_data['category'])
# return id
class UserSettingsForm(forms.ModelForm):
def __init__(self, *pa, **ka):
user = ka.pop('user')
self.user = user
super(UserSettingsForm, self).__init__(*pa, **ka)
self.fields['frontpage_filters'].choices = [
(cat.id, cat.label) for cat in Category.objects.all() if
cat.can_read(user)
]
frontpage_filters = forms.MultipleChoiceField(label=_('Front page categories'))
class Meta:
model = UserSettings
exclude = ('user',)
def clean_frontpage_filters(self):
frontpage_filters = [cat for cat in (Category.objects.get(pk=id) for id in
self.cleaned_data['frontpage_filters']) if cat.can_read(self.user)]
return frontpage_filters
class LoginForm(forms.Form):
username = forms.CharField(max_length=30, label=_("Username"))
password = forms.CharField(widget=widgets.PasswordInput, label=_("Password"))
def clean_password(self):
scd = self.cleaned_data
self.user = authenticate(username=scd['username'], password=scd['password'])
if self.user is not None:
if self.user.is_active:
return self.cleaned_data['password']
else:
raise ValidationError(_('Your account has been disabled.'))
else:
raise ValidationError(_('Your username or password were incorrect.'))
class InviteForm(forms.Form):
user = forms.CharField(max_length=30, label=_('Username'))
def clean_user(self):
user = self.cleaned_data['user']
try:
user = User.objects.get(username=user)
except User.DoesNotExist:
raise ValidationError(_('Unknown username'))
return user
class AnwserInvitationForm(forms.Form):
decision = forms.ChoiceField(label=_('Answer'), choices=((0, _('Decline')), (1, _('Accept'))))
# vim: ai ts=4 sts=4 et sw=4
| 2.234375
| 2
|
SeamErasure/lib/weight_data.py
|
fdp0525/seam-erasure
| 1
|
12777545
|
#!/usr/bin/env python
"""
Reads and writes weight data files.
!!! Weight data files must be in Image row ordering (0, 0) in the top-left. !!!
"""
from __future__ import print_function, division
from numpy import *
import gzip
def read_tex_from_file(ioFile):
'''
Reads a .data file into memory.
Inputs:
ioFile: a file for the .data file
Returns:
width-by-height-by-#channels numpy float32 array of data
width-by-height numpy boolean array where True values correspond to
values where weights are zero in all channels.
'''
f = gzip.GzipFile(fileobj=ioFile, mode='rb')
# fromfile() is a numpy function
# UPDATE: We can't use fromfile() on a gzip file object. We have to read
# it first and then use frombuffer().
# http://stackoverflow.com/questions/15966335/efficient-numpy-
# fromfile-on-zipped-files
# NOTE: I should make a dtype('')
header = f.read(3 * uint32().itemsize)
width, height, channels = frombuffer(header, uint32, 3)
# Make a mask.
# Since every pixel in the model should have some weight, the mask can
# be True if any non-zero weight every appears for a pixel.
mask = zeros((width, height), dtype = bool)
# This is inefficient. We could read it at once, but I don't want to think
# about making sure the channel-wise memory layout is what numpy wants.
result = zeros((width, height, channels), dtype = float32)
for chan in range(channels):
data = f.read(width * height * float32().itemsize)
data = frombuffer(data, float32, width * height).reshape(width, height)
# Update the mask with any nonzero entries.
mask = logical_or(mask, data != 0)
result[:, :, chan] = data
result = result[::-1]
return result, mask
def read_tex_from_path(path):
'''
Reads a .data file into memory.
Inputs:
path: a path to the .data file
Returns:
width-by-height-by-#channels numpy float32 array of data
width-by-height numpy boolean array where True values correspond to
values where weights are zero in all channels.
'''
print('+ Loading:', path)
    with open(path, 'rb') as f:
        result, mask = read_tex_from_file(f)
print('- Loaded:', path)
return result, mask
def write_tex_to_file(ioFile, data):
'''
Saves a .data to the given file.
Inputs:
ioFile: a File at which to save the .data file
data: width-by-height-by-#channels numpy float32 array of data
'''
data = data[::-1]
f = gzip.GzipFile(fileobj=ioFile, mode='wb')
header = zeros(3, dtype = uint32)
header[:] = data.shape
    f.write(header.tobytes())
channel = zeros((data.shape[0], data.shape[1]), dtype = float32)
for ch in range(data.shape[2]):
channel[:] = data[:, :, ch]
        f.write(channel.tobytes())
def write_tex_to_path(path, data):
'''
Saves a .data to disk.
Inputs:
path: a path at which to save the .data file
data: width-by-height-by-#channels numpy float32 array of data
'''
print('+ Saving:', path)
    with open(path, 'wb') as f:
write_tex_to_file(f, data)
print('- Saved:', path)
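# A round-trip sketch (hypothetical path; a square texture keeps the
# width/height ordering unambiguous):
#   tex = random.rand(16, 16, 4).astype(float32)
#   write_tex_to_path('example.data', tex)
#   tex2, mask = read_tex_from_path('example.data')
#   assert allclose(tex, tex2)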
def normalize_data(data, mask = None):
'''
    Normalize the width-by-height-by-#channels array `data` so the channels
    sum to one. If `mask` is given, only entries where `mask` is True (pixels
    that carry weights) are normalized. Modifies `data` in place and
    returns None.
'''
if mask is None:
data /= data.sum(axis = 2)[:, :, newaxis]
else:
assert mask.shape == data.shape[:2]
data[mask] /= data.sum(axis = 2)[mask][..., newaxis]
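# A minimal usage sketch (hypothetical shapes); the mask marks pixels that
# carry weights, matching the mask returned by read_tex_from_file():
#   data = random.rand(4, 4, 3).astype(float32)
#   mask = data.sum(axis=2) > 0.5
#   normalize_data(data, mask)
#   assert allclose(data[mask].sum(axis=-1), 1.0)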
if __name__ == '__main__':
import sys
def usage():
print("Usage:", sys.argv[0], "path/to/tex1.data path/to/tex2.data",
file = sys.stderr)
sys.exit(-1)
if len(sys.argv) != 3:
usage()
path1, path2 = sys.argv[1:]
tex1, mask1 = read_tex_from_path(path1)
tex2, mask2 = read_tex_from_path(path2)
assert tex1.shape == tex2.shape
assert mask1.shape == mask2.shape
assert all(mask1 == mask2)
tex1 = tex1[mask1]
tex2 = tex2[mask2]
# This is pretty memory intensive, so let's be efficient.
# diff:
# diff = tex1 - tex2
diff = tex1
subtract(tex1, tex2, diff)
# Don't use tex1 anymore, it's been reused as diff.
del tex1
# absolute difference:
# abs_diff = abs(tex1-tex2)
abs_diff = diff
absolute(diff, abs_diff)
# Don't use diff anymore, it's been reused as abs_diff.
del diff
total_abs_diff = abs_diff.sum()
print('Total absolute difference:', total_abs_diff)
print('Average absolute difference:',
total_abs_diff / prod(abs_diff.shape))
print('Median absolute difference:', median(abs_diff))
print('Maximum absolute difference:', abs_diff.max())
print('Minimum absolute difference:', abs_diff.min())
# difference, squared:
# abs_diff2 = abs_diff**2
abs_diff2 = abs_diff
square(abs_diff, abs_diff2)
# Don't use abs_diff anymore, it's been reused as abs_diff2.
del abs_diff
avg_abs_diff2 = average(abs_diff2)
print('Mean squared error:', avg_abs_diff2)
print('Root mean squared error:', sqrt(avg_abs_diff2))
| 3.125
| 3
|
customer/migrations/0014_pspuser_pending_deposit.py
|
neonexchange/psp_template
| 5
|
12777546
|
# Generated by Django 2.0 on 2018-01-20 23:24
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('customer', '0013_auto_20180120_2322'),
]
operations = [
migrations.AddField(
model_name='pspuser',
name='pending_deposit',
field=models.ForeignKey(
blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='customer.Deposit'),
),
]
| 1.578125
| 2
|
utils/random.py
|
Saizuo/EpicBot
| 3
|
12777547
|
"""
Copyright 2021 Nirlep_5252_
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import random
from discord.ext import commands
letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
characters = "!@#$%&*"
numbers = "1234567890"
email_fun = [
'69420', '8008135', 'eatsA$$', 'PeekABoo',
'TheShire', 'isFAT', 'Dumb_man', 'Ruthless_gamer',
'Sexygirl69', 'Loyalboy69', 'likesButts'
]
passwords = [
'<PASSWORD>', '<PASSWORD>', '<PASSWORD>',
'<PASSWORD>', '<PASSWORD>',
'<PASSWORD>', '<PASSWORD>',
'SayHelloToMyLittleFriend', 'ImUnderYourBed',
'TellMyWifeILoveHer', '<PASSWORD>', '<PASSWORD>', 'IKnewYouWouldHackIntoMyAccount',
'BestPasswordE<PASSWORD>', '<PASSWORD>', 'VoteMikuniUwU'
]
DMs = [
"send nudes please", "i invited Mikuni and i got a cookie",
"i hope my mum doesn't find my nudes folder",
"please dont bully me", "https://youtu.be/oHg5SJYRHA0",
"i like bananas", "i use discord in light mode",
"if you are reading this u shud vote Mikuni", "send feet pics when",
"sUbScRiBe To mY yOuTuBe ChAnNeL", "the impostor is sus", "python makes me horny"
]
discord_servers = [
"Sons of Virgins", "Small Benis Gang", "Gamers United",
"Anime Server 69420", "Cornhub", "<NAME>"
]
def gen_random_string(l_: int):
    # Build a random alphanumeric string of exactly l_ characters.
    return "".join(random.choice(letters + numbers) for _ in range(l_))
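# Example: gen_random_string(8) might return something like "aZ3kQ9x1".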
async def send_random_tip(ctx: commands.Context, msg: str, chances: int):
    # Send the tip with a 1-in-`chances` probability; otherwise do nothing.
    if random.randint(1, chances) == chances:
        return await ctx.send(f"**Pro Tip:** {msg}")
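# Typical call site inside a command handler (hypothetical message and odds):
#   await send_random_tip(ctx, "You can vote for the bot to support it!", 100)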
| 1.945313
| 2
|
lib/rucio/db/sqla/migrate_repo/versions/4783c1f49cb4_create_distance_table.py
|
balrampariyarath/rucio
| 1
|
12777548
|
<gh_stars>1-10
# Copyright European Organization for Nuclear Research (CERN)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - <NAME>, <<EMAIL>>, 2015
# - <NAME>, <<EMAIL>>, 2017
"""create distance table
Revision ID: 4783c1f49cb4
Revises: 277b5fbb41d3
Create Date: 2015-05-21 08:11:14.318464
"""
from alembic.op import (create_table, create_primary_key, create_foreign_key,
create_check_constraint, create_index, drop_table)
from alembic import context
import sqlalchemy as sa
from rucio.db.sqla.types import GUID
# revision identifiers, used by Alembic.
revision = '4783c1f49cb4'
down_revision = '277b5fbb41d3'
def upgrade():
'''
upgrade method
'''
create_table('distances',
sa.Column('src_rse_id', GUID()),
sa.Column('dest_rse_id', GUID()),
sa.Column('ranking', sa.Integer),
sa.Column('agis_distance', sa.Integer),
sa.Column('geoip_distance', sa.Integer),
sa.Column('updated_at', sa.DateTime),
sa.Column('created_at', sa.DateTime))
if context.get_context().dialect.name != 'sqlite':
create_primary_key('DISTANCES_PK', 'distances', ['src_rse_id', 'dest_rse_id'])
create_foreign_key('DISTANCES_SRC_RSES_FK', 'distances', 'rses', ['src_rse_id'], ['id'])
create_foreign_key('DISTANCES_DEST_RSES_FK', 'distances', 'rses', ['dest_rse_id'], ['id'])
create_check_constraint('DISTANCES_CREATED_NN', 'distances', 'created_at is not null')
create_check_constraint('DISTANCES_UPDATED_NN', 'distances', 'updated_at is not null')
create_index('DISTANCES_DEST_RSEID_IDX', 'distances', ['dest_rse_id'])
def downgrade():
'''
downgrade method
'''
drop_table('distances')
| 1.90625
| 2
|
bpf-echo.py
|
cwyzb/bpf-echo
| 2
|
12777549
|
<reponame>cwyzb/bpf-echo
#!/usr/bin/env python3
# Copyright 2019 Path Network, Inc. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
from bcc import BPF
from pyroute2 import IPRoute
import socket
import ipaddress
import argparse
import time
import sys
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
"--ipv4",
default="127.0.0.1",
help="IPv4 address that will reflect packets. Disabled if empty string.",
)
parser.add_argument(
"--ipv6",
default="",
help="IPv6 address that will reflect packets. Disabled if empty string.",
)
parser.add_argument(
"--port",
type=int,
default=12345,
help="TCP/UDP destination port that will reflect packets.",
)
parser.add_argument(
"--ifname", default="lo", help="Interface the eBPF classifier will be loaded on."
)
args = parser.parse_args()
if not args.ipv4 and not args.ipv6:
print("at least one of --ipv4 and --ipv6 has to be given", file=sys.stderr)
    sys.exit(1)
ipr = IPRoute()
text = """
#define KBUILD_MODNAME "foo"
#include <linux/if_ether.h>
#include <linux/pkt_cls.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
int echo(struct __sk_buff *skb) {
void *data = (void*)(long)skb->data;
void *data_end = (void*)(long)skb->data_end;
struct ethhdr *eth = data;
if (unlikely((void*)(eth + 1) > data_end))
return TC_ACT_SHOT;
if (unlikely(eth->h_proto != htons(ETH_P_IP) && eth->h_proto != htons(ETH_P_IPV6)))
return TC_ACT_OK;
struct iphdr *ip = (void*)(eth + 1);
struct ipv6hdr *ip6 = (void*)(eth + 1);
void *ip_payload;
u8 l4_proto;
u16 len = 0;
if (eth->h_proto == htons(ETH_P_IP)) {
#ifdef ENABLE_IPV4
if (unlikely((void*)(ip + 1) > data_end))
return TC_ACT_SHOT;
if (ip->daddr != IPV4_DEST)
return TC_ACT_OK;
l4_proto = ip->protocol;
ip_payload = (void*)(ip + 1);
#else
return TC_ACT_OK;
#endif
} else {
#ifdef ENABLE_IPV6
if (unlikely((void*)(ip6 + 1) > data_end))
return TC_ACT_SHOT;
u64 *ipdest = (void*)&ip6->daddr;
if (ipdest[0] != IPV6_DEST_HIGH || ipdest[1] != IPV6_DEST_LOW)
return TC_ACT_OK;
l4_proto = ip6->nexthdr;
ip_payload = (void*)(ip6 + 1);
#else
return TC_ACT_OK;
#endif
}
if (unlikely(l4_proto != IPPROTO_TCP && l4_proto != IPPROTO_UDP))
return TC_ACT_OK;
u16 *sport = ip_payload;
if (unlikely((void*)(sport + 1) > data_end))
return TC_ACT_SHOT;
u16 *dport = (void*)(sport + 1);
if (unlikely((void*)(dport + 1) > data_end))
return TC_ACT_SHOT;
if (*dport != DPORT)
return TC_ACT_OK;
if (l4_proto == IPPROTO_TCP) {
struct tcphdr *tcp = ip_payload;
if (unlikely((void*)(tcp + 1) > data_end))
return TC_ACT_SHOT;
if (tcp->syn || tcp->fin || tcp->rst)
return TC_ACT_OK;
u32 tmp_seq = tcp->seq;
tcp->seq = tcp->ack_seq;
tcp->ack_seq = tmp_seq;
}
u8 tmp_mac[ETH_ALEN];
memcpy(tmp_mac, eth->h_dest, ETH_ALEN);
memcpy(eth->h_dest, eth->h_source, ETH_ALEN);
memcpy(eth->h_source, tmp_mac, ETH_ALEN);
if (eth->h_proto == htons(ETH_P_IP)) {
u32 tmp_ip = ip->saddr;
ip->saddr = ip->daddr;
ip->daddr = tmp_ip;
} else {
u64 tmp_ip;
u64 *ipsrc = (void*)&ip6->saddr, *ipdest = (void*)&ip6->daddr;
tmp_ip = ipsrc[0];
ipsrc[0] = ipdest[0];
ipdest[0] = tmp_ip;
tmp_ip = ipsrc[1];
ipsrc[1] = ipdest[1];
ipdest[1] = tmp_ip;
}
u16 tmp_port = *sport;
*sport = *dport;
*dport = tmp_port;
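    // No checksum updates are needed for any of the swaps above: the IPv4
    // header checksum and the TCP/UDP checksums are one's-complement sums, so
    // swapping src/dst addresses, ports, or seq/ack only reorders the summands.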
return TC_ACT_OK;
}
"""
try:
port = socket.htons(args.port)
idx = ipr.link_lookup(ifname=args.ifname)[0]
cflags = ["-DDPORT={}".format(port)]
sock4 = None
if args.ipv4 != "":
ipv4 = int.from_bytes(
ipaddress.IPv4Address(args.ipv4).packed, byteorder="little"
)
cflags.extend(("-DENABLE_IPV4", "-DIPV4_DEST={}u".format(ipv4)))
sock4 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock4.bind((args.ipv4, args.port))
sock4.listen(1024)
sock6 = None
if args.ipv6:
ipv6 = ipaddress.IPv6Address(args.ipv6)
ipv6_high = int.from_bytes(ipv6.packed[:8], byteorder="little")
ipv6_low = int.from_bytes(ipv6.packed[8:], byteorder="little")
cflags.extend(
(
"-DENABLE_IPV6",
"-DIPV6_DEST_HIGH={}ull".format(ipv6_high),
"-DIPV6_DEST_LOW={}ull".format(ipv6_low),
)
)
sock6 = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
sock6.bind((args.ipv6, args.port))
sock6.listen(1024)
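    # The bound listening sockets above complete the TCP handshake: the
    # classifier passes SYN/FIN/RST segments through to the stack and only
    # echoes packets on established flows.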
b = BPF(text=text, debug=0, cflags=cflags)
fn = b.load_func("echo", BPF.SCHED_CLS)
ipr.tc("add", "clsact", idx)
ipr.tc(
"add-filter",
"bpf",
idx,
":1",
fd=fn.fd,
name=fn.name,
parent="ffff:fff3",
classid=1,
direct_action=True,
)
while True:
time.sleep(1)
finally:
if "idx" in locals():
ipr.tc("del", "clsact", idx)
| 2.359375
| 2
|
tests/test_learning.py
|
priyankshah7/hypers
| 10
|
12777550
|
import numpy as np
import hypers as hp
class TestLearning:
def setup(self):
self.n3 = np.random.rand(10, 10, 30)
self.n4 = np.random.rand(10, 10, 10, 30)
self.n5 = np.random.rand(10, 10, 10, 2, 30)
self.h3 = hp.hparray(self.n3)
self.h4 = hp.hparray(self.n4)
self.h5 = hp.hparray(self.n5)
self.arrays = (self.h3, self.h4, self.h5)
def test_abundance(self):
for array in self.arrays:
ucls = array.abundance.ucls
nnls = array.abundance.nnls
fcls = array.abundance.fcls
for amethod in (ucls, nnls, fcls):
spec1d = np.random.rand(array.shape[-1])
_ = amethod.calculate(spec1d)
assert amethod.map.shape == array.shape[:-1] + (1,)
spec2d = np.random.rand(array.shape[-1], 3)
_ = amethod.calculate(spec2d)
assert amethod.map.shape == array.shape[:-1] + (3,)
| 2.671875
| 3
|