| code (string, 2-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (string, 1 class) | license (string, 15 classes) | size (int32, 2-1.05M) |
|---|---|---|---|---|---|
"""
Make torrent.
Author(s): Arno Bakker, Bram Cohen
"""
from tribler_core.utilities import path_util
from tribler_core.utilities.unicode import ensure_unicode_detect_encoding
def pathlist2filename(pathlist):
""" Convert a multi-file torrent file 'path' entry to a filename. """
return path_util.join(*(ensure_unicode_detect_encoding(x) for x in pathlist))
def get_length_from_metainfo(metainfo, selectedfiles):
if b'files' not in metainfo[b'info']:
# single-file torrent
return metainfo[b'info'][b'length']
# multi-file torrent
files = metainfo[b'info'][b'files']
total = 0
for i in range(len(files)):
path = files[i][b'path']
length = files[i][b'length']
if length > 0 and (not selectedfiles or pathlist2filename(path) in selectedfiles):
total += length
return total
| hbiyik/tribler | src/tribler-core/tribler_core/utilities/maketorrent.py | Python | lgpl-3.0 | 859 |
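A minimal, standalone sketch of the same length-summing logic, using os.path.join in place of tribler_core's path utilities (the metainfo dict below is hypothetical):

import os

def total_length(metainfo, selectedfiles=None):
    info = metainfo[b'info']
    if b'files' not in info:  # single-file torrent
        return info[b'length']
    total = 0
    for entry in info[b'files']:  # multi-file torrent
        name = os.path.join(*(p.decode('utf-8', 'replace') for p in entry[b'path']))
        if entry[b'length'] > 0 and (not selectedfiles or name in selectedfiles):
            total += entry[b'length']
    return total

metainfo = {b'info': {b'files': [
    {b'path': [b'dir', b'a.txt'], b'length': 3},
    {b'path': [b'dir', b'b.txt'], b'length': 5},
]}}
print(total_length(metainfo))  # 8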
#!/usr/bin/python
#
# Copyright (C) 2015 JWCrypto Project Contributors, see LICENSE file
from setuptools import setup
setup(
name = 'jwcrypto',
version = '0.2.0',
license = 'LGPLv3+',
maintainer = 'JWCrypto Project Contributors',
maintainer_email = 'simo@redhat.com',
url='https://github.com/simo5/jwcrypto',
packages = ['jwcrypto'],
description = 'Implementation of JOSE Web standards',
classifiers = [
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Intended Audience :: Developers',
'Topic :: Security',
'Topic :: Software Development :: Libraries :: Python Modules'
],
data_files = [('share/doc/jwcrypto', ['LICENSE', 'README.md'])],
)
| puiterwijk/jwcrypto | setup.py | Python | lgpl-3.0 | 764 |
from pycp2k.inputsection import InputSection
class _becke88_lr3(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Section_parameters = None
self.Scale_x = None
self.Omega = None
self._name = "BECKE88_LR"
self._keywords = {'Omega': 'OMEGA', 'Scale_x': 'SCALE_X'}
self._attributes = ['Section_parameters']
| SINGROUP/pycp2k | pycp2k/classes/_becke88_lr3.py | Python | lgpl-3.0 | 385 |
from zope.interface import implements
from Products.CMFQuickInstallerTool.interfaces import INonInstallable
from five import grok
from collective.grok import gs
from zope.i18nmessageid import MessageFactory
# Set up the i18n message factory for our package
MessageFactory = MessageFactory('wcc.api')
_ = MessageFactory
class HiddenProducts(grok.GlobalUtility):
"""This hides the upgrade profiles from the quick installer tool."""
implements(INonInstallable)
grok.name('wcc.api.upgrades')
def getNonInstallableProducts(self):
return [
'wcc.api.upgrades',
]
gs.profile(name=u'default',
title=u'wcc.api',
description=_(u''),
directory='profiles/default')
| oikoumene/wcc.api | wcc/api/__init__.py | Python | lgpl-3.0 | 734 |
#!/usr/local/bin/python3
''' # ---
compare_record_counts.py
Compares two files full of csv-formatted lists of record counts -- files
generated by mysql_record_count.py, for example. This program generates a file
giving a list of tables with changes in the counts of their records and the
number of records added or deleted.
''' # ---
import re, os, sys, time, io
filePathA = ''
filePathB = ''
def cleanPath( pathIn ):
sep = os.path.sep
if sep == '\\':
sep = '\\\\'
newPath = re.sub( r'[\\/]', sep, pathIn )
return newPath
def loadDict( myPath, myDict ):
with open( myPath, encoding='utf8' ) as myFile:
for line in myFile:
(a, b) = line.split(",")
myDict[a] = b.rstrip()
return myDict
quitPattern = re.compile( r'^(quit|q|stop|exit)$' )
# Get one input file path
print()
while not os.path.isfile(filePathA):
print( 'Path to input file 1:' )
filePathA = cleanPath( input() )
if quitPattern.search( filePathA.lower() ):
print( 'exiting' )
sys.exit()
if not os.path.isfile( filePathA ):
print( '\ninput file ' + filePathA + ' not found\n' )
# Get the other input file path
print()
while not os.path.isfile(filePathB):
print( 'Path to input file 2:' )
filePathB = cleanPath( input() )
if quitPattern.search( filePathB.lower() ):
print( 'exiting' )
sys.exit()
if not os.path.isfile( filePathB ):
print( '\ninput file ' + filePathB + ' not found\n' )
dictA = {}
dictB = {}
diffs = {}
loadDict( filePathA, dictA )
loadDict( filePathB, dictB )
for (k,v) in dictA.items():
if dictB[k] != v:
        if int(dictB[k]) > int(v):  # compare counts numerically, not lexically
            diffs[k] = '+' + str(int(dictB[k]) - int(v))
        else:
            diffs[k] = '-' + str(int(v) - int(dictB[k]))
baseA = os.path.splitext(os.path.basename(filePathA))[0]
baseB = os.path.splitext(os.path.basename(filePathB))[0]
with open( 'compare____' + baseA + '____' + baseB + '.log', 'w' ) as outFile:
for (k,v) in diffs.items():
outFile.write( k.ljust(32) + v[0] + v[1:].rjust(6) + '\n' )
print()
# the end
# ---
| drewdle/landshark | compare_record_counts.py | Python | unlicense | 2,126 |
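A minimal sketch of the diffing step above on two in-memory count dicts (the table names and counts are hypothetical):

countsA = {"users": "10", "orders": "7"}   # counts as read from file 1
countsB = {"users": "12", "orders": "5"}   # counts as read from file 2

diffs = {}
for table, old in countsA.items():
    new = countsB[table]
    if int(new) != int(old):
        sign = '+' if int(new) > int(old) else '-'
        diffs[table] = sign + str(abs(int(new) - int(old)))

print(diffs)  # {'users': '+2', 'orders': '-2'}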
from __future__ import unicode_literals
from ckeditor.fields import RichTextField
from filer.fields.image import FilerImageField
from cms.models import CMSPlugin, PlaceholderField
from django.utils.translation import ugettext_lazy as _
from django.db import models
class Project(models.Model):
name = models.CharField(max_length=128, null=False)
image = FilerImageField(null=True, blank=True)
project_summary = RichTextField(null=True, blank=True)
location = models.CharField(max_length=128, null=False)
start_implementation = models.DateField(null=True, blank=True)
end_implementation = models.DateField(null=True, blank=True)
def __str__(self):
return '{} in {}'.format(self.name, self.location)
class ContactPerson(models.Model):
name = models.CharField(max_length=128)
telephone = models.CharField(max_length=128, null=True, blank=True)
email = models.EmailField(max_length=128, null=True, blank=True)
project = models.ForeignKey('Project', on_delete=models.CASCADE, null=True, related_name='projects_contact')
def __str__(self):
return '{} - {}'.format(self.name, self.email)
class Report(models.Model):
version = models.CharField(max_length=128, null=True, blank=True,
help_text=(_('Use numbers <small>e.g</small> A or B')))
completed = models.DateField(null=True, blank=True)
question = models.ForeignKey('Question', on_delete=models.CASCADE, related_name='questions')
project = models.ForeignKey('Project', on_delete=models.CASCADE, null=True, related_name='projects_report')
class Meta:
permissions = (
            ('can_add_report', 'Can add Report'),
('can_edit_report', 'Can edit Report')
)
def __str__(self):
return '{} completed on {}'.format(self.version, self.completed)
class Question(models.Model):
def composition():
outline = {
'findings': u'<p>Add Your Findings here<p><p>Identify Your Audience here<p>'
u'<p>Add Your Findings Description here<p><p>Add Your Conclusion and Recommendations here<p>',
}
return outline
# f_default = composition()
# defaults = f_default.values()
number = models.IntegerField(null=True, blank=True, help_text=(_('Use numbers <small>e.g</small> 1, 2 or 3')))
question = models.CharField(max_length=128, null=True, blank=True)
findings = RichTextField(null=True, blank=True,
default=composition()['findings'],
help_text=_(
'Do not delete the tags <pre><code><p> ... <p></code></pre>'
))
image = models.ImageField(max_length=128000, null=True, blank=True, upload_to='media/project')
project = models.ForeignKey('Project', on_delete=models.CASCADE, null=True, related_name='projects_question')
add_findings_placeholder = PlaceholderField(slotname='add_findings')
class Meta:
permissions = (
('can_add_question', 'Can add Question'),
('can_edit_question', 'Can edit Question')
)
def __str__(self):
return '{} for {}'.format(self.number, self.project)
class Stick2UgandaPlugin(CMSPlugin):
info = RichTextField(null=True, blank=True)
intro_small = models.CharField(max_length=128, null=True, blank=True)
def __str__(self):
return self.stick2uganda.name
class S2UImagePlugin(CMSPlugin):
image = FilerImageField(blank=True, null=True)
title = models.CharField(max_length=128, blank=True, null=True)
| pmutale/www.mutale.nl | stick2uganda/models.py | Python | unlicense | 3,647 |
"""
Tuples are very similar to lists, except that they are immutable (they cannot be changed).
"""
words = ("spam", "eggs", "sausages",)
print(words[0])
# spam
"""
Trying to reassign a value in a tuple causes a TypeError.
"""
words[1] = "cheese"
# TypeError: 'tuple' object does not support item assignment
"""
An empty tuple is created using an empty parenthesis pair.
"""
tpl = ()
| rajibmp/sololearn | python/06-types/06-tuples.py | Python | unlicense | 394 |
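Two further tuple behaviours worth remembering, shown as a short sketch (not part of the original lesson):

single = ("spam",)          # a one-element tuple needs the trailing comma
not_a_tuple = ("spam")      # this is just a string
print(type(single), type(not_a_tuple))
# <class 'tuple'> <class 'str'>

a, b, c = ("spam", "eggs", "sausages")   # tuples support unpacking
print(b)
# eggs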
"""
Django app for easy embedding of YouTube and Vimeo videos and music from
SoundCloud.
"""
VERSION = (0, 8, 'dev')
def get_release():
return '.'.join(str(i) for i in VERSION[:3])
def get_version():
"""
Returns only digit parts of version.
"""
return '.'.join(str(i) for i in VERSION[:2])
__version__ = get_release()
| orlenko/bccf | src/embed_video/__init__.py | Python | unlicense | 341 |
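For VERSION = (0, 8, 'dev'), the two helpers above produce the following (a quick illustrative check):

VERSION = (0, 8, 'dev')
print('.'.join(str(i) for i in VERSION[:3]))  # get_release() -> '0.8.dev'
print('.'.join(str(i) for i in VERSION[:2]))  # get_version() -> '0.8'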
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD 3 clause
from __future__ import print_function
import logging
import numpy as np
from optparse import OptionParser
import sys
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import RidgeClassifier
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.utils.extmath import density
from sklearn import metrics
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--report",
action="store_true", dest="print_report",
help="Print a detailed classification report.")
op.add_option("--chi2_select",
action="store", type="int", dest="select_chi2",
help="Select some number of features using a chi-squared test")
op.add_option("--confusion_matrix",
action="store_true", dest="print_cm",
help="Print the confusion matrix.")
op.add_option("--top10",
action="store_true", dest="print_top10",
help="Print ten most discriminative terms per class"
" for every classifier.")
op.add_option("--all_categories",
action="store_true", dest="all_categories",
help="Whether to use all categories or not.")
op.add_option("--use_hashing",
action="store_true",
help="Use a hashing vectorizer.")
op.add_option("--n_features",
action="store", type=int, default=2 ** 16,
help="n_features when using the hashing vectorizer.")
op.add_option("--filtered",
action="store_true",
help="Remove newsgroup information that is easily overfit: "
"headers, signatures, and quoting.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
print(__doc__)
op.print_help()
print()
###############################################################################
# Load some categories from the training set
if opts.all_categories:
categories = None
else:
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
if opts.filtered:
remove = ('headers', 'footers', 'quotes')
else:
remove = ()
print("Loading 20 newsgroups dataset for categories:")
print(categories if categories else "all")
data_train = fetch_20newsgroups(subset='train', categories=categories,
shuffle=True, random_state=42,
remove=remove)
data_test = fetch_20newsgroups(subset='test', categories=categories,
shuffle=True, random_state=42,
remove=remove)
print('data loaded')
categories = data_train.target_names # for case categories == None
def size_mb(docs):
return sum(len(s.encode('utf-8')) for s in docs) / 1e6
data_train_size_mb = size_mb(data_train.data)
data_test_size_mb = size_mb(data_test.data)
print("%d documents - %0.3fMB (training set)" % (
len(data_train.data), data_train_size_mb))
print("%d documents - %0.3fMB (test set)" % (
len(data_test.data), data_test_size_mb))
print("%d categories" % len(categories))
print()
# split a training set and a test set
y_train, y_test = data_train.target, data_test.target
print("Extracting features from the training dataset using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
vectorizer = HashingVectorizer(stop_words='english', non_negative=True,
n_features=opts.n_features)
X_train = vectorizer.transform(data_train.data)
else:
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
stop_words='english')
X_train = vectorizer.fit_transform(data_train.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_train_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_train.shape)
print()
print("Extracting features from the test dataset using the same vectorizer")
t0 = time()
X_test = vectorizer.transform(data_test.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_test_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_test.shape)
print()
if opts.select_chi2:
print("Extracting %d best features by a chi-squared test" %
opts.select_chi2)
t0 = time()
ch2 = SelectKBest(chi2, k=opts.select_chi2)
X_train = ch2.fit_transform(X_train, y_train)
X_test = ch2.transform(X_test)
print("done in %fs" % (time() - t0))
print()
def trim(s):
"""Trim string to fit on terminal (assuming 80-column display)"""
return s if len(s) <= 80 else s[:77] + "..."
# mapping from integer feature name to original token string
if opts.use_hashing:
feature_names = None
else:
feature_names = np.asarray(vectorizer.get_feature_names())
###############################################################################
# Benchmark classifiers
def benchmark(clf):
print('_' * 80)
print("Training: ")
print(clf)
t0 = time()
clf.fit(X_train, y_train)
train_time = time() - t0
print("train time: %0.3fs" % train_time)
t0 = time()
pred = clf.predict(X_test)
test_time = time() - t0
print("test time: %0.3fs" % test_time)
score = metrics.f1_score(y_test, pred)
print("f1-score: %0.3f" % score)
if hasattr(clf, 'coef_'):
print("dimensionality: %d" % clf.coef_.shape[1])
print("density: %f" % density(clf.coef_))
if opts.print_top10 and feature_names is not None:
print("top 10 keywords per class:")
for i, category in enumerate(categories):
top10 = np.argsort(clf.coef_[i])[-10:]
print(trim("%s: %s"
% (category, " ".join(feature_names[top10]))))
print()
if opts.print_report:
print("classification report:")
print(metrics.classification_report(y_test, pred,
target_names=categories))
if opts.print_cm:
print("confusion matrix:")
print(metrics.confusion_matrix(y_test, pred))
print()
clf_descr = str(clf).split('(')[0]
return clf_descr, score, train_time, test_time
results = []
for clf, name in (
(RidgeClassifier(tol=1e-2, solver="lsqr"), "Ridge Classifier"),
(Perceptron(n_iter=50), "Perceptron"),
(PassiveAggressiveClassifier(n_iter=50), "Passive-Aggressive"),
(KNeighborsClassifier(n_neighbors=10), "kNN")):
print('=' * 80)
print(name)
results.append(benchmark(clf))
for penalty in ["l2", "l1"]:
print('=' * 80)
print("%s penalty" % penalty.upper())
# Train Liblinear model
results.append(benchmark(LinearSVC(loss='l2', penalty=penalty,
dual=False, tol=1e-3)))
# Train SGD model
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty=penalty)))
# Train SGD with Elastic Net penalty
print('=' * 80)
print("Elastic-Net penalty")
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty="elasticnet")))
# Train NearestCentroid without threshold
print('=' * 80)
print("NearestCentroid (aka Rocchio classifier)")
results.append(benchmark(NearestCentroid()))
# Train sparse Naive Bayes classifiers
print('=' * 80)
print("Naive Bayes")
results.append(benchmark(MultinomialNB(alpha=.01)))
results.append(benchmark(BernoulliNB(alpha=.01)))
class L1LinearSVC(LinearSVC):
def fit(self, X, y):
# The smaller C, the stronger the regularization.
# The more regularization, the more sparsity.
self.transformer_ = LinearSVC(penalty="l1",
dual=False, tol=1e-3)
X = self.transformer_.fit_transform(X, y)
return LinearSVC.fit(self, X, y)
def predict(self, X):
X = self.transformer_.transform(X)
return LinearSVC.predict(self, X)
print('=' * 80)
print("LinearSVC with L1-based feature selection")
results.append(benchmark(L1LinearSVC()))
# make some plots
indices = np.arange(len(results))
results = [[x[i] for x in results] for i in range(4)]
clf_names, score, training_time, test_time = results
training_time = np.array(training_time) / np.max(training_time)
test_time = np.array(test_time) / np.max(test_time)
plt.figure(figsize=(12, 8))
plt.title("Score")
plt.barh(indices, score, .2, label="score", color='r')
plt.barh(indices + .3, training_time, .2, label="training time", color='g')
plt.barh(indices + .6, test_time, .2, label="test time", color='b')
plt.yticks(())
plt.legend(loc='best')
plt.subplots_adjust(left=.25)
plt.subplots_adjust(top=.95)
plt.subplots_adjust(bottom=.05)
for i, c in zip(indices, clf_names):
plt.text(-.3, i, c)
plt.show()
| yaodi833/shorttext | document_classification_20newsgroups.py | Python | unlicense | 9,740 |
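A minimal sketch of the "sparse vectorizer" step used above, on three hypothetical documents (assumes scikit-learn is installed):

from sklearn.feature_extraction.text import TfidfVectorizer

docs = ["space ship launch", "graphics card render", "launch the render job"]
vectorizer = TfidfVectorizer(sublinear_tf=True, stop_words='english')
X = vectorizer.fit_transform(docs)            # sparse document-term matrix
print("n_samples: %d, n_features: %d" % X.shape)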
#!/usr/bin/python3.4
# -*- coding: iso-8859-15 -*-
# -O optimizes and does not write __debug__
#
# ####################################################################################################################
import sys, os
import types
import platform
import openpyxl
from openpyxl.utils import get_column_letter
import openpyxl.utils as xls
from ..LnCommon.LnLogger import SetLogger # OK, works from the package's upper directory
##########################################################################################################
# For creating read-only attributes
# http://stackoverflow.com/questions/9920677/how-should-i-expose-read-only-fields-from-python-classes
# http://stackoverflow.com/questions/14594120/python-read-only-property
##########################################################################################################
def ro_property(name):
def ro_property_decorator(c):
setattr(c, name, property(lambda o: o.__dict__["_" + name]))
return c
return ro_property_decorator
# creation of the variables to be exposed as read-only
@ro_property('name')
@ro_property('address')
@ro_property('description')
@ro_property('author')
# ###########################################################
# - getInterfacesData(ifc) - with a single IP address on the interface
# ###########################################################
class Excel(object):
# ***********************************************
# * Calling Sample:
# * eth0 = Ln.LnInterfaces('eth0', myIP=True, DRYRUN=True, setLogger=gv.Ln.setLogger)
# ***********************************************
def __init__(self, excelFileName, fDEBUG=False):
# ----- defaults
self._name = None
self._description = None
self._author = None
self._setLogger = None
self._fDEBUG = fDEBUG
self._filename = excelFileName
self._description = "class to manage execl file."
self._author = "Loreto Notarantonio"
self._SetLogger = SetLogger
self._read()
#######################################################
# - read()
# - Reads an Excel file and returns the WorkBook
#######################################################
def _read(self, keep_vba=False):
logger = self._SetLogger(__name__)
try:
# warnings.simplefilter("ignore")
# in read-only=True:
# colNames = ws.rows[1]
# TypeError: 'generator' object is not subscriptable
self._wb = openpyxl.load_workbook( self._filename,
read_only=True,
keep_vba=False,
            data_only=True # False: include formulas
)
# use_iterators=False,
self.sheetNames = self._wb.get_sheet_names()
except Exception as why:
print("error reading file: {0} [{1}]".format(self._filename, why))
logger.error("error reading file: {0} [{1}]".format(self._filename, why))
sys.exit(99)
if self._fDEBUG:
print('sheet names: {0}'.format(self.sheetNames))
logger.info('sheet names: {0}'.format(self.sheetNames))
####################################################################
    # - exportCSV()
    # - Exports an Excel sheet to CSV format
    # - rangeString: the cell range to operate on
    # - colNames : the row number that contains the column names
####################################################################
def exportCSV(self, sheetName,
outFname,
rangeString=None,
colNames=0,
maxrows=99999999,
encoding='utf-8',
stripFields=True,
fPRINT=False):
logger = self._SetLogger(__name__)
if fPRINT:
print("Converting sheetName: [{0}] to CSV file: [{1}]." .format(sheetName, outFname))
ws = self._wb.get_sheet_by_name(sheetName)
nRows = ws.max_row
nCols = ws.max_column
# print (type(rangeString), rangeString)
if rangeString:
minCol, minRow, maxCol, maxRow = xls.range_boundaries(rangeString)
else:
minCol, minRow, maxCol, maxRow = 1, 1, ws.max_column, ws.max_row
fullRange = get_column_letter(minCol) + str(minRow) + ':' + get_column_letter(maxCol) + str(maxRow)
logger.info(" full Range: {0}".format(fullRange))
        minCol -= 1 # columns start at '0'
        maxCol -= 1 # columns start at '0'
# ---------------------------------
# - roughly speaking, this works.....
# ---------------------------------
dataList = []
# dataListOfList = []
for indexRow, row in enumerate(ws.rows):
# - take all the rows covered by the range
if minRow <= indexRow < maxRow:
# - ...and do the same for the columns
if indexRow >= colNames:
line = []
for indexCol, cell in enumerate(row):
if minCol <= indexCol <= maxCol:
val = cell.value if cell.value else ''
# if stripFields and isinstance(val, str): val=val.strip()
line.append(val)
else:
continue
# build the row ... from the values of the cells just read
lineStr = line[0]
for item in line[1:]:
if isinstance(item, str) and stripFields:
lineStr = '{0};{1}'.format(lineStr, item.strip())
else:
lineStr = '{0};{1}'.format(lineStr, item)
# ... then append it to the list
dataList.append(lineStr)
if self._fDEBUG:
for index, line in enumerate(dataList):
print ('{0:5} - {1}'.format(index, line))
if outFname:
FILE = open(outFname, "w", encoding=encoding)
for line in dataList:
line = "{0}{1}".format(line, '\n')
                FILE.write(line) # with Python 3 this must be converted to bytes
FILE.close()
logger.info("..... file: {FILE} has been written".format(FILE=outFname))
if fPRINT:
print("..... file: {FILE} has been written".format(FILE=outFname))
if self._fDEBUG:
print()
print(" full Range: {0}".format(fullRange))
print(" file {0} has been created".format(outFname))
print()
for item in dataList:
print (item)
print ()
self.data = dataList
#######################################################
# -
#######################################################
if __name__ == "__main__":
excelFileName = 'J:\\GIT-REPO\\Python3\\MP3Catalog\\data\\MP3_Master_forTEST.xlsm'
csvFile = excelFileName.rsplit('.', -1)[0] + '.csv'
mydata = Excel(excelFileName)
    mydata.exportCSV('Catalog', outFname=csvFile, rangeString="B2:Z17", colNames=4, fPRINT=True)
| Loreton/MP3Catalog | Source/LnLib/Excel/LnExcel_Class.py | Python | unlicense | 7,629 |
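A standalone sketch of the ro_property pattern used in the class above (the Thing class and its attribute are hypothetical):

def ro_property(name):
    def ro_property_decorator(c):
        setattr(c, name, property(lambda o: o.__dict__["_" + name]))
        return c
    return ro_property_decorator

@ro_property('name')
class Thing:
    def __init__(self):
        self._name = "fixed"

t = Thing()
print(t.name)            # 'fixed'
try:
    t.name = "other"     # no setter -> AttributeError
except AttributeError as exc:
    print("read-only:", exc)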
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-07 14:44
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('quiz', '0012_auto_20170407_1442'),
]
operations = [
migrations.AlterUniqueTogether(
name='selectedanswer',
unique_together=set([]),
),
]
| denys-zarubin/sweetheart_test | quiz/migrations/0013_auto_20170407_1444.py | Python | unlicense | 407 |
from __future__ import print_function
from fabric.api import local, run, cd, env
env.forward_agent = 'True'
def deploy():
local('git push')
with cd('webapps/basic/nematode'):
run('git pull')
run('chmod 644 *.js *.css *.html *.png *.pdf .htaccess')
| chebee7i/nematode | fabfile.py | Python | unlicense | 275 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hello World web app that mimics a legacy end-to-end test."""
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
class DefaultRequestHandler(webapp.RequestHandler):
def get(self):
self.response.out.write('Hello World!')
application = webapp.WSGIApplication([('/', DefaultRequestHandler), ])
def main():
util.run_wsgi_app(application)
if __name__ == '__main__':
main()
| GoogleCloudPlatform/python-compat-runtime | appengine-vmruntime/vmruntime/tests/legacy_e2e_support_test_app.py | Python | apache-2.0 | 1,030 |
"""
.. module: cloudaux.aws.decorators
:platform: Unix
:copyright: (c) 2018 by Netflix Inc., see AUTHORS for more
:license: Apache, see LICENSE for more details.
.. moduleauthor:: Patrick Kelley <pkelley@netflix.com> @monkeysecurity
.. moduleauthor:: Mike Grima <mgrima@netflix.com>
"""
import functools
import time
import boto
import botocore
RATE_LIMITING_ERRORS = ['Throttling', 'RequestLimitExceeded', 'SlowDown', 'RequestThrottled']
def rate_limited(max_attempts=None, max_delay=4):
def decorator(f):
metadata = {
'count': 0,
'delay': 0
}
@functools.wraps(f)
def decorated_function(*args, **kwargs):
def increase_delay(e):
if metadata['delay'] == 0:
metadata['delay'] = 1
elif metadata['delay'] < max_delay:
metadata['delay'] *= 2
if max_attempts and metadata['count'] > max_attempts:
raise e
metadata['count'] = 0
while True:
metadata['count'] += 1
if metadata['delay'] > 0:
time.sleep(metadata['delay'])
try:
retval = f(*args, **kwargs)
metadata['delay'] = 0
return retval
except botocore.exceptions.ClientError as e:
if e.response["Error"]["Code"] not in RATE_LIMITING_ERRORS:
raise e
increase_delay(e)
except boto.exception.BotoServerError as e:
if e.error_code not in RATE_LIMITING_ERRORS:
raise e
increase_delay(e)
return decorated_function
return decorator
def paginated(response_key, request_pagination_marker="Marker", response_pagination_marker="Marker"):
def decorator(func):
@functools.wraps(func)
def decorated_function(*args, **kwargs):
results = []
while True:
response = func(*args, **kwargs)
results.extend(response[response_key])
# If the "next" pagination marker is in the response, then paginate. Responses may not always have
# items in the response_key, so we should only key off of the response_pagination_marker.
if response.get(response_pagination_marker):
kwargs.update({request_pagination_marker: response[response_pagination_marker]})
else:
break
return results
return decorated_function
return decorator
| Netflix-Skunkworks/cloudaux | cloudaux/aws/decorators.py | Python | apache-2.0 | 2,672 |
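A usage sketch for the two decorators above, assuming cloudaux (with its boto/botocore dependencies) is importable; the three-page in-memory "API" is purely hypothetical:

from cloudaux.aws.decorators import paginated, rate_limited

PAGES = {
    None: {"Items": ["a", "b"], "Marker": "page2"},
    "page2": {"Items": ["c"], "Marker": "page3"},
    "page3": {"Items": ["d"]},           # no Marker -> pagination stops
}

@rate_limited(max_attempts=3)            # retries throttling errors with backoff
@paginated("Items")                      # follows "Marker" until it disappears
def list_items(Marker=None):
    return PAGES[Marker]

print(list_items())                      # ['a', 'b', 'c', 'd']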
import sublime
_acc_value = 1
_busy_indicator_size = 8
_commandsInProgress = []
def run_progress_indicator(thread, message, success_message, fail_message):
global _commandsInProgress
_commandsInProgress.append(CommandInProgressModel(thread, message, success_message, fail_message))
if len(_commandsInProgress) == 1:
_run(0)
def _add_command(commandInProgressModel):
global _commandsInProgress
_commandsInProgress.append(commandInProgressModel)
def _run(index):
global _commandsInProgress
if len(_commandsInProgress) >= 1:
in_progress_part = _get_in_progress_part()
finished_part = _get_finished_part()
busy_animation_part = _get_busy_animation_part(index)
status_message = ""
if in_progress_part:
status_message = "%s %s %s" % \
(in_progress_part, busy_animation_part, finished_part)
else:
status_message = "%s" % \
(finished_part)
sublime.status_message(status_message)
_update_commands_models()
sublime.set_timeout(lambda: _run(index + _acc_value), 100)
else:
sublime.status_message("")
def _update_commands_models():
global _commandsInProgress
_commandsInProgress = [commandModel for commandModel in _commandsInProgress if not commandModel.can_release()]
def _get_in_progress_part():
global _commandsInProgress
in_progress_commands_messages = [commandModel.message for commandModel in _commandsInProgress if commandModel.is_running()]
return " | ".join(in_progress_commands_messages)
def _get_finished_part():
global _commandsInProgress
finished_commands_messages = [commandModel.get_result_message() for commandModel in _commandsInProgress if not commandModel.is_running()
and commandModel.get_result_message() != ""]
return " | ".join(finished_commands_messages);
def _get_busy_animation_part(index):
    global _acc_value  # needed so the bounce direction flag updates the module-level value
    before = index % _busy_indicator_size
    after = (_busy_indicator_size - 1) - before
    if not after:
        _acc_value = -1
    if not before:
        _acc_value = 1
return "[%s = %s]" % \
("-" * before, "-" * after)
class CommandInProgressModel:
_iterations_before_release = 20
def __init__(self, thread, message, success_message, fail_message):
self.iterations_before_release = CommandInProgressModel._iterations_before_release
self.thread = thread
self.message = message
self.success_message = success_message
self.fail_message = fail_message
def is_running(self):
return self.thread.is_alive()
def get_result_message(self):
if not self.thread.is_alive():
self.iterations_before_release -= 1
if hasattr(self.thread, "result") and not self.thread.result:
return ""
if self.thread.success():
return self.success_message;
else:
return self.fail_message;
else:
return "";
def can_release(self):
return self.iterations_before_release == 0
| dimitardanailov/appbuilder-sublime-package | app_builder/thread_progress.py | Python | apache-2.0 | 3,101 |
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tests.ui import *
from debugger import *
from ui.output_tab import *
class TestOutputTab(UITestCaseBase):
def test_output_ls_on_launch(self):
otab = self.run_on_ui(lambda mc: mc.find_tab(OutputTab))
proc1 = self.run_on_ui(lambda mc: mc.debugger.begin_launch_suspended("tests/apps/test1").wait())
self.run_on_ui(lambda mc: self.assertEqual(len(otab._ls), 1))
proc2 = self.run_on_ui(lambda mc: mc.debugger.begin_launch_suspended("tests/apps/test2").wait())
self.run_on_ui(lambda mc: self.assertEqual(len(otab._ls), 2))
| natduca/ndbg | tests/ui/test_output_tab.py | Python | apache-2.0 | 1,123 |
"""compare providers
Revision ID: 6261d01a8a89
Revises: b5c4bf4603be
Create Date: 2016-05-25 08:33:09.061411
"""
# revision identifiers, used by Alembic.
revision = '6261d01a8a89'
down_revision = 'b5c4bf4603be'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
op.create_table('providers_comparison_aws',
sa.Column('id', sa.Integer, nullable=False),
sa.Column('id_aws_key', sa.Integer, nullable=False),
sa.Column('value', sa.BLOB(length=16000000), nullable=False),
sa.Column('date', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.ForeignKeyConstraint(['id_aws_key'], ['aws_key.id']),
)
def downgrade():
op.drop_table('providers_comparison_aws')
| giubil/trackit | api/files/api/migrations/versions/6261d01a8a89_compare_providers.py | Python | apache-2.0 | 808 |
#
# Copyright 2014 Thomas Rabaix <thomas.rabaix@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import element.loaders
class StaticNodeLoader(element.loaders.NodeLoader):
"""
Load a node from a static file
"""
def __init__(self, mimetypes):
self.mimetypes = mimetypes
def supports(self, path):
if isinstance(path, dict):
return 'type' in path and path['type'] == 'element.static'
filename, extension = os.path.splitext(path)
return extension[1:] in self.mimetypes
def load(self, path):
filename, extension = os.path.splitext(path)
return {
'type': 'element.static',
'file': path,
'title': filename,
'extension': extension[1:],
'mimetype': self.mimetypes[extension[1:]],
'content': False
}
def save(self, path, data):
fp = file(path, 'wb')
fp.write(data['content'])
fp.flush()
fp.close()
return os.path.isfile(path)
| rande/python-element | element/plugins/static/loader.py | Python | apache-2.0 | 1,563 |
# coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Qtopt Agent.
Implements the Qt-Opt algorithm from
"QT-Opt: Scalable Deep Reinforcement Learning for Vision-Based Robotic"
"Manipulation"
Dmitry Kalashnikov et al., 2018
https://arxiv.org/abs/1806.10293
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import typing
import gin
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.agents import data_converter
from tf_agents.agents import tf_agent
from tf_agents.networks import utils as network_utils
from tf_agents.policies import epsilon_greedy_policy
from tf_agents.policies import qtopt_cem_policy
from tf_agents.specs import tensor_spec
from tf_agents.typing import types
from tf_agents.utils import common
from tf_agents.utils import eager_utils
from tf_agents.utils import nest_utils
def compute_td_targets(next_q_values: types.Tensor,
rewards: types.Tensor,
discounts: types.Tensor) -> types.Tensor:
return tf.stop_gradient(rewards + discounts * next_q_values)
class QtOptLossInfo(typing.NamedTuple):
"""QtOptLossInfo is stored in the `extras` field of the LossInfo instance.
Both `td_loss` and `td_error` have a validity mask applied to ensure that
no loss or error is calculated for episode boundaries.
  td_loss: The **weighted** TD loss (depends on choice of loss metric and
    any weights passed to the QtOpt loss function).
td_error: The **unweighted** TD errors, which are just calculated as:
```
td_error = td_targets - q_values
```
These can be used to update Prioritized Replay Buffer priorities.
Note that, unlike `td_loss`, `td_error` may contain a time dimension when
training with RNN mode. For `td_loss`, this axis is averaged out.
"""
td_loss: types.Tensor
td_error: types.Tensor
@gin.configurable
class QtOptAgent(tf_agent.TFAgent):
"""A Qtopt Agent.
Implements the Qt-Opt algorithm from
"QT-Opt: Scalable Deep Reinforcement Learning for Vision-Based Robotic "
"Manipulation"
Dmitry Kalashnikov et al., 2018
https://arxiv.org/abs/1806.10293
"""
def __init__(
self,
time_step_spec,
action_spec,
q_network,
optimizer,
actions_sampler,
epsilon_greedy=0.1,
n_step_update=1,
emit_log_probability=False,
in_graph_bellman_update=True,
# Params for cem
init_mean_cem=None,
init_var_cem=None,
num_samples_cem=32,
num_elites_cem=4,
num_iter_cem=3,
# Params for target network updates
target_q_network=None,
target_update_tau=1.0,
target_update_period=1,
enable_td3=True,
target_q_network_delayed=None,
target_q_network_delayed_2=None,
delayed_target_update_period=5,
# Params for training.
td_errors_loss_fn=None,
auxiliary_loss_fns=None,
gamma=1.0,
reward_scale_factor=1.0,
gradient_clipping=None,
# Params for debugging
debug_summaries=False,
summarize_grads_and_vars=False,
train_step_counter=None,
info_spec=None,
name=None):
"""Creates a Qtopt Agent.
Args:
time_step_spec: A `TimeStep` spec of the expected time_steps.
action_spec: A nest of BoundedTensorSpec representing the actions.
q_network: A tf_agents.network.Network to be used by the agent. The
network will be called with call((observation, action), step_type). The
q_network is different from the one used in DQN where the input is state
and the output has multiple dimension representing Q values for
different actions. The input of this q_network is a tuple of state and
action. The output is one dimension representing Q value for that
specific action. DDPG critic network can be used directly here.
optimizer: The optimizer to use for training.
actions_sampler: A tf_agents.policies.sampler.ActionsSampler to be used to
sample actions in CEM.
epsilon_greedy: probability of choosing a random action in the default
epsilon-greedy collect policy (used only if a wrapper is not provided to
the collect_policy method).
n_step_update: Currently, only n_step_update == 1 is supported.
emit_log_probability: Whether policies emit log probabilities or not.
in_graph_bellman_update: If False, configures the agent to expect
experience containing computed q_values in the policy_step's info field.
      This simplifies splitting the loss calculation across several
jobs.
init_mean_cem: Initial mean value of the Gaussian distribution to sample
actions for CEM.
init_var_cem: Initial variance value of the Gaussian distribution to
sample actions for CEM.
num_samples_cem: Number of samples to sample for each iteration in CEM.
num_elites_cem: Number of elites to select for each iteration in CEM.
num_iter_cem: Number of iterations in CEM.
target_q_network: (Optional.) A `tf_agents.network.Network`
to be used as the target network during Q learning. Every
`target_update_period` train steps, the weights from
`q_network` are copied (possibly with smoothing via
`target_update_tau`) to `target_q_network`.
If `target_q_network` is not provided, it is created by
making a copy of `q_network`, which initializes a new
network with the same structure and its own layers and weights.
Network copying is performed via the `Network.copy` superclass method,
with the same arguments used during the original network's construction
and may inadvertently lead to weights being shared between networks.
This can happen if, for example, the original
network accepted a pre-built Keras layer in its `__init__`, or
accepted a Keras layer that wasn't built, but neglected to create
a new copy.
In these cases, it is up to you to provide a target Network having
weights that are not shared with the original `q_network`.
If you provide a `target_q_network` that shares any
weights with `q_network`, an exception is thrown.
target_update_tau: Factor for soft update of the target networks.
target_update_period: Period for soft update of the target networks.
enable_td3: Whether or not to enable using a delayed target network to
calculate q value and assign min(q_delayed, q_delayed_2) as
q_next_state.
target_q_network_delayed: (Optional.) Similar network as
target_q_network but lags behind even more. See documentation
for target_q_network. Will only be used if 'enable_td3' is True.
target_q_network_delayed_2: (Optional.) Similar network as
target_q_network_delayed but lags behind even more. See documentation
for target_q_network. Will only be used if 'enable_td3' is True.
delayed_target_update_period: Used when enable_td3 is true. Period for
soft update of the delayed target networks.
td_errors_loss_fn: A function for computing the TD errors loss. If None, a
default value of element_wise_huber_loss is used. This function takes as
input the target and the estimated Q values and returns the loss for
each element of the batch.
auxiliary_loss_fns: An optional list of functions for computing auxiliary
losses. Each auxiliary_loss_fn expects network and transition as
input and should output auxiliary_loss and auxiliary_reg_loss.
gamma: A discount factor for future rewards.
reward_scale_factor: Multiplicative scale for the reward.
gradient_clipping: Norm length to clip gradients.
debug_summaries: A bool to gather debug summaries.
summarize_grads_and_vars: If True, gradient and network variable summaries
will be written during training.
train_step_counter: An optional counter to increment every time the train
op is run. Defaults to the global_step.
info_spec: If not None, the policy info spec is set to this spec.
name: The name of this agent. All variables in this module will fall under
that name. Defaults to the class name.
Raises:
ValueError: If the action spec contains more than one action or action
spec minimum is not equal to 0.
NotImplementedError: If `q_network` has non-empty `state_spec` (i.e., an
RNN is provided) and `n_step_update > 1`.
"""
tf.Module.__init__(self, name=name)
self._sampler = actions_sampler
self._init_mean_cem = init_mean_cem
self._init_var_cem = init_var_cem
self._num_samples_cem = num_samples_cem
self._num_elites_cem = num_elites_cem
self._num_iter_cem = num_iter_cem
self._in_graph_bellman_update = in_graph_bellman_update
if not in_graph_bellman_update:
if info_spec is not None:
self._info_spec = info_spec
else:
self._info_spec = {
'target_q': tensor_spec.TensorSpec((), tf.float32),
}
else:
self._info_spec = ()
self._q_network = q_network
net_observation_spec = (time_step_spec.observation, action_spec)
q_network.create_variables(net_observation_spec)
if target_q_network:
target_q_network.create_variables(net_observation_spec)
self._target_q_network = common.maybe_copy_target_network_with_checks(
self._q_network, target_q_network, input_spec=net_observation_spec,
name='TargetQNetwork')
self._target_updater = self._get_target_updater(target_update_tau,
target_update_period)
self._enable_td3 = enable_td3
if (not self._enable_td3 and
(target_q_network_delayed or target_q_network_delayed_2)):
raise ValueError('enable_td3 is set to False but target_q_network_delayed'
' or target_q_network_delayed_2 is passed.')
if self._enable_td3:
if target_q_network_delayed:
target_q_network_delayed.create_variables()
self._target_q_network_delayed = (
common.maybe_copy_target_network_with_checks(
self._q_network, target_q_network_delayed,
'TargetQNetworkDelayed'))
self._target_updater_delayed = self._get_target_updater_delayed(
1.0, delayed_target_update_period)
if target_q_network_delayed_2:
target_q_network_delayed_2.create_variables()
self._target_q_network_delayed_2 = (
common.maybe_copy_target_network_with_checks(
self._q_network, target_q_network_delayed_2,
'TargetQNetworkDelayed2'))
self._target_updater_delayed_2 = self._get_target_updater_delayed_2(
1.0, delayed_target_update_period)
self._update_target = self._update_both
else:
self._update_target = self._target_updater
self._target_q_network_delayed = None
self._target_q_network_delayed_2 = None
self._check_network_output(self._q_network, 'q_network')
self._check_network_output(self._target_q_network, 'target_q_network')
self._epsilon_greedy = epsilon_greedy
self._n_step_update = n_step_update
self._optimizer = optimizer
self._td_errors_loss_fn = (
td_errors_loss_fn or common.element_wise_huber_loss)
self._auxiliary_loss_fns = auxiliary_loss_fns
self._gamma = gamma
self._reward_scale_factor = reward_scale_factor
self._gradient_clipping = gradient_clipping
policy, collect_policy = self._setup_policy(time_step_spec, action_spec,
emit_log_probability)
if q_network.state_spec and n_step_update != 1:
raise NotImplementedError(
'QtOptAgent does not currently support n-step updates with stateful '
'networks (i.e., RNNs), but n_step_update = {}'.format(n_step_update))
# Bypass the train_sequence_length check when RNN is used.
train_sequence_length = (
n_step_update + 1 if not q_network.state_spec else None)
super(QtOptAgent, self).__init__(
time_step_spec,
action_spec,
policy,
collect_policy,
train_sequence_length=train_sequence_length,
debug_summaries=debug_summaries,
summarize_grads_and_vars=summarize_grads_and_vars,
train_step_counter=train_step_counter,
)
self._setup_data_converter(q_network, gamma, n_step_update)
@property
def policy_q_network(self):
return self._target_q_network
@property
def enable_td3(self):
return self._enable_td3
def _setup_data_converter(self, q_network, gamma, n_step_update):
if q_network.state_spec:
if not self._in_graph_bellman_update:
self._data_context = data_converter.DataContext(
time_step_spec=self._time_step_spec,
action_spec=self._action_spec,
info_spec=self._collect_policy.info_spec,
policy_state_spec=self._q_network.state_spec,
use_half_transition=True)
self._as_transition = data_converter.AsHalfTransition(
self.data_context, squeeze_time_dim=False)
else:
self._data_context = data_converter.DataContext(
time_step_spec=self._time_step_spec,
action_spec=self._action_spec,
info_spec=self._collect_policy.info_spec,
policy_state_spec=self._q_network.state_spec,
use_half_transition=False)
self._as_transition = data_converter.AsTransition(
self.data_context, squeeze_time_dim=False,
prepend_t0_to_next_time_step=True)
else:
if not self._in_graph_bellman_update:
self._data_context = data_converter.DataContext(
time_step_spec=self._time_step_spec,
action_spec=self._action_spec,
info_spec=self._collect_policy.info_spec,
policy_state_spec=self._q_network.state_spec,
use_half_transition=True)
self._as_transition = data_converter.AsHalfTransition(
self.data_context, squeeze_time_dim=True)
else:
# This reduces the n-step return and removes the extra time dimension,
# allowing the rest of the computations to be independent of the
# n-step parameter.
self._as_transition = data_converter.AsNStepTransition(
self.data_context, gamma=gamma, n=n_step_update)
def _setup_policy(self, time_step_spec, action_spec, emit_log_probability):
policy = qtopt_cem_policy.CEMPolicy(
time_step_spec,
action_spec,
q_network=self._target_q_network,
sampler=self._sampler,
init_mean=self._init_mean_cem,
init_var=self._init_var_cem,
info_spec=self._info_spec,
num_samples=self._num_samples_cem,
num_elites=self._num_elites_cem,
num_iterations=self._num_iter_cem,
emit_log_probability=emit_log_probability,
training=False)
collect_policy = epsilon_greedy_policy.EpsilonGreedyPolicy(
policy, epsilon=self._epsilon_greedy)
return policy, collect_policy
def _check_network_output(self, net, label):
network_utils.check_single_floating_network_output(
net.create_variables(), expected_output_shape=(), label=label)
def _initialize(self):
common.soft_variables_update(
self._q_network.variables, self._target_q_network.variables, tau=1.0)
if self._enable_td3:
common.soft_variables_update(
self._q_network.variables,
self._target_q_network_delayed.variables, tau=1.0)
common.soft_variables_update(
self._q_network.variables,
self._target_q_network_delayed_2.variables, tau=1.0)
def _update_both(self):
self._target_updater_delayed_2()
self._target_updater_delayed()
self._target_updater()
def _get_target_updater(self, tau=1.0, period=1):
"""Performs a soft update of the target network.
For each weight w_s in the q network, and its corresponding
weight w_t in the target_q_network, a soft update is:
w_t = (1 - tau) * w_t + tau * w_s
Args:
tau: A float scalar in [0, 1]. Default `tau=1.0` means hard update. Used
for target network.
period: Step interval at which the target network is updated. Used for
target network.
Returns:
A callable that performs a soft update of the target network parameters.
"""
with tf.name_scope('update_targets'):
def update():
return common.soft_variables_update(
self._q_network.variables,
self._target_q_network.variables,
tau,
tau_non_trainable=1.0)
return common.Periodically(update, period, 'periodic_update_targets')
def _get_target_updater_delayed(self, tau_delayed=1.0, period_delayed=1):
"""Performs a soft update of the delayed target network.
For each weight w_s in the q network, and its corresponding
weight w_t in the target_q_network, a soft update is:
w_t = (1 - tau) * w_t + tau * w_s
Args:
tau_delayed: A float scalar in [0, 1]. Default `tau=1.0` means hard
update. Used for delayed target network.
period_delayed: Step interval at which the target network is updated. Used
for delayed target network.
Returns:
A callable that performs a soft update of the target network parameters.
"""
with tf.name_scope('update_targets_delayed'):
def update_delayed():
return common.soft_variables_update(
self._target_q_network.variables,
self._target_q_network_delayed.variables,
tau_delayed,
tau_non_trainable=1.0)
return common.Periodically(update_delayed, period_delayed,
'periodic_update_targets_delayed')
def _get_target_updater_delayed_2(self, tau_delayed=1.0, period_delayed=1):
"""Performs a soft update of the delayed target network.
For each weight w_s in the q network, and its corresponding
weight w_t in the target_q_network, a soft update is:
w_t = (1 - tau) * w_t + tau * w_s
Args:
tau_delayed: A float scalar in [0, 1]. Default `tau=1.0` means hard
update. Used for delayed target network.
period_delayed: Step interval at which the target network is updated. Used
for delayed target network.
Returns:
A callable that performs a soft update of the target network parameters.
"""
with tf.name_scope('update_targets_delayed'):
def update_delayed():
return common.soft_variables_update(
self._target_q_network_delayed.variables,
self._target_q_network_delayed_2.variables,
tau_delayed,
tau_non_trainable=1.0)
return common.Periodically(update_delayed, period_delayed,
'periodic_update_targets_delayed')
# Use @common.function in graph mode or for speeding up.
def _train(self, experience, weights):
with tf.GradientTape() as tape:
loss_info = self._loss(
experience,
weights=weights,
training=True)
tf.debugging.check_numerics(loss_info.loss, 'Loss is inf or nan')
variables_to_train = self._q_network.trainable_weights
non_trainable_weights = self._q_network.non_trainable_weights
assert list(variables_to_train), "No variables in the agent's q_network."
grads = tape.gradient(loss_info.loss, variables_to_train)
# Tuple is used for py3, where zip is a generator producing values once.
grads_and_vars = list(zip(grads, variables_to_train))
if self._gradient_clipping is not None:
grads_and_vars = eager_utils.clip_gradient_norms(grads_and_vars,
self._gradient_clipping)
if self._summarize_grads_and_vars:
grads_and_vars_with_non_trainable = (
grads_and_vars + [(None, v) for v in non_trainable_weights])
eager_utils.add_variables_summaries(grads_and_vars_with_non_trainable,
self.train_step_counter)
eager_utils.add_gradients_summaries(grads_and_vars,
self.train_step_counter)
self._optimizer.apply_gradients(grads_and_vars)
self.train_step_counter.assign_add(1)
self._update_target()
return loss_info
def _add_auxiliary_losses(self, transition, weights, losses_dict):
"""Computes auxiliary losses, updating losses_dict in place."""
total_auxiliary_loss = 0
if self._auxiliary_loss_fns is not None:
for auxiliary_loss_fn in self._auxiliary_loss_fns:
auxiliary_loss, auxiliary_reg_loss = auxiliary_loss_fn(
network=self._q_network, transition=transition)
agg_auxiliary_loss = common.aggregate_losses(
per_example_loss=auxiliary_loss,
sample_weight=weights,
regularization_loss=auxiliary_reg_loss)
total_auxiliary_loss += agg_auxiliary_loss.total_loss
losses_dict.update(
{'auxiliary_loss_{}'.format(
auxiliary_loss_fn.__name__
): agg_auxiliary_loss.weighted,
'auxiliary_reg_loss_{}'.format(
auxiliary_loss_fn.__name__
): agg_auxiliary_loss.regularization,
})
return total_auxiliary_loss
def _loss(self,
experience,
weights=None,
training=False):
"""Computes loss for QtOpt training.
Args:
experience: A batch of experience data in the form of a `Trajectory` or
`Transition`. The structure of `experience` must match that of
`self.collect_policy.step_spec`.
If a `Trajectory`, all tensors in `experience` must be shaped
`[B, T, ...]` where `T` must be equal to `self.train_sequence_length`
if that property is not `None`.
weights: Optional scalar or elementwise (per-batch-entry) importance
weights. The output td_loss will be scaled by these weights, and
the final scalar loss is the mean of these values.
training: Whether this loss is being used for training.
Returns:
loss: An instance of `QtOptLossInfo`.
Raises:
ValueError:
if the number of actions is greater than 1.
"""
transition = self._as_transition(experience)
time_steps, policy_steps, next_time_steps = transition
actions = policy_steps.action
with tf.name_scope('loss'):
q_values = self._compute_q_values(
time_steps, actions, policy_steps.state, training=training)
next_q_values = self._compute_next_q_values(
next_time_steps, policy_steps.info, policy_steps.state)
# This applies to any value of n_step_update and also in RNN-QtOpt.
# In the RNN-QtOpt case, inputs and outputs contain a time dimension.
td_targets = compute_td_targets(
next_q_values,
rewards=self._reward_scale_factor * next_time_steps.reward,
discounts=self._gamma * next_time_steps.discount)
valid_mask = tf.cast(~time_steps.is_last(), tf.float32)
td_error = valid_mask * (td_targets - q_values)
td_loss = valid_mask * self._td_errors_loss_fn(td_targets, q_values)
if nest_utils.is_batched_nested_tensors(
time_steps, self.time_step_spec, num_outer_dims=2):
# Do a sum over the time dimension.
td_loss = tf.reduce_sum(input_tensor=td_loss, axis=1)
# Aggregate across the elements of the batch and add regularization loss.
# Note: We use an element wise loss above to ensure each element is always
# weighted by 1/N where N is the batch size, even when some of the
# weights are zero due to boundary transitions. Weighting by 1/K where K
# is the actual number of non-zero weight would artificially increase
# their contribution in the loss. Think about what would happen as
# the number of boundary samples increases.
agg_loss = common.aggregate_losses(
per_example_loss=td_loss,
sample_weight=weights,
regularization_loss=self._q_network.losses)
total_loss = agg_loss.total_loss
losses_dict = {'td_loss': agg_loss.weighted,
'reg_loss': agg_loss.regularization}
total_auxiliary_loss = self._add_auxiliary_losses(
transition, weights, losses_dict)
total_loss += total_auxiliary_loss
losses_dict['total_loss'] = total_loss
common.summarize_scalar_dict(losses_dict,
step=self.train_step_counter,
name_scope='Losses/')
if self._summarize_grads_and_vars:
with tf.name_scope('Variables/'):
for var in self._q_network.trainable_weights:
tf.compat.v2.summary.histogram(
name=var.name.replace(':', '_'),
data=var,
step=self.train_step_counter)
if self._debug_summaries:
diff_q_values = q_values - next_q_values
common.generate_tensor_summaries('td_error', td_error,
self.train_step_counter)
common.generate_tensor_summaries('q_values', q_values,
self.train_step_counter)
common.generate_tensor_summaries('next_q_values', next_q_values,
self.train_step_counter)
common.generate_tensor_summaries('diff_q_values', diff_q_values,
self.train_step_counter)
common.generate_tensor_summaries('reward', next_time_steps.reward,
self.train_step_counter)
return tf_agent.LossInfo(total_loss, QtOptLossInfo(td_loss=td_loss,
td_error=td_error))
def _compute_q_values(
self, time_steps, actions, network_state=(), training=False):
q_values, _ = self._q_network((time_steps.observation, actions),
step_type=time_steps.step_type,
network_state=network_state,
training=training)
return q_values
def _compute_next_q_values(self, next_time_steps, info, network_state=()):
if not self._in_graph_bellman_update:
return info['target_q']
next_action_policy_step = self._policy.action(
next_time_steps, network_state)
if self._enable_td3:
q_values_target_delayed, _ = self._target_q_network_delayed(
(next_time_steps.observation, next_action_policy_step.action),
step_type=next_time_steps.step_type,
network_state=network_state,
training=False)
q_values_target_delayed_2, _ = self._target_q_network_delayed_2(
(next_time_steps.observation, next_action_policy_step.action),
step_type=next_time_steps.step_type,
network_state=network_state,
training=False)
q_next_state = tf.minimum(q_values_target_delayed_2,
q_values_target_delayed)
else:
q_next_state, _ = self._target_q_network(
(next_time_steps.observation, next_action_policy_step.action),
step_type=next_time_steps.step_type,
network_state=network_state,
training=False)
if self._q_network.state_spec:
q_next_state = q_next_state[:, 1:]
return q_next_state
|
tensorflow/agents
|
tf_agents/agents/qtopt/qtopt_agent.py
|
Python
|
apache-2.0
| 28,159
|
month = int(input("Enter month: "))
year = int(input("Enter year: "))
if month == 1:
monthName = "January"
numberOfDaysInMonth = 31
elif month == 2:
monthName = "February"
if year % 400 == 0 or (year % 4 == 0 and year % 100 != 0):
numberOfDaysInMonth = 29
else:
numberOfDaysInMonth = 28
elif month == 3:
monthName = "March"
numberOfDaysInMonth = 31
elif month == 4:
monthName = "April"
numberOfDaysInMonth = 30
elif month == 5:
monthName = "May"
numberOfDaysInMonth = 31
elif month == 6:
monthName = "June"
numberOfDaysInMonth = 30
elif month == 7:
monthName = "July"
numberOfDaysInMonth = 31
elif month == 8:
monthName = "August"
numberOfDaysInMonth = 31
elif month == 9:
monthName = "September"
numberOfDaysInMonth = 30
elif month == 10:
monthName = "October"
numberOfDaysInMonth = 31
elif month == 11:
monthName = "November"
numberOfDaysInMonth = 30
else:
monthName = "December"
numberOfDaysInMonth = 31
print(monthName, year, "has", numberOfDaysInMonth, "days")
|
asmitde/TA-PSU-CMPSC101
|
Fall 2016/Homeworks/HW4/Solution/problem5.py
|
Python
|
apache-2.0
| 1,132
|
#!/usr/bin/env python
from __future__ import print_function
import sys
import os
import shutil
import shlex
import argparse
import netCDF4 as nc
import numpy as np
import tempfile
import subprocess as sp
import multiprocessing as mp
my_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(my_dir, './esmgrids'))
from esmgrids.mom_grid import MomGrid # noqa
from esmgrids.core2_grid import Core2Grid # noqa
from esmgrids.jra55_grid import Jra55Grid # noqa
from esmgrids.jra55_river_grid import Jra55RiverGrid # noqa
from esmgrids.daitren_runoff_grid import DaitrenRunoffGrid # noqa
"""
This script makes all of the remapping weights for ACCESS-OM2.
Run example:
./make_remap_weights.py /short/x77/nah599/access-om2/input/ \
/g/data/ua8/JRA55-do/RYF/v1-3/
"""
def convert_to_scrip_output(weights):
my_dir = os.path.dirname(os.path.realpath(__file__))
_, new_weights = tempfile.mkstemp(suffix='.nc', dir=my_dir)
# So that ncrename doesn't prompt for overwrite.
os.remove(new_weights)
cmdstring = ('ncrename -d n_a,src_grid_size -d n_b,dst_grid_size -d n_s,'
'num_links -d nv_a,src_grid_corners -d nv_b,dst_grid_corner'
's -v yc_a,src_grid_center_lat -v yc_b,dst_grid_center_lat '
'-v xc_a,src_grid_center_lon -v xc_b,dst_grid_center_lon -v'
' yv_a,src_grid_corner_lat -v xv_a,src_grid_corner_lon -v y'
'v_b,dst_grid_corner_lat -v xv_b,dst_grid_corner_lon -v mas'
'k_a,src_grid_imask -v mask_b,dst_grid_imask -v area_a,src_'
'grid_area -v area_b,dst_grid_area -v frac_a,src_grid_frac '
'-v frac_b,dst_grid_frac -v col,src_address -v row,dst_addr'
'ess {} {}')
cmd = cmdstring.format(weights, new_weights)
try:
sp.check_output(shlex.split(cmd))
except sp.CalledProcessError as e:
print(cmd, file=sys.stderr)
print(e.output, file=sys.stderr)
return None
# Fix the dimension of the remap_matrix.
with nc.Dataset(weights) as f_old, nc.Dataset(new_weights, 'r+') as f_new:
remap_matrix = f_new.createVariable('remap_matrix',
'f8', ('num_links', 'num_wgts'))
remap_matrix[:, 0] = f_old.variables['S'][:]
os.remove(weights)
return new_weights
def create_weights(src_grid, dest_grid, npes, method,
ignore_unmapped=False,
unmasked_src=True, unmasked_dest=False):
my_dir = os.path.dirname(os.path.realpath(__file__))
_, src_grid_scrip = tempfile.mkstemp(suffix='.nc', dir=my_dir)
_, dest_grid_scrip = tempfile.mkstemp(suffix='.nc', dir=my_dir)
_, regrid_weights = tempfile.mkstemp(suffix='.nc', dir=my_dir)
if unmasked_src:
src_grid.write_scrip(src_grid_scrip, write_test_scrip=False,
mask=np.zeros_like(src_grid.mask_t, dtype=int))
else:
src_grid.write_scrip(src_grid_scrip, write_test_scrip=False)
if unmasked_dest:
dest_grid.write_scrip(dest_grid_scrip, write_test_scrip=False,
mask=np.zeros_like(dest_grid.mask_t, dtype=int))
else:
dest_grid.write_scrip(dest_grid_scrip, write_test_scrip=False)
if ignore_unmapped:
ignore_unmapped = ['--ignore_unmapped']
else:
ignore_unmapped = []
try:
cmd = ['mpirun', '-np', str(npes), 'ESMF_RegridWeightGen'] + \
['--netcdf4',
'-s', src_grid_scrip,
'-d', dest_grid_scrip, '-m', method,
'-w', regrid_weights] + ignore_unmapped
print(cmd)
sp.check_output(cmd)
except sp.CalledProcessError as e:
print("Error: ESMF_RegridWeightGen failed ret {}".format(e.returncode),
file=sys.stderr)
print(e.output, file=sys.stderr)
log = 'PET0.RegridWeightGen.Log'
if os.path.exists(log):
print('Contents of {}:'.format(log), file=sys.stderr)
with open(log) as f:
print(f.read(), file=sys.stderr)
return None
os.remove(src_grid_scrip)
os.remove(dest_grid_scrip)
return regrid_weights
def find_grid_defs(input_dir, jra55_input, core_input):
"""
Return a dictionary containing the grid definition files.
"""
d = {}
d['MOM1'] = (os.path.join(input_dir, 'mom_1deg', 'ocean_hgrid.nc'),
os.path.join(input_dir, 'mom_1deg', 'ocean_mask.nc'))
d['MOM025'] = (os.path.join(input_dir, 'mom_025deg', 'ocean_hgrid.nc'),
os.path.join(input_dir, 'mom_025deg', 'ocean_mask.nc'))
d['MOM01'] = (os.path.join(input_dir, 'mom_01deg', 'ocean_hgrid.nc'),
os.path.join(input_dir, 'mom_01deg', 'ocean_mask.nc'))
d['CORE2'] = os.path.join(core_input, 't_10.0001.nc')
d['JRA55'] = os.path.join(jra55_input, 'RYF.t_10.1990_1991.nc')
d['JRA55_runoff'] = os.path.join(jra55_input,
'RYF.runoff_all.1990_1991.nc')
d['Daitren_runoff'] = os.path.join(core_input, 'runoff.daitren.clim.10FEB2011.nc')
return d
def main():
parser = argparse.ArgumentParser()
parser.add_argument('input_dir', help="""
The ACCESS-OM2 input directory.""")
parser.add_argument('jra55_input', help="""
The JRA55 input directory.""")
parser.add_argument('core_input', help="""
The CORE input directory.""")
parser.add_argument('--atm', default=None, help="""
Atmosphere grid to regrid from, can be one of:
CORE2, JRA55, JRA55_runoff, Daitren_runoff""")
parser.add_argument('--ocean', default=None, help="""
Ocean grid to regrid to, can be one of:
MOM1, MOM01, MOM025""")
parser.add_argument('--method', default=None, help="""
The interpolation method to use, can be patch, conserve or conserve2nd""")
parser.add_argument('--npes', default=None, help="""
The number of PEs to use.""")
parser.add_argument('--unmask_dest',
action='store_true',
help='Ignore destination grid mask')
args = parser.parse_args()
atm_options = ['JRA55', 'JRA55_runoff', 'CORE2', 'Daitren_runoff']
ocean_options = ['MOM1', 'MOM025', 'MOM01']
method_options = ['patch', 'conserve', 'conserve2nd']
if args.atm is None:
args.atm = atm_options
else:
if args.atm not in atm_options:
print("Error: bad atm grid.", file=sys.stderr)
parser.print_help()
return 1
args.atm = [args.atm]
if args.ocean is None:
args.ocean = ocean_options
else:
if args.ocean not in ocean_options:
print("Error: bad atm grid.", file=sys.stderr)
parser.print_help()
return 1
args.ocean = [args.ocean]
if args.method is None:
args.method = method_options
else:
args.method = [args.method]
if args.npes is None:
import multiprocessing as mp
args.npes = mp.cpu_count() // 2
grid_file_dict = find_grid_defs(args.input_dir, args.jra55_input, args.core_input)
for ocean in args.ocean:
umask_file = grid_file_dict[ocean][1]
dest_grid = MomGrid.fromfile(grid_file_dict[ocean][0],
mask_file=umask_file)
for atm in args.atm:
if atm == 'CORE2':
src_grid = Core2Grid(grid_file_dict[atm])
elif atm == 'Daitren_runoff':
src_grid = DaitrenRunoffGrid(grid_file_dict[atm])
elif atm == 'JRA55':
src_grid = Jra55Grid(grid_file_dict[atm])
elif atm == 'JRA55_runoff':
src_grid = Jra55RiverGrid(grid_file_dict[atm], calc_areas=False)
else:
print('Unrecognised atmosphere grid: {}'.format(atm))
return 1
for method in args.method:
weights = create_weights(src_grid, dest_grid, args.npes,
method, unmasked_dest=args.unmask_dest)
if not weights:
return 1
weights = convert_to_scrip_output(weights)
if not weights:
return 1
shutil.move(weights, '{}_{}_{}.nc'.format(atm, ocean, method))
return 0
if __name__ == "__main__":
sys.exit(main())
|
CWSL/access-om
|
tools/make_remap_weights.py
|
Python
|
apache-2.0
| 8,602
|
#!/usr/bin/env python
#
# parttool is used to perform partition level operations - reading,
# writing, erasing and getting info about the partition.
#
# Copyright 2018 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
import argparse
import os
import sys
import subprocess
import tempfile
import gen_esp32part as gen
__version__ = '1.0'
IDF_COMPONENTS_PATH = os.path.expandvars(os.path.join("$IDF_PATH", "components"))
ESPTOOL_PY = os.path.join(IDF_COMPONENTS_PATH, "esptool_py", "esptool", "esptool.py")
quiet = False
def status(msg):
""" Print status message to stderr """
if not quiet:
print(msg)
def _invoke_esptool(esptool_args, args):
m_esptool_args = [sys.executable, ESPTOOL_PY]
if args.port != "":
m_esptool_args.extend(["--port", args.port])
m_esptool_args.extend(esptool_args)
if quiet:
with open(os.devnull, "w") as fnull:
subprocess.check_call(m_esptool_args, stdout=fnull, stderr=fnull)
else:
subprocess.check_call(m_esptool_args)
def _get_partition_table(args):
partition_table = None
gen.offset_part_table = int(args.partition_table_offset, 0)
if args.partition_table_file:
status("Reading partition table from partition table file...")
try:
with open(args.partition_table_file, "rb") as partition_table_file:
partition_table = gen.PartitionTable.from_binary(partition_table_file.read())
status("Partition table read from binary file {}".format(partition_table_file.name))
except (gen.InputError, TypeError):
with open(args.partition_table_file, "r") as partition_table_file:
partition_table_file.seek(0)
partition_table = gen.PartitionTable.from_csv(partition_table_file.read())
status("Partition table read from CSV file {}".format(partition_table_file.name))
else:
port_info = (" on port " + args.port if args.port else "")
status("Reading partition table from device{}...".format(port_info))
f_name = None
with tempfile.NamedTemporaryFile(delete=False) as f:
f_name = f.name
try:
invoke_args = ["read_flash", str(gen.offset_part_table), str(gen.MAX_PARTITION_LENGTH), f_name]
_invoke_esptool(invoke_args, args)
with open(f_name, "rb") as f:
partition_table = gen.PartitionTable.from_binary(f.read())
status("Partition table read from device" + port_info)
finally:
os.unlink(f_name)
return partition_table
def _get_partition(args):
partition_table = _get_partition_table(args)
partition = None
if args.partition_name:
partition = partition_table.find_by_name(args.partition_name)
elif args.partition_type and args.partition_subtype:
partition = partition_table.find_by_type(args.partition_type, args.partition_subtype)
elif args.partition_boot_default:
search = ["factory"] + ["ota_{}".format(d) for d in range(16)]
for subtype in search:
partition = partition_table.find_by_type("app", subtype)
if partition is not None:
break
else:
raise RuntimeError("Invalid partition selection arguments. Specify --partition-name OR \
--partition-type and --partition-subtype OR --partition-boot-default.")
if partition:
status("Found partition {}".format(str(partition)))
return partition
def _get_and_check_partition(args):
partition = None
partition = _get_partition(args)
if not partition:
raise RuntimeError("Unable to find specified partition.")
return partition
def write_partition(args):
erase_partition(args)
partition = _get_and_check_partition(args)
status("Checking input file size...")
with open(args.input, "rb") as input_file:
content_len = len(input_file.read())
if content_len != partition.size:
status("File size (0x{:x}) does not match partition size (0x{:x})".format(content_len, partition.size))
else:
status("File size matches partition size (0x{:x})".format(partition.size))
_invoke_esptool(["write_flash", str(partition.offset), args.input], args)
status("Written contents of file '{}' to device at offset 0x{:x}".format(args.input, partition.offset))
def read_partition(args):
partition = _get_and_check_partition(args)
_invoke_esptool(["read_flash", str(partition.offset), str(partition.size), args.output], args)
status("Read partition contents from device at offset 0x{:x} to file '{}'".format(partition.offset, args.output))
def erase_partition(args):
partition = _get_and_check_partition(args)
_invoke_esptool(["erase_region", str(partition.offset), str(partition.size)], args)
status("Erased partition at offset 0x{:x} on device".format(partition.offset))
def get_partition_info(args):
partition = None
if args.table:
partition_table = _get_partition_table(args)
if args.table.endswith(".csv"):
partition_table = partition_table.to_csv()
else:
partition_table = partition_table.to_binary()
with open(args.table, "wb") as table_file:
table_file.write(partition_table)
status("Partition table written to " + table_file.name)
else:
partition = _get_partition(args)
if partition:
info_dict = {
"offset": '0x{:x}'.format(partition.offset),
"size": '0x{:x}'.format(partition.size)
}
infos = []
try:
for info in args.info:
infos += [info_dict[info]]
except KeyError:
raise RuntimeError("Request for unknown partition info {}".format(info))
status("Requested partition information [{}]:".format(", ".join(args.info)))
print(" ".join(infos))
else:
status("Partition not found")
def generate_blank_partition_file(args):
output = None
stdout_binary = None
partition = _get_and_check_partition(args)
output = b"\xFF" * partition.size
try:
stdout_binary = sys.stdout.buffer # Python 3
except AttributeError:
stdout_binary = sys.stdout
with stdout_binary if args.output == "" else open(args.output, 'wb') as f:
f.write(output)
status("Blank partition file '{}' generated".format(args.output))
def main():
global quiet
parser = argparse.ArgumentParser("ESP-IDF Partitions Tool")
parser.add_argument("--quiet", "-q", help="suppress stderr messages", action="store_true")
# There are two possible sources for the partition table: a device attached to the host
# or a partition table CSV/binary file. These sources are mutually exclusive.
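    # Illustrative invocations (hypothetical port, file and partition names,
    # inferred from the argument definitions below rather than quoted docs):
    #   parttool.py --port /dev/ttyUSB0 --partition-name nvs \
    #       get_partition_info --info offset size
    #   parttool.py --partition-table-file partitions.csv \
    #       --partition-boot-default get_partition_info --info offset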
partition_table_info_source_args = parser.add_mutually_exclusive_group()
partition_table_info_source_args.add_argument("--port", "-p", help="port where the device to read the partition table from is attached", default="")
partition_table_info_source_args.add_argument("--partition-table-file", "-f", help="file (CSV/binary) to read the partition table from")
parser.add_argument("--partition-table-offset", "-o", help="offset to read the partition table from", default="0x8000")
# Specify what partition to perform the operation on. This can either be specified using the
# partition name or the first partition that matches the specified type/subtype
partition_selection_args = parser.add_mutually_exclusive_group()
partition_selection_args.add_argument("--partition-name", "-n", help="name of the partition")
partition_selection_args.add_argument("--partition-type", "-t", help="type of the partition")
partition_selection_args.add_argument('--partition-boot-default', "-d", help='select the default boot partition \
using the same fallback logic as the IDF bootloader', action="store_true")
parser.add_argument("--partition-subtype", "-s", help="subtype of the partition")
subparsers = parser.add_subparsers(dest="operation", help="run parttool -h for additional help")
# Specify the supported operations
read_part_subparser = subparsers.add_parser("read_partition", help="read partition from device and dump contents into a file")
read_part_subparser.add_argument("--output", help="file to dump the read partition contents to")
write_part_subparser = subparsers.add_parser("write_partition", help="write contents of a binary file to partition on device")
write_part_subparser.add_argument("--input", help="file whose contents are to be written to the partition offset")
subparsers.add_parser("erase_partition", help="erase the contents of a partition on the device")
print_partition_info_subparser = subparsers.add_parser("get_partition_info", help="get partition information")
print_partition_info_subparser_info_type = print_partition_info_subparser.add_mutually_exclusive_group()
print_partition_info_subparser_info_type.add_argument("--info", help="type of partition information to get", nargs="+")
print_partition_info_subparser_info_type.add_argument("--table", help="dump the partition table to a file")
generate_blank_subparser = subparsers.add_parser("generate_blank_partition_file", help="generate a blank (all 0xFF) partition file of \
the specified partition that can be flashed to the device")
generate_blank_subparser.add_argument("--output", help="blank partition file filename")
args = parser.parse_args()
quiet = args.quiet
# No operation specified, display help and exit
if args.operation is None:
if not quiet:
parser.print_help()
sys.exit(1)
# Else execute the operation
operation_func = globals()[args.operation]
if quiet:
# If exceptions occur, suppress and exit quietly
try:
operation_func(args)
except Exception:
sys.exit(2)
else:
operation_func(args)
if __name__ == '__main__':
main()
|
krzychb/rtd-test-bed
|
components/partition_table/parttool.py
|
Python
|
apache-2.0
| 10,853
|
# -*- coding: utf-8 -*-
from model.address import Address
def test_add_address(app, json_addresses, db, check_ui):
address = json_addresses
old_addresses = db.get_address_list()
app.address.create(address)
old_addresses.append(address)
assert len(old_addresses) == app.address.count()
if check_ui:
assert sorted(old_addresses, key=Address.id_or_max) == sorted(app.address.get_addresses_list(), key=Address.id_or_max)
|
vpalex999/python_training
|
test/test_add_address.py
|
Python
|
apache-2.0
| 453
|
# encoding=utf-8
__author__ = 'Hinsteny'
class Student(object):
def __init__(self, name, score):
self._name = name
self.__name = name
self.__score = score
def print_score(self):
print('%s: %s' % (self.__name, self.__score))
bart = Student('Bart Simpson', 98)
print(bart._name)
print(bart._Student__name)
bart.__name = "www"
print(bart.__name)
|
InverseLina/python-practice
|
Category/Test__.py
|
Python
|
apache-2.0
| 389
|
#!/usr/bin/env python
import os
import subprocess
from os.path import join, dirname
from cloudify import ctx
ctx.download_resource(
join('components', 'utils.py'),
join(dirname(__file__), 'utils.py'))
import utils # NOQA
SERVICE_NAME = 'stage'
# Some runtime properties to be used in teardown
runtime_props = ctx.instance.runtime_properties
runtime_props['service_name'] = SERVICE_NAME
STAGE_USER = '{0}_user'.format(SERVICE_NAME)
STAGE_GROUP = '{0}_group'.format(SERVICE_NAME)
runtime_props['service_user'] = STAGE_USER
runtime_props['service_group'] = STAGE_GROUP
HOME_DIR = join('/opt', 'cloudify-{0}'.format(SERVICE_NAME))
NODEJS_DIR = join('/opt', 'nodejs')
LOG_DIR = join(utils.BASE_LOG_DIR, SERVICE_NAME)
RESOURCES_DIR = join(HOME_DIR, 'resources')
runtime_props['home_dir'] = HOME_DIR
runtime_props['files_to_remove'] = [HOME_DIR, NODEJS_DIR, LOG_DIR]
ctx_properties = ctx.node.properties.get_all()
CONFIG_PATH = 'components/{0}/config'.format(SERVICE_NAME)
def _install_stage():
nodejs_source_url = ctx_properties['nodejs_tar_source_url']
stage_source_url = ctx_properties['stage_tar_source_url']
if not utils.resource_factory.local_resource_exists(stage_source_url):
ctx.logger.info('Stage package not found in manager resources '
'package. Stage will not be installed.')
ctx.instance.runtime_properties['skip_installation'] = 'true'
return
# injected as an input to the script
ctx.instance.runtime_properties['influxdb_endpoint_ip'] = \
os.environ.get('INFLUXDB_ENDPOINT_IP')
utils.set_selinux_permissive()
utils.copy_notice(SERVICE_NAME)
utils.mkdir(NODEJS_DIR)
utils.mkdir(HOME_DIR)
utils.mkdir(LOG_DIR)
utils.mkdir(RESOURCES_DIR)
utils.create_service_user(STAGE_USER, STAGE_GROUP, HOME_DIR)
ctx.logger.info('Installing NodeJS...')
nodejs = utils.download_cloudify_resource(nodejs_source_url, SERVICE_NAME)
utils.untar(nodejs, NODEJS_DIR)
utils.remove(nodejs)
ctx.logger.info('Installing Cloudify Stage (UI)...')
stage_tar = utils.download_cloudify_resource(stage_source_url,
SERVICE_NAME)
if 'community' in stage_tar:
ctx.logger.info('Community edition')
ctx.instance.runtime_properties['community_mode'] = '-mode community'
else:
ctx.instance.runtime_properties['community_mode'] = ''
utils.untar(stage_tar, HOME_DIR)
utils.remove(stage_tar)
ctx.logger.info('Fixing permissions...')
utils.chown(STAGE_USER, STAGE_GROUP, HOME_DIR)
utils.chown(STAGE_USER, STAGE_GROUP, NODEJS_DIR)
utils.chown(STAGE_USER, STAGE_GROUP, LOG_DIR)
configure_script(
'restore-snapshot.py',
'Restore stage directories from a snapshot path',
)
configure_script(
'make-auth-token.py',
'Update auth token for stage user',
)
# Allow snapshot restores to restore token
utils.allow_user_to_sudo_command(
'/opt/manager/env/bin/python',
'Snapshot update auth token for stage user',
allow_as=STAGE_USER,
)
subprocess.check_call([
'sudo', '-u', 'stage_user',
'/opt/manager/env/bin/python',
'/opt/cloudify/stage/make-auth-token.py',
])
utils.logrotate(SERVICE_NAME)
utils.systemd.configure(SERVICE_NAME)
backend_dir = join(HOME_DIR, 'backend')
npm_path = join(NODEJS_DIR, 'bin', 'npm')
subprocess.check_call(
'cd {0}; {1} run db-migrate'.format(backend_dir, npm_path),
shell=True)
def configure_script(script_name, description):
utils.deploy_sudo_command_script(
script_name,
description,
component=SERVICE_NAME,
allow_as=STAGE_USER,
)
utils.chmod('a+rx', '/opt/cloudify/stage/' + script_name)
utils.sudo(['usermod', '-aG', utils.CLOUDIFY_GROUP, STAGE_USER])
def main():
_install_stage()
main()
|
cloudify-cosmo/cloudify-manager-blueprints
|
components/stage/scripts/create.py
|
Python
|
apache-2.0
| 3,954
|
# Copyright 2014 LinkedIn Corp.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Constants that are used in multiple classes
"""
import os
PROJECT_ROOT_DIR = os.path.abspath(os.path.dirname(__file__))
WEB_RESOURCE_DIR = os.path.join(PROJECT_ROOT_DIR, "web_resources/")
PASSED = "passed"
FAILED = "failed"
SKIPPED = "skipped"
PROCESS_NOT_RUNNING_PID = None
DEFAULT_TEST_PHASE = -1
DEFAULT_ITERATION = 1
CONFIG_SEPARATOR = '='
MACHINE_SEPARATOR = '='
FILTER_NAME_ALLOW_ALL=''
|
arpras/Zopkio
|
zopkio/constants.py
|
Python
|
apache-2.0
| 1,224
|
from ciphertext import Ciphertext
import time
c = Ciphertext()
c.text_in('hello')
assert c.text_out() == 'hello'
d = Ciphertext()
d.text_in('No King of the Isles had ever needed a Hand')
with open('sample.txt','r') as source:
natural_text = source.read()
natural = Ciphertext()
natural.text_in(natural_text)
natural.pair_frequencies()
natural.triplet_frequencies()
natural.quadruplet_frequencies()
|
paulsbrookes/subcipher
|
index_based/test.py
|
Python
|
apache-2.0
| 405
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Binary entry-point for Incoherent RL experiments.
"""
from absl import app
from absl import flags
from absl import logging
from dopamine.discrete_domains import run_experiment
from dopamine.jax.agents.dqn import dqn_agent as jax_dqn_agent
from dopamine.jax.agents.implicit_quantile import implicit_quantile_agent as jax_implicit_quantile_agent
from dopamine.jax.agents.quantile import quantile_agent as jax_quantile_agent
from dopamine.jax.agents.rainbow import rainbow_agent as jax_rainbow_agent
import gin
import jax
import jax.numpy as jnp
from generalization_representations_rl_aistats22.atari import incoherent_dqn_agent
from generalization_representations_rl_aistats22.atari import incoherent_implicit_quantile_agent
flags.DEFINE_string('base_dir', None,
'Base directory to host all required sub-directories.')
flags.DEFINE_multi_string(
'gin_files', [], 'List of paths to gin configuration files.')
flags.DEFINE_multi_string(
'gin_bindings', [],
'Gin bindings to override the values set in the config files.')
FLAGS = flags.FLAGS
def sample_gaussian(rng, mu, var):
rng1, rng2 = jax.random.split(rng)
return rng1, mu + jnp.sqrt(var) * jax.random.normal(rng2)
@gin.configurable
def create_incoherent_agent(sess,
environment,
agent_name='incoherent_dqn',
summary_writer=None,
debug_mode=False):
"""Creates an incoherent agent.
Args:
sess: TF session, unused since we are in JAX.
environment: A gym environment (e.g. Atari 2600).
agent_name: str, name of the agent to create.
summary_writer: A Tensorflow summary writer to pass to the agent
for in-agent training statistics in Tensorboard.
debug_mode: bool, unused.
Returns:
    An agent of the requested type.
"""
assert agent_name is not None
del sess
del debug_mode
if agent_name == 'dqn':
return jax_dqn_agent.JaxDQNAgent(num_actions=environment.action_space.n,
summary_writer=summary_writer)
elif agent_name == 'quantile':
return jax_quantile_agent.JaxQuantileAgent(
num_actions=environment.action_space.n,
summary_writer=summary_writer)
elif agent_name == 'rainbow':
return jax_rainbow_agent.JaxRainbowAgent(
num_actions=environment.action_space.n,
summary_writer=summary_writer)
elif agent_name == 'implicit_quantile':
return jax_implicit_quantile_agent.JaxImplicitQuantileAgent(
num_actions=environment.action_space.n,
summary_writer=summary_writer)
elif agent_name == 'incoherent_dqn':
return incoherent_dqn_agent.IncoherentDQNAgent(
num_actions=environment.action_space.n, summary_writer=summary_writer)
elif agent_name == 'incoherent_implicit_quantile':
return incoherent_implicit_quantile_agent.IncoherentImplicitQuantileAgent(
num_actions=environment.action_space.n, summary_writer=summary_writer)
elif agent_name == 'mimplicit_quantile':
return incoherent_implicit_quantile_agent.IncoherentImplicitQuantileAgent(
num_actions=environment.action_space.n, coherence_weight=0.0,
tau=0.03, summary_writer=summary_writer)
elif agent_name == 'incoherent_mimplicit_quantile':
return incoherent_implicit_quantile_agent.IncoherentImplicitQuantileAgent(
num_actions=environment.action_space.n, tau=0.03,
summary_writer=summary_writer)
else:
raise ValueError('Unknown agent: {}'.format(agent_name))
def main(unused_argv):
"""Main method.
Args:
unused_argv: Arguments (unused).
"""
logging.set_verbosity(logging.INFO)
base_dir = FLAGS.base_dir
gin_files = FLAGS.gin_files
gin_bindings = FLAGS.gin_bindings
run_experiment.load_gin_configs(gin_files, gin_bindings)
runner = run_experiment.TrainRunner(base_dir, create_incoherent_agent)
runner.run_experiment()
if __name__ == '__main__':
flags.mark_flag_as_required('base_dir')
app.run(main)
|
google-research/google-research
|
generalization_representations_rl_aistats22/atari/train.py
|
Python
|
apache-2.0
| 4,622
|
# -*- coding: utf-8 -*-
__all__ = [
"PYTHAINLP_DEFAULT_DATA_DIR",
"get_full_data_path",
"get_pythainlp_data_path",
"get_pythainlp_path",
"misspell",
]
from pythainlp.tools.path import (
PYTHAINLP_DEFAULT_DATA_DIR,
get_full_data_path,
get_pythainlp_data_path,
get_pythainlp_path,
)
from pythainlp.tools.misspell import misspell
|
PyThaiNLP/pythainlp
|
pythainlp/tools/__init__.py
|
Python
|
apache-2.0
| 365
|
from block import *
import os
import time
import base64
from logging import ERROR, WARN, INFO, DEBUG
class file_data_reader(Block):
def on_load(self, config):
self.config = config
self.add_port("input", Port.PUSH, Port.UNNAMED, ["url"])
self.add_port("output", Port.PUSH, Port.UNNAMED, ["url", "data"])
def get_data(self, log):
return [base64.b64encode(BlockUtils.fetch_file_at_url(u, self.ip_address))
for u in log["url"]]
def recv_push(self, port, log):
if log.log.has_key("token"):
self.log(INFO, self.id + " got the finish token for directory " + log.log["token"][0])
else:
log.append_field("data", self.get_data(log.log))
self.buffered_push("output", log)
if self.config.has_key("sleep"):
time.sleep(self.config["sleep"])
|
mpi-sws-rse/datablox
|
blox/file_data_reader__1_0/b_file_data_reader.py
|
Python
|
apache-2.0
| 803
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Miscellaneous statistics utilities."""
import math
import numpy
from bayeslite.math_util import gamma_above
from bayeslite.util import float_sum
def arithmetic_mean(array):
"""Arithmetic mean of elements of `array`."""
return float_sum(array) / len(array)
def pearsonr(a0, a1):
"""Pearson r, product-moment correlation coefficient, of two samples.
Covariance divided by product of standard deviations.
https://en.wikipedia.org/wiki/Pearson_product-moment_correlation_coefficient#For_a_sample
"""
n = len(a0)
assert n == len(a1)
if n == 0:
# No data, so no notion of correlation.
return float('NaN')
a0 = numpy.array(a0)
a1 = numpy.array(a1)
m0 = numpy.mean(a0)
m1 = numpy.mean(a1)
num = numpy.sum((a0 - m0)*(a1 - m1))
den0_sq = numpy.sum((a0 - m0)**2)
den1_sq = numpy.sum((a1 - m1)**2)
den = math.sqrt(den0_sq*den1_sq)
if den == 0.0:
# No variation in at least one column, so no notion of
# correlation.
return float('NaN')
r = num / den
# Clamp r in [-1, +1] in case of floating-point error.
r = min(r, +1.0)
r = max(r, -1.0)
return r
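# Illustrative sanity checks (not part of the original module): perfectly
# linear samples give r of exactly +/-1 up to floating-point error, e.g.
#   pearsonr([1.0, 2.0, 3.0], [2.0, 4.0, 6.0]) -> 1.0
#   pearsonr([1.0, 2.0, 3.0], [3.0, 2.0, 1.0]) -> -1.0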
def signum(x):
"""Sign of `x`: ``-1 if x<0, 0 if x=0, +1 if x>0``."""
if x < 0:
return -1
elif 0 < x:
return +1
else:
return 0
def chi2_contingency(contingency):
"""Pearson chi^2 test of independence statistic on contingency table.
https://en.wikipedia.org/wiki/Pearson%27s_chi-squared_test#Test_of_independence
"""
contingency = numpy.array(contingency, dtype=int, ndmin=2)
assert contingency.ndim == 2
n = float(numpy.sum(contingency))
n0 = contingency.shape[0]
n1 = contingency.shape[1]
assert 0 < n0
assert 0 < n1
p0 = numpy.sum(contingency, axis=1)/n
p1 = numpy.sum(contingency, axis=0)/n
expected = n * numpy.outer(p0, p1)
return numpy.sum(((contingency - expected)**2)/expected)
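# Illustrative sanity checks (not part of the original module): a table with
# no association scores 0, while a symmetric 2x2 departure is positive:
#   chi2_contingency([[10, 10], [10, 10]]) -> 0.0
#   chi2_contingency([[20, 10], [10, 20]]) -> 100/15 ~= 6.67
#     (n=60, every expected cell is 15, so the sum is 4 * (5**2)/15)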
def f_oneway(groups):
"""F-test statistic for one-way analysis of variance (ANOVA).
https://en.wikipedia.org/wiki/F-test#Multiple-comparison_ANOVA_problems
``groups[i][j]`` is jth observation in ith group.
"""
# We turn groups into a list of numpy 1d-arrays, not into a numpy
# 2d-array, because the lengths are heterogeneous.
groups = [numpy.array(group, dtype=float, ndmin=1) for group in groups]
assert all(group.ndim == 1 for group in groups)
K = len(groups)
N = sum(len(group) for group in groups)
means = [numpy.mean(group) for group in groups]
overall_mean = numpy.sum(numpy.sum(group) for group in groups) / N
bgv = numpy.sum(len(group) * (mean - overall_mean)**2 / (K - 1)
for group, mean in zip(groups, means))
wgv = numpy.sum(numpy.sum((group - mean)**2)/float(N - K)
for group, mean in zip(groups, means))
# Special cases for which Python wants to raise an error rather
# than giving the sensible IEEE 754 result.
if wgv == 0.0:
if bgv == 0.0:
# No variation between or within groups, so we cannot
# ascertain any correlation between them -- it is as if we
# had no data about the groups: every value in every group
# is the same.
return float('NaN')
else:
# Within-group variability is zero, meaning for each
# group, each value is the same; between-group variability
# is nonzero, meaning there is variation between the
# groups. So if there were zero correlation we could not
# possibly observe this, whereas all finite F statistics
# could be observed with zero correlation.
return float('+inf')
return bgv / wgv
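# Illustrative sanity checks (not part of the original module), matching the
# special cases handled above:
#   f_oneway([[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]]) -> 0.0    (identical groups)
#   f_oneway([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]]) -> +inf   (zero within-group
#                                                            variance, nonzero
#                                                            between-group)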
def t_cdf(x, df):
"""Approximate CDF for Student's t distribution.
``t_cdf(x, df) = P(T_df < x)``
"""
if df <= 0:
raise ValueError('Degrees of freedom must be positive.')
if x == 0:
return 0.5
import scipy.stats
return scipy.stats.t.cdf(x, df)
def chi2_sf(x, df):
"""Survival function for chi^2 distribution."""
if df <= 0:
raise ValueError('Nonpositive df: %f' % (df,))
if x < 0:
return 1.
x = float(x)
df = float(df)
return gamma_above(df/2., x/2.)
def f_sf(x, df_num, df_den):
"""Approximate survival function for the F distribution.
``f_sf(x, df_num, df_den) = P(F_{df_num, df_den} > x)``
"""
if df_num <= 0 or df_den <= 0:
raise ValueError('Degrees of freedom must be positive.')
if x <= 0:
return 1.0
import scipy.stats
return scipy.stats.f.sf(x, df_num, df_den)
def gauss_suff_stats(data):
"""Summarize an array of data as (count, mean, standard deviation).
The algorithm is the "Online algorithm" presented in Knuth Volume
2, 3rd ed, p. 232, originally credited to "Note on a Method for
Calculating Corrected Sums of Squares and Products" B. P. Welford
Technometrics Vol. 4, No. 3 (Aug., 1962), pp. 419-420. This has
the advantage over naively accumulating the sum and sum of squares
that it is less subject to precision loss through massive
cancellation.
This version collected 8/31/15 from
https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
"""
n = 0
mean = 0.0
M2 = 0.0 # n * sigma^2
for x in data:
n = n + 1
delta = x - mean
mean = mean + delta/n
M2 = M2 + delta*(x - mean)
if n < 1:
return (n, mean, 0.0)
else:
return (n, mean, math.sqrt(M2 / float(n)))
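# Illustrative sanity check (not part of the original module): for the classic
# example data set the population standard deviation is exactly 2, so
#   gauss_suff_stats([2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0]) -> (8, 5.0, 2.0)
# The single-pass update above avoids the cancellation that a naive
# sum / sum-of-squares accumulation can suffer on large, nearly equal values.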
|
probcomp/bayeslite
|
src/stats.py
|
Python
|
apache-2.0
| 6,235
|
# import the necessary packages
from skimage.segmentation import slic
from skimage.segmentation import mark_boundaries
from skimage.util import img_as_float
from skimage.transform import rescale
from skimage import io
import matplotlib.pyplot as plt
import argparse
img_file = "../data/similar_objects/1.JPG"
# load the image and convert it to a floating point data type
image = io.imread(img_file)
image = rescale(image, 0.25)
image = img_as_float(image)
# loop over the number of segments
for numSegments in (100, 200, 300):
# apply SLIC and extract (approximately) the supplied number
# of segments
segments = slic(image, n_segments = numSegments, sigma = 5)
# show the output of SLIC
fig = plt.figure("Superpixels -- %d segments" % (numSegments))
ax = fig.add_subplot(1, 1, 1)
ax.imshow(mark_boundaries(image, segments))
plt.axis("off")
# show the plots
plt.show()
|
m3rik/nn
|
CNTK/GeneralizedHoughTransform/slic.py
|
Python
|
apache-2.0
| 900
|
#!/usr/bin/python
import re
from multiprocessing import Process
def avgList(list):
min = list[0]
max = list[0]
for val in list:
if val< min:
min=val
if val > max:
max=val
list.remove(min)
list.remove(max)
cummulated = 0
count = 0
for val in list:
cummulated = cummulated+val
count = count+1
avg = float(cummulated)/count
return avg
def parsAndFillList(fileName, refString, list):
startingMinute=0
startingSeconds=0
startingHoure=0
f = open(fileName, 'r')
for line in f:
if refString in line:
startingHoure = int(line[9:11])
startingMinute = int(line[12:14])
startingSeconds = int(line[15:17])
break
startingMeasuresSecond = startingSeconds+30
startingMeasuresMinute = startingMinute
startingMeasuresHoure = startingHoure
if startingMeasuresSecond > 60 :
startingMeasuresMinute = startingMeasuresMinute+1
startingMeasuresSecond = startingMeasuresSecond - 60
if startingMeasuresMinute >=60:
startingMeasuresMinute = startingMeasuresMinute-60
startingMeasuresHoure = startingMeasuresHoure+1
# print startingMeasuresHoure, startingMeasuresMinute, startingMeasuresSecond
counter = 0
f = open(fileName, 'r')
for line in f:
if refString in line:
houre = int(line[9:11])
minute = int(line[12:14])
seconds = int(line[15:17])
if minute == startingMeasuresMinute:
if seconds>= startingMeasuresSecond:
match = re.search(r"[^a-zA-Z](node)[^a-zA-Z]", line)
if not line[match.end():] in list:
list.append(line[match.end():])
counter= counter+1
if minute > startingMeasuresMinute and minute <= startingMeasuresMinute+1:
if seconds < startingMeasuresSecond:
match = re.search(r"[^a-zA-Z](node)[^a-zA-Z]", line)
if not line[match.end():] in list:
list.append(line[match.end():])
counter=counter+1
if startingMeasuresMinute==59:
if houre > startingMeasuresHoure and minute < 1:
if seconds < startingMeasuresSecond:
match = re.search(r"[^a-zA-Z](node)[^a-zA-Z]", line)
if not line[match.end():] in list:
counter=counter+1
list.append(line[match.end():])
return counter
def countFromList(fileName, refString, list):
#print fileName, refString, list
startingMinute=0
startingSeconds=0
startingHoure=0
f = open(fileName, 'r')
for line in f:
#if refString in line:
startingHoure = int(line[9:11])
startingMinute = int(line[12:14])
startingSeconds = int(line[15:17])
break
startingMeasuresSecond = startingSeconds+30
startingMeasuresMinute = startingMinute
startingMeasuresHoure = startingHoure
if startingMeasuresSecond > 60 :
startingMeasuresMinute = startingMeasuresMinute+1
startingMeasuresSecond = startingMeasuresSecond - 60
if startingMeasuresMinute >=60:
startingMeasuresMinute = startingMeasuresMinute-60
startingMeasuresHoure = startingMeasuresHoure+1
# print startingMeasuresHoure, startingMeasuresMinute, startingMeasuresSecond
counter = 0
alreadySeen=[]
f = open(fileName, 'r')
for line in f:
if refString in line:
houre = int(line[9:11])
minute = int(line[12:14])
seconds = int(line[15:17])
#if minute == startingMeasuresMinute:
#if seconds>= startingMeasuresSecond:
match = re.search(r"[^a-zA-Z](node)[^a-zA-Z]", line)
s=line[match.end():]
#print s
if s in list:
#print "in"
if not line[match.end():] in alreadySeen:
alreadySeen.append(line[match.end():])
counter= counter+1
#print counter, refString
#if minute > startingMeasuresMinute and minute <= startingMeasuresMinute+1:
# if seconds < startingMeasuresSecond:
# match = re.search(r"[^a-zA-Z](node)[^a-zA-Z]", line)
# if line[match.end():] in list:
# if not line[match.end():] in alreadySeen:
# alreadySeen.append(line[match.end():])
# counter= counter+1
#if startingMeasuresMinute==59:
# if houre > startingMeasuresHoure and minute < 1:
# if seconds < startingMeasuresSecond:
# match = re.search(r"[^a-zA-Z](node)[^a-zA-Z]", line)
# if line[match.end():] in list:
# if not line[match.end():] in alreadySeen:
# alreadySeen.append(line[match.end():])
# counter= counter+1
return counter
def count(k,i, p):
listClientHandle=[]
listRTHandle=[]
listRTHandleBeforeCommit=[]
listSCHandle = []
listSCTringer = []
listCommit = []
listTest = []
totalNBNM = i
theoricalNBHB = float(totalNBNM)*60/3
count=0
threads=[]
for runNB in range(1,6):
count=count+1
listHb=[]
clientThreads = []
rtThreads = []
scThreads = []
clientHandle=0
resourceTrackerHandle=0
schedulerHandle=0
for j in range(0,k+1):
fileName = 'results2/xp_rt' + str(j) + '_' + str(k) + '_' + str(i) + '_' + str(runNB) + '.log'
#print fileName
clientHandle = clientHandle + parsAndFillList(fileName, "sent heartbeat", listHb)
#print clientHandle
#print len(listHb)
#resourceTrackerHandle =resourceTrackerHandle + countFromList(fileName, "persisting heartbeat", listHb)
fileName = 'results2/xp_scheduler_' + str(k) + '_' + str(i) + '_' + str(runNB) + '.log'
schedulerHandle = countFromList(fileName, "hb handled",listHb)
listSCHandle.append(schedulerHandle)
#listRTHandle.append(resourceTrackerHandle)
listClientHandle.append(clientHandle)
#print "rt"
#averageResourceTrackerHandle=avgList(listRTHandle)
#print "sc"
averageSchedulerHandle=avgList(listSCHandle)
#print "client"
averageClientHandle=avgList(listClientHandle)
#averagePercentageHBRT=float(averageResourceTrackerHandle)/theoricalNBHB
averagePrecentageHBSC=float(averageSchedulerHandle)/theoricalNBHB
averagepercentageHBClient=float(averageClientHandle)/theoricalNBHB
if(p!=None):
p.join()
print totalNBNM, averageSchedulerHandle, averageClientHandle, theoricalNBHB, averagepercentageHBClient,averagePrecentageHBSC
#result=str(totalNBNM) + "\t" + str(averageSchedulerHandle) + "\t" + str(averageClientHandle) + "\t" + str(theoricalNBHB) + "\t" + str(averagepercentageHBClient) + "\t" + str(averagePrecentageHBSC)
for k in range(0,1):
print k
listProcess=[]
p=None
for i in [2000,2500,3000,3500,4000,4500,5000,5500,6000,6500,7000,7500,8000,8500,9000,9500,10000,11000,12000,14000,15000,16000,17000,18000,19000,20000,30000]:
#,40000,50000,60000,70000,80000,90000,100000]:
p=Process(target=count,args=(k,i,p))
p.start()
listProcess.append(p)
for p in listProcess:
p.join()
#print t.getString()
|
srijeyanthan/hadoop_2_4_0_experimental_version
|
scripts/deployment_scripts/parslogs_mt.py
|
Python
|
apache-2.0
| 7,713
|
# vim:fileencoding=utf-8:ts=2:sw=2:expandtab
import json
##################################################
class JobSpecification(object):
Name = ""
def __init__(self, *, InputKey, OutputKeyPrefix):
if not self.Name:
self.Name = self.__class__.__name__.replace('Job', '')
self.InputKey = InputKey
self.OutputKeyPrefix = OutputKeyPrefix
def ToJSON(self):
# Validate that the basic fields are there
    if not (self.InputKey and self.OutputKeyPrefix):
raise Exception("InputKey and OutputKeyPrefix are required fields.")
# Prepare params to configure job
Params = {
"InputKey": self.InputKey,
"OutputKeyPrefix": self.OutputKeyPrefix,
}
# Update with passed in "ExtraParams"
if isinstance(self.ExtraParams, dict):
Params.update(self.ExtraParams)
# Prepare the JSON to return
return json.dumps({
"Type": "Job",
"Job": self.Name,
"Params": Params
})
##################################################
class TranscodeVideoJob(JobSpecification):
Name = "TranscodeVideo"
@property
def ExtraParams(self):
return {
"OutputFormats": ('webm', 'mp4')
}
##################################################
class ConvertToPDFJob(JobSpecification):
Name = "ConvertToPDF"
@property
def ExtraParams(self):
return {
"OutputKey": "output.pdf"
}
##################################################
class ResizeImageJob(JobSpecification):
@property
def ExtraParams(self):
return {
"PreferredOutputs": (
(0, 0, 'Original.jpg'),
(1200, 1200, 'Regular.jpg'),
(480, 480, 'Small.jpg'),
(160, 160, 'Thumbnail.jpg'),
)}
##################################################
class NormalizeImageJob(JobSpecification):
Name = "NormalizeImage"
@property
def ExtraParams(self):
return {
"PreferredOutputs": ((200, 200, '200x200-nomarlized.jpg'), (300, 300, '300x300-normalized.jpg'),)
}
|
appcove/DocStruct
|
Python/DocStruct/JobSpecification.py
|
Python
|
apache-2.0
| 1,993
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the API endpoint"""
import datetime
import httplib
import random
import StringIO
import boto
from boto.ec2 import regioninfo
from boto import exception as boto_exc
import webob
from nova import block_device
from nova import context
from nova import exception
from nova import test
from nova.api import auth
from nova.api import ec2
from nova.api.ec2 import apirequest
from nova.api.ec2 import cloud
from nova.api.ec2 import ec2utils
class FakeHttplibSocket(object):
"""a fake socket implementation for httplib.HTTPResponse, trivial"""
def __init__(self, response_string):
self.response_string = response_string
self._buffer = StringIO.StringIO(response_string)
def makefile(self, _mode, _other):
"""Returns the socket's internal buffer"""
return self._buffer
class FakeHttplibConnection(object):
"""A fake httplib.HTTPConnection for boto to use
requests made via this connection actually get translated and routed into
our WSGI app, we then wait for the response and turn it back into
the httplib.HTTPResponse that boto expects.
"""
def __init__(self, app, host, is_secure=False):
self.app = app
self.host = host
def request(self, method, path, data, headers):
req = webob.Request.blank(path)
req.method = method
req.body = data
req.headers = headers
req.headers['Accept'] = 'text/html'
req.host = self.host
# Call the WSGI app, get the HTTP response
resp = str(req.get_response(self.app))
# For some reason, the response doesn't have "HTTP/1.0 " prepended; I
# guess that's a function the web server usually provides.
resp = "HTTP/1.0 %s" % resp
self.sock = FakeHttplibSocket(resp)
self.http_response = httplib.HTTPResponse(self.sock)
# NOTE(vish): boto is accessing private variables for some reason
self._HTTPConnection__response = self.http_response
self.http_response.begin()
def getresponse(self):
return self.http_response
def getresponsebody(self):
return self.sock.response_string
def close(self):
"""Required for compatibility with boto/tornado"""
pass
class XmlConversionTestCase(test.TestCase):
"""Unit test api xml conversion"""
def test_number_conversion(self):
conv = ec2utils._try_convert
self.assertEqual(conv('None'), None)
self.assertEqual(conv('True'), True)
self.assertEqual(conv('TRUE'), True)
self.assertEqual(conv('true'), True)
self.assertEqual(conv('False'), False)
self.assertEqual(conv('FALSE'), False)
self.assertEqual(conv('false'), False)
self.assertEqual(conv('0'), 0)
self.assertEqual(conv('42'), 42)
self.assertEqual(conv('3.14'), 3.14)
self.assertEqual(conv('-57.12'), -57.12)
self.assertEqual(conv('0x57'), 0x57)
self.assertEqual(conv('-0x57'), -0x57)
self.assertEqual(conv('-'), '-')
self.assertEqual(conv('-0'), 0)
self.assertEqual(conv('0.0'), 0.0)
self.assertEqual(conv('1e-8'), 0.0)
self.assertEqual(conv('-1e-8'), 0.0)
self.assertEqual(conv('0xDD8G'), '0xDD8G')
self.assertEqual(conv('0XDD8G'), '0XDD8G')
self.assertEqual(conv('-stringy'), '-stringy')
self.assertEqual(conv('stringy'), 'stringy')
self.assertEqual(conv('add'), 'add')
self.assertEqual(conv('remove'), 'remove')
self.assertEqual(conv(''), '')
class Ec2utilsTestCase(test.TestCase):
def test_ec2_id_to_id(self):
self.assertEqual(ec2utils.ec2_id_to_id('i-0000001e'), 30)
self.assertEqual(ec2utils.ec2_id_to_id('ami-1d'), 29)
self.assertEqual(ec2utils.ec2_id_to_id('snap-0000001c'), 28)
self.assertEqual(ec2utils.ec2_id_to_id('vol-0000001b'), 27)
def test_bad_ec2_id(self):
self.assertRaises(exception.InvalidEc2Id,
ec2utils.ec2_id_to_id,
'badone')
def test_id_to_ec2_id(self):
self.assertEqual(ec2utils.id_to_ec2_id(30), 'i-0000001e')
self.assertEqual(ec2utils.id_to_ec2_id(29, 'ami-%08x'), 'ami-0000001d')
self.assertEqual(ec2utils.id_to_ec2_snap_id(28), 'snap-0000001c')
self.assertEqual(ec2utils.id_to_ec2_vol_id(27), 'vol-0000001b')
def test_dict_from_dotted_str(self):
in_str = [('BlockDeviceMapping.1.DeviceName', '/dev/sda1'),
('BlockDeviceMapping.1.Ebs.SnapshotId', 'snap-0000001c'),
('BlockDeviceMapping.1.Ebs.VolumeSize', '80'),
('BlockDeviceMapping.1.Ebs.DeleteOnTermination', 'false'),
('BlockDeviceMapping.2.DeviceName', '/dev/sdc'),
('BlockDeviceMapping.2.VirtualName', 'ephemeral0')]
expected_dict = {
'block_device_mapping': {
'1': {'device_name': '/dev/sda1',
'ebs': {'snapshot_id': 'snap-0000001c',
'volume_size': 80,
'delete_on_termination': False}},
'2': {'device_name': '/dev/sdc',
'virtual_name': 'ephemeral0'}}}
out_dict = ec2utils.dict_from_dotted_str(in_str)
self.assertDictMatch(out_dict, expected_dict)
    def test_properties_root_device_name(self):
mappings = [{"device": "/dev/sda1", "virtual": "root"}]
properties0 = {'mappings': mappings}
properties1 = {'root_device_name': '/dev/sdb', 'mappings': mappings}
root_device_name = block_device.properties_root_device_name(
properties0)
self.assertEqual(root_device_name, '/dev/sda1')
root_device_name = block_device.properties_root_device_name(
properties1)
self.assertEqual(root_device_name, '/dev/sdb')
def test_mapping_prepend_dev(self):
mappings = [
{'virtual': 'ami',
'device': 'sda1'},
{'virtual': 'root',
'device': '/dev/sda1'},
{'virtual': 'swap',
'device': 'sdb1'},
{'virtual': 'swap',
'device': '/dev/sdb2'},
{'virtual': 'ephemeral0',
'device': 'sdc1'},
{'virtual': 'ephemeral1',
'device': '/dev/sdc1'}]
expected_result = [
{'virtual': 'ami',
'device': 'sda1'},
{'virtual': 'root',
'device': '/dev/sda1'},
{'virtual': 'swap',
'device': '/dev/sdb1'},
{'virtual': 'swap',
'device': '/dev/sdb2'},
{'virtual': 'ephemeral0',
'device': '/dev/sdc1'},
{'virtual': 'ephemeral1',
'device': '/dev/sdc1'}]
self.assertDictListMatch(block_device.mappings_prepend_dev(mappings),
expected_result)
class ApiEc2TestCase(test.TestCase):
"""Unit test for the cloud controller on an EC2 API"""
def setUp(self):
super(ApiEc2TestCase, self).setUp()
self.host = '127.0.0.1'
# NOTE(vish): skipping the Authorizer
roles = ['sysadmin', 'netadmin']
ctxt = context.RequestContext('fake', 'fake', roles=roles)
self.app = auth.InjectContext(ctxt,
ec2.Requestify(ec2.Authorizer(ec2.Executor()),
'nova.api.ec2.cloud.CloudController'))
def expect_http(self, host=None, is_secure=False, api_version=None):
"""Returns a new EC2 connection"""
self.ec2 = boto.connect_ec2(
aws_access_key_id='fake',
aws_secret_access_key='fake',
is_secure=False,
region=regioninfo.RegionInfo(None, 'test', self.host),
port=8773,
path='/services/Cloud')
if api_version:
self.ec2.APIVersion = api_version
self.mox.StubOutWithMock(self.ec2, 'new_http_connection')
self.http = FakeHttplibConnection(
self.app, '%s:8773' % (self.host), False)
# pylint: disable=E1103
if boto.Version >= '2':
self.ec2.new_http_connection(host or '%s:8773' % (self.host),
is_secure).AndReturn(self.http)
else:
self.ec2.new_http_connection(host, is_secure).AndReturn(self.http)
return self.http
def test_return_valid_isoformat(self):
"""
Ensure that the ec2 api returns datetime in xs:dateTime
(which apparently isn't datetime.isoformat())
NOTE(ken-pepple): https://bugs.launchpad.net/nova/+bug/721297
"""
conv = apirequest._database_to_isoformat
# sqlite database representation with microseconds
time_to_convert = datetime.datetime.strptime(
"2011-02-21 20:14:10.634276",
"%Y-%m-%d %H:%M:%S.%f")
self.assertEqual(
conv(time_to_convert),
'2011-02-21T20:14:10.634Z')
        # mysql database representation (no microseconds)
time_to_convert = datetime.datetime.strptime(
"2011-02-21 19:56:18",
"%Y-%m-%d %H:%M:%S")
self.assertEqual(
conv(time_to_convert),
'2011-02-21T19:56:18.000Z')
def test_xmlns_version_matches_request_version(self):
self.expect_http(api_version='2010-10-30')
self.mox.ReplayAll()
# Any request should be fine
self.ec2.get_all_instances()
self.assertTrue(self.ec2.APIVersion in self.http.getresponsebody(),
'The version in the xmlns of the response does '
'not match the API version given in the request.')
def test_describe_instances(self):
"""Test that, after creating a user and a project, the describe
instances call to the API works properly"""
self.expect_http()
self.mox.ReplayAll()
self.assertEqual(self.ec2.get_all_instances(), [])
def test_terminate_invalid_instance(self):
"""Attempt to terminate an invalid instance"""
self.expect_http()
self.mox.ReplayAll()
self.assertRaises(boto_exc.EC2ResponseError,
self.ec2.terminate_instances, "i-00000005")
def test_get_all_key_pairs(self):
"""Test that, after creating a user and project and generating
a key pair, that the API call to list key pairs works properly"""
self.expect_http()
self.mox.ReplayAll()
keyname = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
for x in range(random.randint(4, 8)))
# NOTE(vish): create depends on pool, so call helper directly
cloud._gen_key(context.get_admin_context(), 'fake', keyname)
rv = self.ec2.get_all_key_pairs()
results = [k for k in rv if k.name == keyname]
self.assertEquals(len(results), 1)
def test_create_duplicate_key_pair(self):
"""Test that, after successfully generating a keypair,
requesting a second keypair with the same name fails sanely"""
self.expect_http()
self.mox.ReplayAll()
keyname = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
for x in range(random.randint(4, 8)))
# NOTE(vish): create depends on pool, so call helper directly
self.ec2.create_key_pair('test')
try:
self.ec2.create_key_pair('test')
except boto_exc.EC2ResponseError, e:
if e.code == 'KeyPairExists':
pass
else:
self.fail("Unexpected EC2ResponseError: %s "
"(expected KeyPairExists)" % e.code)
else:
self.fail('Exception not raised.')
def test_get_all_security_groups(self):
"""Test that we can retrieve security groups"""
self.expect_http()
self.mox.ReplayAll()
rv = self.ec2.get_all_security_groups()
self.assertEquals(len(rv), 1)
self.assertEquals(rv[0].name, 'default')
def test_create_delete_security_group(self):
"""Test that we can create a security group"""
self.expect_http()
self.mox.ReplayAll()
security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
for x in range(random.randint(4, 8)))
self.ec2.create_security_group(security_group_name, 'test group')
self.expect_http()
self.mox.ReplayAll()
rv = self.ec2.get_all_security_groups()
self.assertEquals(len(rv), 2)
self.assertTrue(security_group_name in [group.name for group in rv])
self.expect_http()
self.mox.ReplayAll()
self.ec2.delete_security_group(security_group_name)
def test_group_name_valid_chars_security_group(self):
""" Test that we sanely handle invalid security group names.
API Spec states we should only accept alphanumeric characters,
spaces, dashes, and underscores. """
self.expect_http()
self.mox.ReplayAll()
# Test block group_name of non alphanumeric characters, spaces,
# dashes, and underscores.
security_group_name = "aa #^% -=99"
self.assertRaises(boto_exc.EC2ResponseError,
self.ec2.create_security_group,
security_group_name,
'test group')
def test_group_name_valid_length_security_group(self):
"""Test that we sanely handle invalid security group names.
API Spec states that the length should not exceed 255 chars """
self.expect_http()
self.mox.ReplayAll()
# Test block group_name > 255 chars
security_group_name = "".join(random.choice("poiuytrewqasdfghjklmnbvc")
for x in range(random.randint(256, 266)))
self.assertRaises(boto_exc.EC2ResponseError,
self.ec2.create_security_group,
security_group_name,
'test group')
def test_authorize_revoke_security_group_cidr(self):
"""
Test that we can add and remove CIDR based rules
to a security group
"""
self.expect_http()
self.mox.ReplayAll()
security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
for x in range(random.randint(4, 8)))
group = self.ec2.create_security_group(security_group_name,
'test group')
self.expect_http()
self.mox.ReplayAll()
group.connection = self.ec2
group.authorize('tcp', 80, 81, '0.0.0.0/0')
group.authorize('icmp', -1, -1, '0.0.0.0/0')
group.authorize('udp', 80, 81, '0.0.0.0/0')
group.authorize('tcp', 1, 65535, '0.0.0.0/0')
group.authorize('udp', 1, 65535, '0.0.0.0/0')
group.authorize('icmp', 1, 0, '0.0.0.0/0')
group.authorize('icmp', 0, 1, '0.0.0.0/0')
group.authorize('icmp', 0, 0, '0.0.0.0/0')
def _assert(message, *args):
try:
group.authorize(*args)
except boto_exc.EC2ResponseError as e:
self.assertEqual(e.status, 400, 'Expected status to be 400')
self.assertIn(message, e.error_message, e.error_message)
else:
raise self.failureException, 'EC2ResponseError not raised'
# Invalid CIDR address
_assert('Invalid CIDR', 'tcp', 80, 81, '0.0.0.0/0444')
# Missing ports
_assert('Not enough parameters', 'tcp', '0.0.0.0/0')
# from port cannot be greater than to port
_assert('Invalid port range', 'tcp', 100, 1, '0.0.0.0/0')
# For tcp, negative values are not allowed
_assert('Invalid port range', 'tcp', -1, 1, '0.0.0.0/0')
# For tcp, valid port range 1-65535
_assert('Invalid port range', 'tcp', 1, 65599, '0.0.0.0/0')
# Invalid Cidr for ICMP type
_assert('Invalid CIDR', 'icmp', -1, -1, '0.0.444.0/4')
# Invalid protocol
_assert('An unknown error has occurred', 'xyz', 1, 14, '0.0.0.0/0')
# Invalid port
_assert('An unknown error has occurred', 'tcp', " ", "81", '0.0.0.0/0')
# Invalid icmp port
_assert('An unknown error has occurred', 'icmp', " ", "81",
'0.0.0.0/0')
# Invalid CIDR Address
_assert('Invalid CIDR', 'icmp', -1, -1, '0.0.0.0')
# Invalid CIDR Address
_assert('Invalid CIDR', 'icmp', -1, -1, '0.0.0.0/')
# Invalid Cidr ports
_assert('Invalid port range', 'icmp', 1, 256, '0.0.0.0/0')
self.expect_http()
self.mox.ReplayAll()
rv = self.ec2.get_all_security_groups()
group = [grp for grp in rv if grp.name == security_group_name][0]
self.assertEquals(len(group.rules), 8)
self.assertEquals(int(group.rules[0].from_port), 80)
self.assertEquals(int(group.rules[0].to_port), 81)
self.assertEquals(len(group.rules[0].grants), 1)
self.assertEquals(str(group.rules[0].grants[0]), '0.0.0.0/0')
self.expect_http()
self.mox.ReplayAll()
group.connection = self.ec2
group.revoke('tcp', 80, 81, '0.0.0.0/0')
group.revoke('icmp', -1, -1, '0.0.0.0/0')
group.revoke('udp', 80, 81, '0.0.0.0/0')
group.revoke('tcp', 1, 65535, '0.0.0.0/0')
group.revoke('udp', 1, 65535, '0.0.0.0/0')
group.revoke('icmp', 1, 0, '0.0.0.0/0')
group.revoke('icmp', 0, 1, '0.0.0.0/0')
group.revoke('icmp', 0, 0, '0.0.0.0/0')
self.expect_http()
self.mox.ReplayAll()
self.ec2.delete_security_group(security_group_name)
self.expect_http()
self.mox.ReplayAll()
group.connection = self.ec2
rv = self.ec2.get_all_security_groups()
self.assertEqual(len(rv), 1)
self.assertEqual(rv[0].name, 'default')
return
def test_authorize_revoke_security_group_cidr_v6(self):
"""
Test that we can add and remove CIDR based rules
to a security group for IPv6
"""
self.expect_http()
self.mox.ReplayAll()
security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
for x in range(random.randint(4, 8)))
group = self.ec2.create_security_group(security_group_name,
'test group')
self.expect_http()
self.mox.ReplayAll()
group.connection = self.ec2
group.authorize('tcp', 80, 81, '::/0')
self.expect_http()
self.mox.ReplayAll()
rv = self.ec2.get_all_security_groups()
group = [grp for grp in rv if grp.name == security_group_name][0]
self.assertEquals(len(group.rules), 1)
self.assertEquals(int(group.rules[0].from_port), 80)
self.assertEquals(int(group.rules[0].to_port), 81)
self.assertEquals(len(group.rules[0].grants), 1)
self.assertEquals(str(group.rules[0].grants[0]), '::/0')
self.expect_http()
self.mox.ReplayAll()
group.connection = self.ec2
group.revoke('tcp', 80, 81, '::/0')
self.expect_http()
self.mox.ReplayAll()
self.ec2.delete_security_group(security_group_name)
self.expect_http()
self.mox.ReplayAll()
group.connection = self.ec2
rv = self.ec2.get_all_security_groups()
self.assertEqual(len(rv), 1)
self.assertEqual(rv[0].name, 'default')
return
def test_authorize_revoke_security_group_foreign_group(self):
"""
Test that we can grant and revoke another security group access
to a security group
"""
self.expect_http()
self.mox.ReplayAll()
rand_string = 'sdiuisudfsdcnpaqwertasd'
security_group_name = "".join(random.choice(rand_string)
for x in range(random.randint(4, 8)))
other_security_group_name = "".join(random.choice(rand_string)
for x in range(random.randint(4, 8)))
group = self.ec2.create_security_group(security_group_name,
'test group')
self.expect_http()
self.mox.ReplayAll()
other_group = self.ec2.create_security_group(other_security_group_name,
'some other group')
self.expect_http()
self.mox.ReplayAll()
group.connection = self.ec2
group.authorize(src_group=other_group)
self.expect_http()
self.mox.ReplayAll()
rv = self.ec2.get_all_security_groups()
        # I don't bother checking that we actually find it here,
# because the create/delete unit test further up should
# be good enough for that.
for group in rv:
if group.name == security_group_name:
self.assertEquals(len(group.rules), 3)
self.assertEquals(len(group.rules[0].grants), 1)
self.assertEquals(str(group.rules[0].grants[0]), '%s-%s' %
(other_security_group_name, 'fake'))
self.expect_http()
self.mox.ReplayAll()
rv = self.ec2.get_all_security_groups()
for group in rv:
if group.name == security_group_name:
self.expect_http()
self.mox.ReplayAll()
group.connection = self.ec2
group.revoke(src_group=other_group)
self.expect_http()
self.mox.ReplayAll()
self.ec2.delete_security_group(security_group_name)
self.ec2.delete_security_group(other_security_group_name)
|
usc-isi/essex-baremetal-support
|
nova/tests/test_api.py
|
Python
|
apache-2.0
| 22746
|
# -*- coding: utf-8 -*-
'''
This module contains all of the routines needed to set up a master server, this
involves preparing the three listeners and the workers needed by the master.
'''
# Import python libs
from __future__ import absolute_import
import copy
import os
import re
import sys
import time
import errno
import logging
import tempfile
import multiprocessing
import traceback
# Import third party libs
import zmq
from Crypto.PublicKey import RSA
# pylint: disable=import-error,no-name-in-module,redefined-builtin
import salt.ext.six as six
from salt.ext.six.moves import range
# pylint: enable=import-error,no-name-in-module,redefined-builtin
import zmq.eventloop.ioloop
# support pyzmq 13.0.x, TODO: remove once we force people to 14.0.x
if not hasattr(zmq.eventloop.ioloop, 'ZMQIOLoop'):
zmq.eventloop.ioloop.ZMQIOLoop = zmq.eventloop.ioloop.IOLoop
import tornado.gen # pylint: disable=F0401
# Import salt libs
import salt.crypt
import salt.utils
import salt.client
import salt.payload
import salt.pillar
import salt.state
import salt.runner
import salt.auth
import salt.wheel
import salt.minion
import salt.search
import salt.key
import salt.acl
import salt.engines
import salt.fileserver
import salt.daemons.masterapi
import salt.defaults.exitcodes
import salt.transport.server
import salt.utils.atomicfile
import salt.utils.event
import salt.utils.job
import salt.utils.reactor
import salt.utils.verify
import salt.utils.minions
import salt.utils.gzip_util
import salt.utils.process
import salt.utils.zeromq
import salt.utils.jid
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.exceptions import FileserverConfigError
from salt.utils.debug import (
enable_sigusr1_handler, enable_sigusr2_handler, inspect_stack
)
from salt.utils.event import tagify
from salt.utils.master import ConnectedCache
try:
import resource
HAS_RESOURCE = True
except ImportError:
# resource is not available on windows
HAS_RESOURCE = False
# Import halite libs
try:
import halite # pylint: disable=import-error
HAS_HALITE = True
except ImportError:
HAS_HALITE = False
log = logging.getLogger(__name__)
class SMaster(object):
'''
Create a simple salt-master, this will generate the top-level master
'''
secrets = {} # mapping of key -> {'secret': multiprocessing type, 'reload': FUNCTION}
def __init__(self, opts):
'''
Create a salt master server instance
:param dict opts: The salt options dictionary
'''
self.opts = opts
self.master_key = salt.crypt.MasterKeys(self.opts)
self.key = self.__prep_key()
# We need __setstate__ and __getstate__ to also pickle 'SMaster.secrets'.
# Otherwise, 'SMaster.secrets' won't be copied over to the spawned process
# on Windows since spawning processes on Windows requires pickling.
# These methods are only used when pickling so will not be used on
# non-Windows platforms.
def __setstate__(self, state):
self.opts = state['opts']
self.master_key = state['master_key']
self.key = state['key']
SMaster.secrets = state['secrets']
def __getstate__(self):
return {'opts': self.opts,
'master_key': self.master_key,
'key': self.key,
'secrets': SMaster.secrets}
def __prep_key(self):
'''
A key needs to be placed in the filesystem with permissions 0400 so
clients are required to run as root.
'''
return salt.daemons.masterapi.access_keys(self.opts)
class Maintenance(multiprocessing.Process):
'''
    A generalized maintenance process which performs maintenance
routines.
'''
def __init__(self, opts):
'''
Create a maintenance instance
:param dict opts: The salt options
'''
super(Maintenance, self).__init__()
self.opts = opts
# How often do we perform the maintenance tasks
self.loop_interval = int(self.opts['loop_interval'])
# Track key rotation intervals
self.rotate = int(time.time())
def _post_fork_init(self):
'''
Some things need to be init'd after the fork has completed
The easiest example is that one of these module types creates a thread
in the parent process, then once the fork happens you'll start getting
errors like "WARNING: Mixing fork() and threads detected; memory leaked."
'''
# Init fileserver manager
self.fileserver = salt.fileserver.Fileserver(self.opts)
# Load Runners
ropts = dict(self.opts)
ropts['quiet'] = True
runner_client = salt.runner.RunnerClient(ropts)
# Load Returners
self.returners = salt.loader.returners(self.opts, {})
# Init Scheduler
self.schedule = salt.utils.schedule.Schedule(self.opts,
runner_client.functions_dict(),
returners=self.returners)
self.ckminions = salt.utils.minions.CkMinions(self.opts)
# Make Event bus for firing
self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
# Init any values needed by the git ext pillar
self.git_pillar = salt.daemons.masterapi.init_git_pillar(self.opts)
# Set up search object
self.search = salt.search.Search(self.opts)
def run(self):
'''
This is the general passive maintenance process controller for the Salt
master.
This is where any data that needs to be cleanly maintained from the
master is maintained.
'''
salt.utils.appendproctitle('Maintenance')
# init things that need to be done after the process is forked
self._post_fork_init()
# Make Start Times
last = int(time.time())
# Clean out the fileserver backend cache
salt.daemons.masterapi.clean_fsbackend(self.opts)
# Clean out pub auth
salt.daemons.masterapi.clean_pub_auth(self.opts)
old_present = set()
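        # Main maintenance loop: prune old jobs and expired tokens once per
        # loop_interval, then refresh the search index, git pillar, scheduler,
        # presence events, key rotation and fileserver backends on every pass
        # before sleeping for loop_interval.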
while True:
now = int(time.time())
if (now - last) >= self.loop_interval:
salt.daemons.masterapi.clean_old_jobs(self.opts)
salt.daemons.masterapi.clean_expired_tokens(self.opts)
self.handle_search(now, last)
self.handle_git_pillar()
self.handle_schedule()
self.handle_presence(old_present)
self.handle_key_rotate(now)
salt.daemons.masterapi.fileserver_update(self.fileserver)
salt.utils.verify.check_max_open_files(self.opts)
last = now
try:
time.sleep(self.loop_interval)
except KeyboardInterrupt:
break
def handle_search(self, now, last):
'''
Update the search index
'''
if self.opts.get('search'):
if now - last >= self.opts['search_index_interval']:
self.search.index()
def handle_key_rotate(self, now):
'''
        Rotate the master AES key when required
'''
to_rotate = False
dfn = os.path.join(self.opts['cachedir'], '.dfn')
try:
stats = os.stat(dfn)
if stats.st_mode == 0o100400:
to_rotate = True
else:
log.error('Found dropfile with incorrect permissions, ignoring...')
os.remove(dfn)
except os.error:
pass
if self.opts.get('publish_session'):
if now - self.rotate >= self.opts['publish_session']:
to_rotate = True
if to_rotate:
log.info('Rotating master AES key')
for secret_key, secret_map in six.iteritems(SMaster.secrets):
# should be unnecessary-- since no one else should be modifying
with secret_map['secret'].get_lock():
secret_map['secret'].value = secret_map['reload']()
self.event.fire_event({'rotate_{0}_key'.format(secret_key): True}, tag='key')
self.rotate = now
if self.opts.get('ping_on_rotate'):
# Ping all minions to get them to pick up the new key
log.debug('Pinging all connected minions '
'due to key rotation')
salt.utils.master.ping_all_connected_minions(self.opts)
def handle_git_pillar(self):
'''
Update git pillar
'''
try:
for pillar in self.git_pillar:
pillar.update()
except Exception as exc:
log.error(
'Exception \'{0}\' caught while updating git_pillar'
.format(exc),
exc_info_on_loglevel=logging.DEBUG
)
def handle_schedule(self):
'''
Evaluate the scheduler
'''
try:
self.schedule.eval()
# Check if scheduler requires lower loop interval than
# the loop_interval setting
if self.schedule.loop_interval < self.loop_interval:
self.loop_interval = self.schedule.loop_interval
except Exception as exc:
log.error(
'Exception {0} occurred in scheduled job'.format(exc)
)
def handle_presence(self, old_present):
'''
Fire presence events if enabled
'''
if self.opts.get('presence_events', False):
present = self.ckminions.connected_ids()
new = present.difference(old_present)
lost = old_present.difference(present)
if new or lost:
# Fire new minions present event
data = {'new': list(new),
'lost': list(lost)}
self.event.fire_event(data, tagify('change', 'presence'))
data = {'present': list(present)}
self.event.fire_event(data, tagify('present', 'presence'))
old_present.clear()
old_present.update(present)
class Master(SMaster):
'''
The salt master server
'''
def __init__(self, opts):
'''
Create a salt master server instance
        :param dict opts: The salt options
'''
# Warn if ZMQ < 3.2
try:
zmq_version_info = zmq.zmq_version_info()
except AttributeError:
# PyZMQ <= 2.1.9 does not have zmq_version_info, fall back to
# using zmq.zmq_version() and build a version info tuple.
zmq_version_info = tuple(
[int(x) for x in zmq.zmq_version().split('.')]
)
if zmq_version_info < (3, 2):
log.warning(
'You have a version of ZMQ less than ZMQ 3.2! There are '
'known connection keep-alive issues with ZMQ < 3.2 which '
'may result in loss of contact with minions. Please '
'upgrade your ZMQ!'
)
SMaster.__init__(self, opts)
def __set_max_open_files(self):
if not HAS_RESOURCE:
return
# Let's check to see how our max open files(ulimit -n) setting is
mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE)
if mof_h == resource.RLIM_INFINITY:
# Unclear what to do with infinity... OSX reports RLIM_INFINITY as
            # hard limit, but raising to anything above soft limit fails...
mof_h = mof_s
log.info(
'Current values for max open files soft/hard setting: '
'{0}/{1}'.format(
mof_s, mof_h
)
)
# Let's grab, from the configuration file, the value to raise max open
# files to
mof_c = self.opts['max_open_files']
if mof_c > mof_h:
# The configured value is higher than what's allowed
log.info(
'The value for the \'max_open_files\' setting, {0}, is higher '
'than what the user running salt is allowed to raise to, {1}. '
'Defaulting to {1}.'.format(mof_c, mof_h)
)
mof_c = mof_h
if mof_s < mof_c:
# There's room to raise the value. Raise it!
log.info('Raising max open files value to {0}'.format(mof_c))
resource.setrlimit(resource.RLIMIT_NOFILE, (mof_c, mof_h))
try:
mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE)
log.info(
'New values for max open files soft/hard values: '
'{0}/{1}'.format(mof_s, mof_h)
)
except ValueError:
# https://github.com/saltstack/salt/issues/1991#issuecomment-13025595
# A user under OSX reported that our 100000 default value is
# still too high.
log.critical(
                'Failed to raise max open files setting to {0}. If this '
                'value is too low, the salt-master will most likely fail '
                'to run properly.'.format(
mof_c
)
)
def _pre_flight(self):
'''
Run pre flight checks. If anything in this method fails then the master
should not start up.
'''
errors = []
critical_errors = []
try:
os.chdir('/')
except OSError as err:
errors.append(
                'Cannot change to root directory ({0})'.format(err)
)
fileserver = salt.fileserver.Fileserver(self.opts)
if not fileserver.servers:
errors.append(
'Failed to load fileserver backends, the configured backends '
'are: {0}'.format(', '.join(self.opts['fileserver_backend']))
)
else:
# Run init() for all backends which support the function, to
# double-check configuration
try:
fileserver.init()
except FileserverConfigError as exc:
critical_errors.append('{0}'.format(exc))
if not self.opts['fileserver_backend']:
errors.append('No fileserver backends are configured')
# Check to see if we need to create a pillar cache dir
if self.opts['pillar_cache'] and not os.path.isdir(os.path.join(self.opts['cachedir'], 'pillar_cache')):
try:
prev_umask = os.umask(0o077)
os.mkdir(os.path.join(self.opts['cachedir'], 'pillar_cache'))
os.umask(prev_umask)
except OSError:
pass
non_legacy_git_pillars = [
x for x in self.opts.get('ext_pillar', [])
if 'git' in x
and not isinstance(x['git'], six.string_types)
]
if non_legacy_git_pillars:
new_opts = copy.deepcopy(self.opts)
new_opts['ext_pillar'] = non_legacy_git_pillars
try:
# Init any values needed by the git ext pillar
salt.utils.gitfs.GitPillar(new_opts)
except FileserverConfigError as exc:
critical_errors.append(exc.strerror)
finally:
del new_opts
if errors or critical_errors:
for error in errors:
log.error(error)
for error in critical_errors:
log.critical(error)
log.critical('Master failed pre flight checks, exiting\n')
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
# run_reqserver cannot be defined within a class method in order for it
# to be picklable.
def run_reqserver(self):
reqserv = ReqServer(
self.opts,
self.key,
self.master_key)
reqserv.run()
def start(self):
'''
Turn on the master server components
'''
self._pre_flight()
log.info(
'salt-master is starting as user {0!r}'.format(salt.utils.get_user())
)
enable_sigusr1_handler()
enable_sigusr2_handler()
self.__set_max_open_files()
log.info('Creating master process manager')
process_manager = salt.utils.process.ProcessManager()
log.info('Creating master maintenance process')
pub_channels = []
for transport, opts in iter_transport_opts(self.opts):
chan = salt.transport.server.PubServerChannel.factory(opts)
chan.pre_fork(process_manager)
pub_channels.append(chan)
log.info('Creating master event publisher process')
process_manager.add_process(salt.utils.event.EventPublisher, args=(self.opts,))
salt.engines.start_engines(self.opts, process_manager)
# must be after channels
process_manager.add_process(Maintenance, args=(self.opts,))
log.info('Creating master publisher process')
if self.opts.get('reactor'):
log.info('Creating master reactor process')
process_manager.add_process(salt.utils.reactor.Reactor, args=(self.opts,))
if self.opts.get('event_return'):
log.info('Creating master event return process')
process_manager.add_process(salt.utils.event.EventReturn, args=(self.opts,))
ext_procs = self.opts.get('ext_processes', [])
for proc in ext_procs:
log.info('Creating ext_processes process: {0}'.format(proc))
try:
mod = '.'.join(proc.split('.')[:-1])
cls = proc.split('.')[-1]
_tmp = __import__(mod, globals(), locals(), [cls], -1)
cls = _tmp.__getattribute__(cls)
process_manager.add_process(cls, args=(self.opts,))
except Exception:
log.error(('Error creating ext_processes '
'process: {0}').format(proc))
if HAS_HALITE and 'halite' in self.opts:
log.info('Creating master halite process')
process_manager.add_process(Halite, args=(self.opts['halite'],))
# TODO: remove, or at least push into the transport stuff (pre-fork probably makes sense there)
if self.opts['con_cache']:
log.info('Creating master concache process')
process_manager.add_process(ConnectedCache, args=(self.opts,))
# workaround for issue #16315, race condition
log.debug('Sleeping for two seconds to let concache rest')
time.sleep(2)
log.info('Creating master request server process')
process_manager.add_process(self.run_reqserver)
try:
process_manager.run()
except KeyboardInterrupt:
# Shut the master down gracefully on SIGINT
log.warn('Stopping the Salt Master')
process_manager.kill_children()
raise SystemExit('\nExiting on Ctrl-c')
class Halite(multiprocessing.Process):
'''
Manage the Halite server
'''
def __init__(self, hopts):
'''
Create a halite instance
:param dict hopts: The halite options
'''
super(Halite, self).__init__()
self.hopts = hopts
def run(self):
'''
Fire up halite!
'''
salt.utils.appendproctitle(self.__class__.__name__)
halite.start(self.hopts)
# TODO: move to utils??
def iter_transport_opts(opts):
'''
Yield transport, opts for all master configured transports
'''
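    # Each entry in 'transport_opts' maps a transport name to a dict of option
    # overrides; a copy of the master opts merged with those overrides is
    # yielded per transport. The primary 'transport' setting is yielded
    # unmodified last, unless transport_opts already covered it.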
transports = set()
for transport, opts_overrides in six.iteritems(opts.get('transport_opts', {})):
t_opts = dict(opts)
t_opts.update(opts_overrides)
t_opts['transport'] = transport
transports.add(transport)
yield transport, t_opts
if opts['transport'] not in transports:
yield opts['transport'], opts
class ReqServer(object):
'''
Starts up the master request server, minions send results to this
interface.
'''
def __init__(self, opts, key, mkey):
'''
Create a request server
:param dict opts: The salt options dictionary
        :param dict key: The user starting the server and the AES key
        :param dict mkey: The user starting the server and the RSA key
:rtype: ReqServer
:returns: Request server
'''
self.opts = opts
self.master_key = mkey
# Prepare the AES key
self.key = key
def __bind(self):
'''
Binds the reply server
'''
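        # Clear any stale key-rotation dropfile, pre-fork one ReqServerChannel
        # per configured transport, then start 'worker_threads' MWorker
        # processes under a dedicated ProcessManager.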
dfn = os.path.join(self.opts['cachedir'], '.dfn')
if os.path.isfile(dfn):
try:
os.remove(dfn)
except os.error:
pass
self.process_manager = salt.utils.process.ProcessManager(name='ReqServer_ProcessManager')
req_channels = []
for transport, opts in iter_transport_opts(self.opts):
chan = salt.transport.server.ReqServerChannel.factory(opts)
chan.pre_fork(self.process_manager)
req_channels.append(chan)
for ind in range(int(self.opts['worker_threads'])):
self.process_manager.add_process(MWorker,
args=(self.opts,
self.master_key,
self.key,
req_channels,
),
)
self.process_manager.run()
def run(self):
'''
Start up the ReqServer
'''
try:
self.__bind()
except KeyboardInterrupt:
log.warn('Stopping the Salt Master')
raise SystemExit('\nExiting on Ctrl-c')
def destroy(self):
if hasattr(self, 'clients') and self.clients.closed is False:
self.clients.setsockopt(zmq.LINGER, 1)
self.clients.close()
if hasattr(self, 'workers') and self.workers.closed is False:
self.workers.setsockopt(zmq.LINGER, 1)
self.workers.close()
if hasattr(self, 'context') and self.context.closed is False:
self.context.term()
# Also stop the workers
if hasattr(self, 'process_manager'):
self.process_manager.kill_children()
def __del__(self):
self.destroy()
class MWorker(multiprocessing.Process):
'''
The worker multiprocess instance to manage the backend operations for the
salt master.
'''
def __init__(self,
opts,
mkey,
key,
req_channels):
'''
Create a salt master worker process
:param dict opts: The salt options
:param dict mkey: The user running the salt master and the AES key
:param dict key: The user running the salt master and the RSA key
:rtype: MWorker
:return: Master worker
'''
multiprocessing.Process.__init__(self)
self.opts = opts
self.req_channels = req_channels
self.mkey = mkey
self.key = key
self.k_mtime = 0
# We need __setstate__ and __getstate__ to also pickle 'SMaster.secrets'.
# Otherwise, 'SMaster.secrets' won't be copied over to the spawned process
# on Windows since spawning processes on Windows requires pickling.
# These methods are only used when pickling so will not be used on
# non-Windows platforms.
def __setstate__(self, state):
multiprocessing.Process.__init__(self)
self.opts = state['opts']
self.req_channels = state['req_channels']
self.mkey = state['mkey']
self.key = state['key']
self.k_mtime = state['k_mtime']
SMaster.secrets = state['secrets']
def __getstate__(self):
return {'opts': self.opts,
'req_channels': self.req_channels,
'mkey': self.mkey,
'key': self.key,
'k_mtime': self.k_mtime,
'secrets': SMaster.secrets}
def __bind(self):
'''
Bind to the local port
'''
# using ZMQIOLoop since we *might* need zmq in there
zmq.eventloop.ioloop.install()
self.io_loop = zmq.eventloop.ioloop.ZMQIOLoop()
for req_channel in self.req_channels:
req_channel.post_fork(self._handle_payload, io_loop=self.io_loop) # TODO: cleaner? Maybe lazily?
self.io_loop.start()
@tornado.gen.coroutine
def _handle_payload(self, payload):
'''
The _handle_payload method is the key method used to figure out what
needs to be done with communication to the server
Example cleartext payload generated for 'salt myminion test.ping':
{'enc': 'clear',
'load': {'arg': [],
'cmd': 'publish',
'fun': 'test.ping',
'jid': '',
'key': 'alsdkjfa.,maljf-==adflkjadflkjalkjadfadflkajdflkj',
'kwargs': {'show_jid': False, 'show_timeout': False},
'ret': '',
'tgt': 'myminion',
'tgt_type': 'glob',
'user': 'root'}}
        :param dict payload: The payload to route to the appropriate handler
'''
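        # Dispatch on the 'enc' field of the payload: 'aes' loads are routed
        # to AESFuncs via _handle_aes, 'clear' loads to ClearFuncs via
        # _handle_clear.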
key = payload['enc']
load = payload['load']
ret = {'aes': self._handle_aes,
'clear': self._handle_clear}[key](load)
raise tornado.gen.Return(ret)
def _handle_clear(self, load):
'''
Process a cleartext command
:param dict load: Cleartext payload
:return: The result of passing the load to a function in ClearFuncs corresponding to
the command specified in the load's 'cmd' key.
'''
log.trace('Clear payload received with command {cmd}'.format(**load))
if load['cmd'].startswith('__'):
return False
return getattr(self.clear_funcs, load['cmd'])(load), {'fun': 'send_clear'}
def _handle_aes(self, data):
'''
Process a command sent via an AES key
        :param dict data: Encrypted payload
:return: The result of passing the load to a function in AESFuncs corresponding to
the command specified in the load's 'cmd' key.
'''
if 'cmd' not in data:
log.error('Received malformed command {0}'.format(data))
return {}
log.trace('AES payload received with command {0}'.format(data['cmd']))
if data['cmd'].startswith('__'):
return False
return self.aes_funcs.run_func(data['cmd'], data)
def run(self):
'''
Start a Master Worker
'''
salt.utils.appendproctitle(self.__class__.__name__)
self.clear_funcs = ClearFuncs(
self.opts,
self.key,
)
self.aes_funcs = AESFuncs(self.opts)
salt.utils.reinit_crypto()
self.__bind()
# TODO: rename? No longer tied to "AES", just "encrypted" or "private" requests
class AESFuncs(object):
'''
Set up functions that are available when the load is encrypted with AES
'''
# The AES Functions:
#
def __init__(self, opts):
'''
Create a new AESFuncs
:param dict opts: The salt options
:rtype: AESFuncs
:returns: Instance for handling AES operations
'''
self.opts = opts
self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
self.serial = salt.payload.Serial(opts)
self.ckminions = salt.utils.minions.CkMinions(opts)
# Make a client
self.local = salt.client.get_local_client(self.opts['conf_file'])
# Create the master minion to access the external job cache
self.mminion = salt.minion.MasterMinion(
self.opts,
states=False,
rend=False)
self.__setup_fileserver()
self.masterapi = salt.daemons.masterapi.RemoteFuncs(opts)
def __setup_fileserver(self):
'''
Set the local file objects from the file server interface
'''
self.fs_ = salt.fileserver.Fileserver(self.opts)
self._serve_file = self.fs_.serve_file
self._file_hash = self.fs_.file_hash
self._file_list = self.fs_.file_list
self._file_list_emptydirs = self.fs_.file_list_emptydirs
self._dir_list = self.fs_.dir_list
self._symlink_list = self.fs_.symlink_list
self._file_envs = self.fs_.envs
def __verify_minion(self, id_, token):
'''
Take a minion id and a string signed with the minion private key
The string needs to verify as 'salt' with the minion public key
:param str id_: A minion ID
:param str token: A string signed with the minion private key
:rtype: bool
:return: Boolean indicating whether or not the token can be verified.
'''
if not salt.utils.verify.valid_id(self.opts, id_):
return False
pub_path = os.path.join(self.opts['pki_dir'], 'minions', id_)
with salt.utils.fopen(pub_path, 'r') as fp_:
minion_pub = fp_.read()
tmp_pub = salt.utils.mkstemp()
with salt.utils.fopen(tmp_pub, 'w+') as fp_:
fp_.write(minion_pub)
pub = None
try:
with salt.utils.fopen(tmp_pub) as fp_:
pub = RSA.importKey(fp_.read())
except (ValueError, IndexError, TypeError) as err:
log.error('Unable to load temporary public key "{0}": {1}'
.format(tmp_pub, err))
try:
os.remove(tmp_pub)
if salt.crypt.public_decrypt(pub, token) == 'salt':
return True
except ValueError as err:
log.error('Unable to decrypt token: {0}'.format(err))
        log.error('Salt minion claiming to be {0} has attempted to '
'communicate with the master and could not be verified'
.format(id_))
return False
def __verify_minion_publish(self, clear_load):
'''
Verify that the passed information authorized a minion to execute
:param dict clear_load: A publication load from a minion
:rtype: bool
:return: A boolean indicating if the minion is allowed to publish the command in the load
'''
# Verify that the load is valid
if 'peer' not in self.opts:
return False
if not isinstance(self.opts['peer'], dict):
return False
if any(key not in clear_load for key in ('fun', 'arg', 'tgt', 'ret', 'tok', 'id')):
return False
# If the command will make a recursive publish don't run
if clear_load['fun'].startswith('publish.'):
return False
# Check the permissions for this minion
if not self.__verify_minion(clear_load['id'], clear_load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warn(
(
'Minion id {0} is not who it says it is and is attempting '
'to issue a peer command'
).format(clear_load['id'])
)
return False
clear_load.pop('tok')
perms = []
for match in self.opts['peer']:
if re.match(match, clear_load['id']):
# This is the list of funcs/modules!
if isinstance(self.opts['peer'][match], list):
perms.extend(self.opts['peer'][match])
if ',' in clear_load['fun']:
# 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']]
clear_load['fun'] = clear_load['fun'].split(',')
arg_ = []
for arg in clear_load['arg']:
arg_.append(arg.split())
clear_load['arg'] = arg_
# finally, check the auth of the load
return self.ckminions.auth_check(
perms,
clear_load['fun'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'),
publish_validate=True)
def __verify_load(self, load, verify_keys):
'''
A utility function to perform common verification steps.
:param dict load: A payload received from a minion
:param list verify_keys: A list of strings that should be present in a given load
:rtype: bool
:rtype: dict
:return: The original load (except for the token) if the load can be verified. False if the load is invalid.
'''
if any(key not in load for key in verify_keys):
return False
if 'tok' not in load:
log.error(
'Received incomplete call from {0} for {1!r}, missing {2!r}'
.format(
load['id'],
inspect_stack()['co_name'],
'tok'
))
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warn(
'Minion id {0} is not who it says it is!'.format(
load['id']
)
)
return False
if 'tok' in load:
load.pop('tok')
return load
def _ext_nodes(self, load):
'''
Return the results from an external node classifier if one is
specified
:param dict load: A payload received from a minion
:return: The results from an external node classifier
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
return self.masterapi._ext_nodes(load, skip_verify=True)
def _master_opts(self, load):
'''
Return the master options to the minion
:param dict load: A payload received from a minion
:rtype: dict
:return: The master options
'''
mopts = {}
file_roots = {}
envs = self._file_envs()
for saltenv in envs:
if saltenv not in file_roots:
file_roots[saltenv] = []
mopts['file_roots'] = file_roots
mopts['top_file_merging_strategy'] = self.opts['top_file_merging_strategy']
mopts['env_order'] = self.opts['env_order']
mopts['default_top'] = self.opts['default_top']
if load.get('env_only'):
return mopts
mopts['renderer'] = self.opts['renderer']
mopts['failhard'] = self.opts['failhard']
mopts['state_top'] = self.opts['state_top']
mopts['state_top_saltenv'] = self.opts['state_top_saltenv']
mopts['nodegroups'] = self.opts['nodegroups']
mopts['state_auto_order'] = self.opts['state_auto_order']
mopts['state_events'] = self.opts['state_events']
mopts['state_aggregate'] = self.opts['state_aggregate']
mopts['jinja_lstrip_blocks'] = self.opts['jinja_lstrip_blocks']
mopts['jinja_trim_blocks'] = self.opts['jinja_trim_blocks']
return mopts
def _mine_get(self, load):
'''
Gathers the data from the specified minions' mine
:param dict load: A payload received from a minion
:rtype: dict
:return: Mine data from the specified minions
'''
load = self.__verify_load(load, ('id', 'tgt', 'fun', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_get(load, skip_verify=True)
def _mine(self, load):
'''
Store the mine data
:param dict load: A payload received from a minion
:rtype: bool
:return: True if the data has been stored in the mine
'''
load = self.__verify_load(load, ('id', 'data', 'tok'))
if load is False:
return {}
return self.masterapi._mine(load, skip_verify=True)
def _mine_delete(self, load):
'''
Allow the minion to delete a specific function from its own mine
:param dict load: A payload received from a minion
:rtype: bool
:return: Boolean indicating whether or not the given function was deleted from the mine
'''
load = self.__verify_load(load, ('id', 'fun', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_delete(load)
def _mine_flush(self, load):
'''
Allow the minion to delete all of its own mine contents
:param dict load: A payload received from a minion
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_flush(load, skip_verify=True)
def _file_recv(self, load):
'''
Allows minions to send files to the master, files are sent to the
master file cache
'''
if any(key not in load for key in ('id', 'path', 'loc')):
return False
if not self.opts['file_recv'] or os.path.isabs(load['path']):
return False
if os.path.isabs(load['path']) or '../' in load['path']:
# Can overwrite master files!!
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
file_recv_max_size = 1024*1024 * self.opts['file_recv_max_size']
if 'loc' in load and load['loc'] < 0:
log.error('Invalid file pointer: load[loc] < 0')
return False
if len(load['data']) + load.get('loc', 0) > file_recv_max_size:
log.error(
'Exceeding file_recv_max_size limit: {0}'.format(
file_recv_max_size
)
)
return False
if 'tok' not in load:
log.error(
'Received incomplete call from {0} for {1!r}, missing {2!r}'
.format(
load['id'],
inspect_stack()['co_name'],
'tok'
))
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warn(
'Minion id {0} is not who it says it is!'.format(
load['id']
)
)
return {}
load.pop('tok')
# Normalize Windows paths
normpath = load['path']
if ':' in normpath:
# make sure double backslashes are normalized
normpath = normpath.replace('\\', '/')
normpath = os.path.normpath(normpath)
cpath = os.path.join(
self.opts['cachedir'],
'minions',
load['id'],
'files',
normpath)
cdir = os.path.dirname(cpath)
if not os.path.isdir(cdir):
try:
os.makedirs(cdir)
except os.error:
pass
if os.path.isfile(cpath) and load['loc'] != 0:
mode = 'ab'
else:
mode = 'wb'
with salt.utils.fopen(cpath, mode) as fp_:
if load['loc']:
fp_.seek(load['loc'])
fp_.write(load['data'])
return True
def _pillar(self, load):
'''
Return the pillar data for the minion
:param dict load: Minion payload
:rtype: dict
:return: The pillar data for the minion
'''
if any(key not in load for key in ('id', 'grains')):
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
load['grains']['id'] = load['id']
pillar_dirs = {}
# pillar = salt.pillar.Pillar(
pillar = salt.pillar.get_pillar(
self.opts,
load['grains'],
load['id'],
load.get('saltenv', load.get('env')),
ext=load.get('ext'),
pillar=load.get('pillar_override', {}),
pillarenv=load.get('pillarenv'))
data = pillar.compile_pillar(pillar_dirs=pillar_dirs)
self.fs_.update_opts()
if self.opts.get('minion_data_cache', False):
cdir = os.path.join(self.opts['cachedir'], 'minions', load['id'])
if not os.path.isdir(cdir):
os.makedirs(cdir)
datap = os.path.join(cdir, 'data.p')
tmpfh, tmpfname = tempfile.mkstemp(dir=cdir)
os.close(tmpfh)
with salt.utils.fopen(tmpfname, 'w+b') as fp_:
fp_.write(
self.serial.dumps(
{'grains': load['grains'],
'pillar': data})
)
# On Windows, os.rename will fail if the destination file exists.
salt.utils.atomicfile.atomic_rename(tmpfname, datap)
return data
def _minion_event(self, load):
'''
Receive an event from the minion and fire it on the master event
interface
:param dict load: The minion payload
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
# Route to master event bus
self.masterapi._minion_event(load)
# Process locally
self._handle_minion_event(load)
def _handle_minion_event(self, load):
'''
Act on specific events from minions
'''
id_ = load['id']
if load.get('tag', '') == '_salt_error':
log.error(
'Received minion error from [{minion}]: {data}'
.format(minion=id_, data=load['data']['message'])
)
for event in load.get('events', []):
event_data = event.get('data', {})
if 'minions' in event_data:
jid = event_data.get('jid')
if not jid:
continue
minions = event_data['minions']
try:
salt.utils.job.store_minions(
self.opts,
jid,
minions,
mminion=self.mminion,
syndic_id=id_)
except (KeyError, salt.exceptions.SaltCacheError) as exc:
log.error(
'Could not add minion(s) {0} for job {1}: {2}'
.format(minions, jid, exc)
)
def _return(self, load):
'''
Handle the return data sent from the minions.
Takes the return, verifies it and fires it on the master event bus.
Typically, this event is consumed by the Salt CLI waiting on the other
end of the event bus but could be heard by any listener on the bus.
:param dict load: The minion payload
'''
try:
salt.utils.job.store_job(
self.opts, load, event=self.event, mminion=self.mminion)
except salt.exceptions.SaltCacheError:
log.error('Could not store job information for load: {0}'.format(load))
def _syndic_return(self, load):
'''
Receive a syndic minion return and format it to look like returns from
individual minions.
:param dict load: The minion payload
'''
# Verify the load
if any(key not in load for key in ('return', 'jid', 'id')):
return None
# if we have a load, save it
if load.get('load'):
fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
self.mminion.returners[fstr](load['jid'], load['load'])
# Register the syndic
syndic_cache_path = os.path.join(self.opts['cachedir'], 'syndics', load['id'])
if not os.path.exists(syndic_cache_path):
path_name = os.path.split(syndic_cache_path)[0]
if not os.path.exists(path_name):
os.makedirs(path_name)
with salt.utils.fopen(syndic_cache_path, 'w') as f:
f.write('')
# Format individual return loads
for key, item in six.iteritems(load['return']):
ret = {'jid': load['jid'],
'id': key,
'return': item}
if 'master_id' in load:
ret['master_id'] = load['master_id']
if 'fun' in load:
ret['fun'] = load['fun']
if 'arg' in load:
ret['fun_args'] = load['arg']
if 'out' in load:
ret['out'] = load['out']
self._return(ret)
def minion_runner(self, clear_load):
'''
Execute a runner from a minion, return the runner's function data
:param dict clear_load: The minion payload
:rtype: dict
:return: The runner function data
'''
load = self.__verify_load(clear_load, ('fun', 'arg', 'id', 'tok'))
if load is False:
return {}
else:
return self.masterapi.minion_runner(clear_load)
def pub_ret(self, load):
'''
Request the return data from a specific jid, only allowed
        if the requesting minion also initiated the execution.
:param dict load: The minion payload
:rtype: dict
:return: Return data corresponding to a given JID
'''
load = self.__verify_load(load, ('jid', 'id', 'tok'))
if load is False:
return {}
# Check that this minion can access this data
auth_cache = os.path.join(
self.opts['cachedir'],
'publish_auth')
if not os.path.isdir(auth_cache):
os.makedirs(auth_cache)
jid_fn = os.path.join(auth_cache, str(load['jid']))
with salt.utils.fopen(jid_fn, 'r') as fp_:
if not load['id'] == fp_.read():
return {}
# Grab the latest and return
return self.local.get_cache_returns(load['jid'])
def minion_pub(self, clear_load):
'''
Publish a command initiated from a minion, this method executes minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
.. code-block:: bash
peer:
.*:
- .*
This configuration will enable all minions to execute all commands:
.. code-block:: bash
peer:
foo.example.com:
- test.*
The above configuration will only allow the minion foo.example.com to
execute commands from the test module.
        :param dict clear_load: The minion payload
'''
if not self.__verify_minion_publish(clear_load):
return {}
else:
return self.masterapi.minion_pub(clear_load)
def minion_publish(self, clear_load):
'''
Publish a command initiated from a minion, this method executes minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
.. code-block:: bash
peer:
.*:
- .*
This configuration will enable all minions to execute all commands.
        .. code-block:: bash
            peer:
                foo.example.com:
                    - test.*
The above configuration will only allow the minion foo.example.com to
execute commands from the test module.
:param dict clear_load: The minion payload
'''
if not self.__verify_minion_publish(clear_load):
return {}
else:
return self.masterapi.minion_publish(clear_load)
def revoke_auth(self, load):
'''
Allow a minion to request revocation of its own key
:param dict load: The minion payload
        :rtype: bool
        :return: True if the key was revoked, False if not (no key operation is
            performed when the load fails verification)
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return load
else:
return self.masterapi.revoke_auth(load)
def run_func(self, func, load):
'''
Wrapper for running functions executed with AES encryption
:param function func: The function to run
:return: The result of the master function that was called
'''
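        # Look the requested function up on this instance, rejecting private
        # (double-underscore) names, time the call, and choose how the result
        # is returned: pillar data may be sent encrypted to the requesting
        # minion, everything else goes back over the normal 'send' path.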
# Don't honor private functions
if func.startswith('__'):
# TODO: return some error? Seems odd to return {}
return {}, {'fun': 'send'}
# Run the func
if hasattr(self, func):
try:
start = time.time()
ret = getattr(self, func)(load)
log.trace(
'Master function call {0} took {1} seconds'.format(
func, time.time() - start
)
)
except Exception:
ret = ''
log.error(
'Error in function {0}:\n'.format(func),
exc_info=True
)
else:
log.error(
'Received function {0} which is unavailable on the master, '
'returning False'.format(
func
)
)
return False, {'fun': 'send'}
# Don't encrypt the return value for the _return func
# (we don't care about the return value, so why encrypt it?)
if func == '_return':
return ret, {'fun': 'send'}
if func == '_pillar' and 'id' in load:
if load.get('ver') != '2' and self.opts['pillar_version'] == 1:
# Authorized to return old pillar proto
return ret, {'fun': 'send'}
return ret, {'fun': 'send_private', 'key': 'pillar', 'tgt': load['id']}
# Encrypt the return
return ret, {'fun': 'send'}
class ClearFuncs(object):
'''
    Set up functions that are safe to execute when commands are sent to the master
without encryption and authentication
'''
# The ClearFuncs object encapsulates the functions that can be executed in
# the clear:
# publish (The publish from the LocalClient)
# _auth
def __init__(self, opts, key):
self.opts = opts
self.key = key
# Create the event manager
self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
# Make a client
self.local = salt.client.get_local_client(self.opts['conf_file'])
# Make an minion checker object
self.ckminions = salt.utils.minions.CkMinions(opts)
# Make an Auth object
self.loadauth = salt.auth.LoadAuth(opts)
# Stand up the master Minion to access returner data
self.mminion = salt.minion.MasterMinion(
self.opts,
states=False,
rend=False)
# Make a wheel object
self.wheel_ = salt.wheel.Wheel(opts)
# Make a masterapi object
self.masterapi = salt.daemons.masterapi.LocalFuncs(opts, key)
def process_token(self, tok, fun, auth_type):
'''
Process a token and determine if a command is authorized
'''
try:
token = self.loadauth.get_tok(tok)
except Exception as exc:
msg = 'Exception occurred when generating auth token: {0}'.format(
exc)
log.error(msg)
return dict(error=dict(name='TokenAuthenticationError',
message=msg))
if not token:
msg = 'Authentication failure of type "token" occurred.'
log.warning(msg)
return dict(error=dict(name='TokenAuthenticationError',
message=msg))
if token['eauth'] not in self.opts['external_auth']:
msg = 'Authentication failure of type "token" occurred.'
log.warning(msg)
return dict(error=dict(name='TokenAuthenticationError',
message=msg))
check_fun = getattr(self.ckminions,
'{auth}_check'.format(auth=auth_type))
if token['name'] in self.opts['external_auth'][token['eauth']]:
good = check_fun(self.opts['external_auth'][token['eauth']][token['name']], fun)
elif any(key.endswith('%') for key in self.opts['external_auth'][token['eauth']]):
            for group in self.opts['external_auth'][token['eauth']]:
                if group.endswith('%'):
                    good = check_fun(self.opts['external_auth'][token['eauth']][group], fun)
                    if good:
                        break
else:
good = check_fun(self.opts['external_auth'][token['eauth']]['*'], fun)
if not good:
msg = ('Authentication failure of type "token" occurred for '
'user {0}.').format(token['name'])
log.warning(msg)
return dict(error=dict(name='TokenAuthenticationError',
message=msg))
return None
def process_eauth(self, clear_load, auth_type):
'''
Process a clear load to determine eauth perms
Any return other than None is an eauth failure
'''
if 'eauth' not in clear_load:
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
log.warning(msg)
return dict(error=dict(name='EauthAuthenticationError',
message=msg))
if clear_load['eauth'] not in self.opts['external_auth']:
# The eauth system is not enabled, fail
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
log.warning(msg)
return dict(error=dict(name='EauthAuthenticationError',
message=msg))
name = self.loadauth.load_name(clear_load)
if not ((name in self.opts['external_auth'][clear_load['eauth']]) |
('*' in self.opts['external_auth'][clear_load['eauth']])):
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
log.warning(msg)
return dict(error=dict(name='EauthAuthenticationError',
message=msg))
if not self.loadauth.time_auth(clear_load):
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
log.warning(msg)
return dict(error=dict(name='EauthAuthenticationError',
message=msg))
check_fun = getattr(self.ckminions,
'{auth}_check'.format(auth=auth_type))
if name in self.opts['external_auth'][clear_load['eauth']]:
good = check_fun(self.opts['external_auth'][clear_load['eauth']][name], clear_load['fun'])
elif any(key.endswith('%') for key in self.opts['external_auth'][clear_load['eauth']]):
for group in self.opts['external_auth'][clear_load['eauth']]:
if group.endswith('%'):
good = check_fun(self.opts['external_auth'][clear_load['eauth']][group], clear_load['fun'])
if good:
break
else:
good = check_fun(self.opts['external_auth'][clear_load['eauth']]['*'], clear_load['fun'])
if not good:
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
log.warning(msg)
return dict(error=dict(name='EauthAuthenticationError',
message=msg))
return None
def runner(self, clear_load):
'''
Send a master control function back to the runner system
'''
# All runner ops pass through eauth
if 'token' in clear_load:
auth_error = self.process_token(clear_load['token'],
clear_load['fun'],
'runner')
if auth_error:
return auth_error
else:
token = self.loadauth.get_tok(clear_load.pop('token'))
try:
fun = clear_load.pop('fun')
runner_client = salt.runner.RunnerClient(self.opts)
return runner_client.async(
fun,
clear_load.get('kwarg', {}),
token['name'])
except Exception as exc:
log.error('Exception occurred while '
'introspecting {0}: {1}'.format(fun, exc))
return dict(error=dict(name=exc.__class__.__name__,
args=exc.args,
message=str(exc)))
try:
eauth_error = self.process_eauth(clear_load, 'runner')
if eauth_error:
return eauth_error
# No error occurred, consume the password from the clear_load if
# passed
clear_load.pop('password', None)
try:
fun = clear_load.pop('fun')
runner_client = salt.runner.RunnerClient(self.opts)
return runner_client.async(fun,
clear_load.get('kwarg', {}),
clear_load.pop('username', 'UNKNOWN'))
except Exception as exc:
log.error('Exception occurred while '
'introspecting {0}: {1}'.format(fun, exc))
return dict(error=dict(name=exc.__class__.__name__,
args=exc.args,
message=str(exc)))
except Exception as exc:
log.error(
'Exception occurred in the runner system: {0}'.format(exc)
)
return dict(error=dict(name=exc.__class__.__name__,
args=exc.args,
message=str(exc)))
def wheel(self, clear_load):
'''
Send a master control function back to the wheel system
'''
# All wheel ops pass through eauth
if 'token' in clear_load:
auth_error = self.process_token(clear_load['token'],
clear_load['fun'],
'wheel')
if auth_error:
return auth_error
else:
token = self.loadauth.get_tok(clear_load.pop('token'))
jid = salt.utils.jid.gen_jid()
fun = clear_load.pop('fun')
tag = tagify(jid, prefix='wheel')
data = {'fun': "wheel.{0}".format(fun),
'jid': jid,
'tag': tag,
'user': token['name']}
try:
self.event.fire_event(data, tagify([jid, 'new'], 'wheel'))
ret = self.wheel_.call_func(fun, **clear_load)
data['return'] = ret
data['success'] = True
self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
return {'tag': tag,
'data': data}
except Exception as exc:
log.error(exc)
log.error('Exception occurred while '
'introspecting {0}: {1}'.format(fun, exc))
data['return'] = 'Exception occurred in wheel {0}: {1}: {2}'.format(
fun,
exc.__class__.__name__,
exc,
)
data['success'] = False
self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
return {'tag': tag,
'data': data}
try:
eauth_error = self.process_eauth(clear_load, 'wheel')
if eauth_error:
return eauth_error
# No error occurred, consume the password from the clear_load if
# passed
clear_load.pop('password', None)
jid = salt.utils.jid.gen_jid()
fun = clear_load.pop('fun')
tag = tagify(jid, prefix='wheel')
data = {'fun': "wheel.{0}".format(fun),
'jid': jid,
'tag': tag,
'user': clear_load.pop('username', 'UNKNOWN')}
try:
self.event.fire_event(data, tagify([jid, 'new'], 'wheel'))
ret = self.wheel_.call_func(fun, **clear_load)
data['return'] = ret
data['success'] = True
self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
return {'tag': tag,
'data': data}
except Exception as exc:
log.error('Exception occurred while '
'introspecting {0}: {1}'.format(fun, exc))
data['return'] = 'Exception occurred in wheel {0}: {1}: {2}'.format(
fun,
exc.__class__.__name__,
exc,
)
self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
return {'tag': tag,
'data': data}
except Exception as exc:
log.error(
'Exception occurred in the wheel system: {0}'.format(exc)
)
return dict(error=dict(name=exc.__class__.__name__,
args=exc.args,
message=str(exc)))
def mk_token(self, clear_load):
'''
Create and return an authentication token, the clear load needs to
contain the eauth key and the needed authentication creds.
'''
if 'eauth' not in clear_load:
log.warning('Authentication failure of type "eauth" occurred.')
return ''
if clear_load['eauth'] not in self.opts['external_auth']:
# The eauth system is not enabled, fail
log.warning('Authentication failure of type "eauth" occurred.')
return ''
try:
name = self.loadauth.load_name(clear_load)
groups = self.loadauth.get_groups(clear_load)
eauth_config = self.opts['external_auth'][clear_load['eauth']]
if '*' not in eauth_config and name not in eauth_config:
found = False
for group in groups:
if "{0}%".format(group) in eauth_config:
found = True
break
if not found:
log.warning('Authentication failure of type "eauth" occurred.')
return ''
if not self.loadauth.time_auth(clear_load):
log.warning('Authentication failure of type "eauth" occurred.')
return ''
clear_load['groups'] = groups
return self.loadauth.mk_token(clear_load)
except Exception as exc:
type_, value_, traceback_ = sys.exc_info()
log.error(
'Exception occurred while authenticating: {0}'.format(exc)
)
log.error(traceback.format_exception(type_, value_, traceback_))
return ''
def get_token(self, clear_load):
'''
Return the name associated with a token or False if the token is invalid
'''
if 'token' not in clear_load:
return False
return self.loadauth.get_tok(clear_load['token'])
def publish(self, clear_load):
'''
This method sends out publications to the minions, it can only be used
by the LocalClient.
'''
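        # Authorisation is checked in order: the client ACL blacklist first,
        # then token auth, then external auth ('eauth'), and finally the local
        # user/key checks, before the publication is allowed to go out.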
extra = clear_load.get('kwargs', {})
client_acl = salt.acl.ClientACL(self.opts['client_acl_blacklist'])
if client_acl.user_is_blacklisted(clear_load['user']) or \
client_acl.cmd_is_blacklisted(clear_load['fun']):
log.error(
'{user} does not have permissions to run {function}. Please '
'contact your local administrator if you believe this is in '
'error.\n'.format(
user=clear_load['user'],
function=clear_load['fun']
)
)
return ''
# Check for external auth calls
if extra.get('token', False):
# A token was passed, check it
try:
token = self.loadauth.get_tok(extra['token'])
except Exception as exc:
log.error(
'Exception occurred when generating auth token: {0}'.format(
exc
)
)
return ''
# Bail if the token is empty or if the eauth type specified is not allowed
if not token or token['eauth'] not in self.opts['external_auth']:
log.warning('Authentication failure of type "token" occurred.')
return ''
# Fetch eauth config and collect users and groups configured for access
eauth_config = self.opts['external_auth'][token['eauth']]
eauth_users = []
eauth_groups = []
for entry in eauth_config:
if entry.endswith('%'):
eauth_groups.append(entry.rstrip('%'))
else:
eauth_users.append(entry)
# If there are groups in the token, check if any of them are listed in the eauth config
group_auth_match = False
try:
if token.get('groups'):
for group in token['groups']:
if group in eauth_groups:
group_auth_match = True
break
except KeyError:
pass
if '*' not in eauth_users and token['name'] not in eauth_users and not group_auth_match:
log.warning('Authentication failure of type "token" occurred.')
return ''
# Compile list of authorized actions for the user
auth_list = []
# Add permissions for '*' or user-specific to the auth list
for user_key in ('*', token['name']):
auth_list.extend(eauth_config.get(user_key, []))
# Add any add'l permissions allowed by group membership
if group_auth_match:
auth_list = self.ckminions.fill_auth_list_from_groups(eauth_config, token['groups'], auth_list)
log.trace("Compiled auth_list: {0}".format(auth_list))
good = self.ckminions.auth_check(
auth_list,
clear_load['fun'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'))
if not good:
# Accept find_job so the CLI will function cleanly
if clear_load['fun'] != 'saltutil.find_job':
log.warning(
'Authentication failure of type "token" occurred.'
)
return ''
clear_load['user'] = token['name']
log.debug('Minion tokenized user = "{0}"'.format(clear_load['user']))
elif 'eauth' in extra:
if extra['eauth'] not in self.opts['external_auth']:
# The eauth system is not enabled, fail
log.warning(
'Authentication failure of type "eauth" occurred.'
)
return ''
try:
name = self.loadauth.load_name(extra) # The username we are attempting to auth with
groups = self.loadauth.get_groups(extra) # The groups this user belongs to
if groups is None:
groups = []
group_perm_keys = [item for item in self.opts['external_auth'][extra['eauth']] if item.endswith('%')] # The configured auth groups
# First we need to know if the user is allowed to proceed via any of their group memberships.
group_auth_match = False
for group_config in group_perm_keys:
group_config = group_config.rstrip('%')
for group in groups:
if group == group_config:
group_auth_match = True
# If a group_auth_match is set it means only that we have a
# user which matches at least one or more of the groups defined
# in the configuration file.
external_auth_in_db = False
for d in self.opts['external_auth'][extra['eauth']]:
if d.startswith('^'):
external_auth_in_db = True
# If neither a catchall, a named membership or a group
# membership is found, there is no need to continue. Simply
# deny the user access.
if not ((name in self.opts['external_auth'][extra['eauth']]) |
('*' in self.opts['external_auth'][extra['eauth']]) |
group_auth_match | external_auth_in_db):
# A group def is defined and the user is a member
#[group for groups in ['external_auth'][extra['eauth']]]):
# Auth successful, but no matching user found in config
log.warning(
'Authentication failure of type "eauth" occurred.'
)
return ''
# Perform the actual authentication. If we fail here, do not
# continue.
if not self.loadauth.time_auth(extra):
log.warning(
'Authentication failure of type "eauth" occurred.'
)
return ''
except Exception as exc:
type_, value_, traceback_ = sys.exc_info()
log.error(
'Exception occurred while authenticating: {0}'.format(exc)
)
log.error(traceback.format_exception(
type_, value_, traceback_))
return ''
# auth_list = self.opts['external_auth'][extra['eauth']][name] if name in self.opts['external_auth'][extra['eauth']] else self.opts['external_auth'][extra['eauth']]['*']
# We now have an authenticated session and it is time to determine
# what the user has access to.
auth_list = []
if name in self.opts['external_auth'][extra['eauth']]:
auth_list = self.opts['external_auth'][extra['eauth']][name]
if group_auth_match:
auth_list = self.ckminions.fill_auth_list_from_groups(self.opts['external_auth'][extra['eauth']], groups, auth_list)
good = self.ckminions.auth_check(
auth_list,
clear_load['fun'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob')
)
if not good:
# Accept find_job so the CLI will function cleanly
if clear_load['fun'] != 'saltutil.find_job':
log.warning(
'Authentication failure of type "eauth" occurred.'
)
return ''
clear_load['user'] = name
# Verify that the caller has root on master
elif 'user' in clear_load:
auth_user = salt.auth.AuthUser(clear_load['user'])
if auth_user.is_sudo():
# If someone sudos, check to make sure there are no ACLs around their username
if clear_load.get('key', 'invalid') == self.key.get('root'):
clear_load.pop('key')
elif clear_load.pop('key') != self.key[self.opts.get('user', 'root')]:
log.warning(
'Authentication failure of type "user" occurred.'
)
return ''
if self.opts['sudo_acl'] and self.opts['client_acl']:
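# The split below strips what is presumably a 'sudo_' prefix from the
# username (e.g. 'sudo_fred' -> 'fred') so the ACL lookup runs against the
# underlying user's client_acl entry.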
good = self.ckminions.auth_check(
self.opts['client_acl'].get(clear_load['user'].split('_', 1)[-1]),
clear_load['fun'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'))
if not good:
# Accept find_job so the CLI will function cleanly
if clear_load['fun'] != 'saltutil.find_job':
log.warning(
'Authentication failure of type "user" '
'occurred.'
)
return ''
elif clear_load['user'] == self.opts.get('user', 'root') or clear_load['user'] == 'root':
if clear_load.pop('key') != self.key[self.opts.get('user', 'root')]:
log.warning(
'Authentication failure of type "user" occurred.'
)
return ''
elif auth_user.is_running_user():
if clear_load.pop('key') != self.key.get(clear_load['user']):
log.warning(
'Authentication failure of type "user" occurred.'
)
return ''
elif clear_load.get('key', 'invalid') == self.key.get('root'):
clear_load.pop('key')
else:
if clear_load['user'] in self.key:
# User is authorised, check key and check perms
if clear_load.pop('key') != self.key[clear_load['user']]:
log.warning(
'Authentication failure of type "user" occurred.'
)
return ''
if clear_load['user'] not in self.opts['client_acl']:
log.warning(
'Authentication failure of type "user" occurred.'
)
return ''
good = self.ckminions.auth_check(
self.opts['client_acl'][clear_load['user']],
clear_load['fun'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'))
if not good:
# Accept find_job so the CLI will function cleanly
if clear_load['fun'] != 'saltutil.find_job':
log.warning(
'Authentication failure of type "user" '
'occurred.'
)
return ''
else:
log.warning(
'Authentication failure of type "user" occurred.'
)
return ''
else:
if clear_load.pop('key') != self.key[salt.utils.get_user()]:
log.warning(
'Authentication failure of type "other" occurred.'
)
return ''
# FIXME Needs additional refactoring
# Retrieve the minions list
delimiter = clear_load.get('kwargs', {}).get('delimiter', DEFAULT_TARGET_DELIM)
minions = self.ckminions.check_minions(
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'),
delimiter
)
# If we order masters (via a syndic), don't short circuit if no minions
# are found
if not self.opts.get('order_masters'):
# Check for no minions
if not minions:
return {
'enc': 'clear',
'load': {
'jid': None,
'minions': minions
}
}
jid = self._prep_jid(clear_load, extra)
if jid is None:
return {}
payload = self._prep_pub(minions, jid, clear_load, extra)
# Send it!
self._send_pub(payload)
return {
'enc': 'clear',
'load': {
'jid': clear_load['jid'],
'minions': minions
}
}
def _prep_jid(self, clear_load, extra):
'''
Return a jid for this publication
'''
# The jid in clear_load can be None, '', or something else; this is an
# attempt to clean up the value before passing it to plugins
passed_jid = clear_load['jid'] if clear_load.get('jid') else None
nocache = extra.get('nocache', False)
# Retrieve the jid
fstr = '{0}.prep_jid'.format(self.opts['master_job_cache'])
try:
# Retrieve the jid
jid = self.mminion.returners[fstr](nocache=nocache,
passed_jid=passed_jid)
except (KeyError, TypeError):
# The returner is not present
msg = (
'Failed to allocate a jid. The requested returner \'{0}\' '
'could not be loaded.'.format(fstr.split('.')[0])
)
log.error(msg)
return {'error': msg}
return jid
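# For illustration only (value hypothetical): with master_job_cache set to
# 'local_cache', fstr becomes 'local_cache.prep_jid' and the call above
# dispatches to that returner's prep_jid(nocache=..., passed_jid=...).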
def _send_pub(self, load):
'''
Take a load and send it across the network to connected minions
'''
for transport, opts in iter_transport_opts(self.opts):
chan = salt.transport.server.PubServerChannel.factory(opts)
chan.publish(load)
def _prep_pub(self, minions, jid, clear_load, extra):
'''
Take a given load and perform the necessary steps
to prepare a publication.
TODO: This is really only bound by temporal cohesion
and thus should be refactored even further.
'''
clear_load['jid'] = jid
delimiter = clear_load.get('kwargs', {}).get('delimiter', DEFAULT_TARGET_DELIM)
# TODO Error reporting over the master event bus
self.event.fire_event({'minions': minions}, clear_load['jid'])
new_job_load = {
'jid': clear_load['jid'],
'tgt_type': clear_load['tgt_type'],
'tgt': clear_load['tgt'],
'user': clear_load['user'],
'fun': clear_load['fun'],
'arg': clear_load['arg'],
'minions': minions,
}
# Announce the job on the event bus
self.event.fire_event(new_job_load, tagify([clear_load['jid'], 'new'], 'job'))
if self.opts['ext_job_cache']:
try:
fstr = '{0}.save_load'.format(self.opts['ext_job_cache'])
self.mminion.returners[fstr](clear_load['jid'], clear_load)
except KeyError:
log.critical(
'The specified returner used for the external job cache '
'"{0}" does not have a save_load function!'.format(
self.opts['ext_job_cache']
)
)
except Exception:
log.critical(
'The specified returner threw a stack trace:\n',
exc_info=True
)
# always write out to the master job caches
try:
fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
self.mminion.returners[fstr](clear_load['jid'], clear_load)
except KeyError:
log.critical(
'The specified returner used for the master job cache '
'"{0}" does not have a save_load function!'.format(
self.opts['master_job_cache']
)
)
except Exception:
log.critical(
'The specified returner threw a stack trace:\n',
exc_info=True
)
# Set up the payload
payload = {'enc': 'aes'}
# Altering the contents of the publish load is serious!! Changes here
# break compatibility with minion/master versions and even tiny
# additions can have serious implications on the performance of the
# publish commands.
#
# In short, check with Thomas Hatch before you even think about
# touching this stuff, we can probably do what you want to do another
# way that won't have a negative impact.
load = {
'fun': clear_load['fun'],
'arg': clear_load['arg'],
'tgt': clear_load['tgt'],
'jid': clear_load['jid'],
'ret': clear_load['ret'],
}
# If you specified a master id, let's put that in the load
if 'master_id' in self.opts:
load['master_id'] = self.opts['master_id']
# if someone passed us one, use that
if 'master_id' in extra:
load['master_id'] = extra['master_id']
# Only add the delimiter to the pub data if it is non-default
if delimiter != DEFAULT_TARGET_DELIM:
load['delimiter'] = delimiter
if 'id' in extra:
load['id'] = extra['id']
if 'tgt_type' in clear_load:
load['tgt_type'] = clear_load['tgt_type']
if 'to' in clear_load:
load['to'] = clear_load['to']
if 'kwargs' in clear_load:
if 'ret_config' in clear_load['kwargs']:
load['ret_config'] = clear_load['kwargs'].get('ret_config')
if 'metadata' in clear_load['kwargs']:
load['metadata'] = clear_load['kwargs'].get('metadata')
if 'user' in clear_load:
log.info(
'User {user} Published command {fun} with jid {jid}'.format(
**clear_load
)
)
load['user'] = clear_load['user']
else:
log.info(
'Published command {fun} with jid {jid}'.format(
**clear_load
)
)
log.debug('Published command details {0}'.format(load))
return load
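# Illustrative only -- a prepared publish load typically ends up shaped like
#     {'fun': 'test.ping', 'arg': [], 'tgt': '*',
#      'jid': '20170101120000123456', 'ret': '', 'user': 'root', ...}
# plus whichever optional keys were added above (example values are
# hypothetical, not taken from the original source).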
class FloMWorker(MWorker):
'''
Change the run and bind to be ioflo friendly
'''
def __init__(self,
opts,
key,
):
MWorker.__init__(self, opts, key)
def setup(self):
'''
Prepare the needed objects and socket for iteration within ioflo
'''
salt.utils.appendproctitle(self.__class__.__name__)
self.clear_funcs = salt.master.ClearFuncs(
self.opts,
self.key,
)
self.aes_funcs = salt.master.AESFuncs(self.opts)
self.context = zmq.Context(1)
self.socket = self.context.socket(zmq.REP)
if self.opts.get('ipc_mode', '') == 'tcp':
self.w_uri = 'tcp://127.0.0.1:{0}'.format(
self.opts.get('tcp_master_workers', 4515)
)
else:
self.w_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'workers.ipc')
)
log.info('ZMQ Worker binding to socket {0}'.format(self.w_uri))
self.poller = zmq.Poller()
self.poller.register(self.socket, zmq.POLLIN)
self.socket.connect(self.w_uri)
def handle_request(self):
'''
Handle a single request
'''
try:
polled = self.poller.poll(1)
if polled:
package = self.socket.recv()
self._update_aes()
payload = self.serial.loads(package)
ret = self.serial.dumps(self._handle_payload(payload))
self.socket.send(ret)
except KeyboardInterrupt:
raise
except Exception as exc:
# Properly handle EINTR from SIGUSR1
if isinstance(exc, zmq.ZMQError) and exc.errno == errno.EINTR:
return
|
smallyear/linuxLearn
|
salt/salt/master.py
|
Python
|
apache-2.0
| 85,621
|
#
# Copyright 2018 PyWren Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import time
import boto3
import uuid
import numpy as np
import time
import pywren
import pywren.runtime
import subprocess
import logging
from six.moves import cPickle as pickle
import unittest
import numpy as np
from flaky import flaky
import sys
class CantPickle(object):
"""
Some objects can't be pickled, and either fail at
save or restore. We need to catch these and, as
the Hydraulic press guy says, "deal with it"
"""
def __init__(self, foo, dump_fail=False, load_fail=False):
self.foo = foo
self.dump_fail = dump_fail
self.load_fail = load_fail
def __getstate__(self):
print("getstate called")
if self.dump_fail:
raise Exception("cannot pickle dump this object")
return {'foo' : self.foo,
'dump_fail' : self.dump_fail,
'load_fail' : self.load_fail}
def __setstate__(self, arg):
print("setstate called")
if arg['load_fail']:
raise Exception("cannot pickle load this object")
self.load_fail = arg['load_fail']
self.dump_fail = arg['dump_fail']
self.foo = arg['foo']
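# Rough usage sketch (not part of the original test module):
#     import pickle
#     pickle.dumps(CantPickle(1, dump_fail=True))    # raises from __getstate__
#     blob = pickle.dumps(CantPickle(1, load_fail=True))
#     pickle.loads(blob)                             # raises from __setstate__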
class CantPickleException(Exception):
"""
Some objects can't be pickled, and either fail at
save or restore. We need to catch these and, as
the Hydraulic press guy says, "deal with it"
"""
def __init__(self, foo, dump_fail=False, load_fail=False,
skip_super = False):
if not skip_super :
super(Exception, self).__init__(str(foo))
self.foo = foo
self.dump_fail = dump_fail
self.load_fail = load_fail
def __getstate__(self):
print("getstate called")
if self.dump_fail:
raise Exception("cannot pickle dump this object")
return {'foo' : self.foo,
'dump_fail' : self.dump_fail,
'load_fail' : self.load_fail}
def __setstate__(self, arg):
print("setstate called")
if arg['load_fail']:
raise Exception("cannot pickle load this object")
self.load_fail = arg['load_fail']
self.dump_fail = arg['dump_fail']
self.foo = arg['foo']
class PickleSafety(unittest.TestCase):
def setUp(self):
self.wrenexec = pywren.default_executor()
def test_subprocess_fail(self):
"""
Subprocess command-not-found fails
"""
def uname(x):
return subprocess.check_output("fakecommand", shell=True)
fut = self.wrenexec.call_async(uname, None)
with pytest.raises(Exception) as execinfo:
res = fut.result()
assert "Command 'fakecommand' returned" in str(execinfo.value)
'''
def test_unpickleable_return_dump(self):
def f(x):
cp = CantPickle(x, dump_fail = True)
return cp
wrenexec = pywren.default_executor()
fut = self.wrenexec.call_async(f, None)
with pytest.raises(Exception) as execinfo:
res = fut.result()
print(str(execinfo.value))
assert 'cannot pickle dump this object' in str(execinfo.value)
def test_unpickleable_return_load(self):
def f(x):
cp = CantPickle(x, load_fail = True)
return cp
wrenexec = pywren.default_executor()
fut = self.wrenexec.call_async(f, None)
with pytest.raises(Exception) as execinfo:
res = fut.result()
assert 'cannot pickle load this object' in str(execinfo.value)
def test_unpickleable_raise_except_dump(self):
def f(x):
cp = CantPickleException(x, dump_fail = True)
raise cp
wrenexec = pywren.default_executor()
fut = self.wrenexec.call_async(f, None)
with pytest.raises(CantPickleException) as execinfo:
res = fut.result()
#assert 'cannot pickle dump this object' in str(execinfo.value)
'''
def test_unpickleable_raise_except_load(self):
def f(x):
cp = CantPickleException(x, load_fail = True)
raise cp
wrenexec = pywren.default_executor()
fut = self.wrenexec.call_async(f, None)
with pytest.raises(Exception) as execinfo:
res = fut.result()
def test_unpickleable_raise_except_nosuper(self):
def f(x):
cp = CantPickleException(x, skip_super = True)
raise cp
wrenexec = pywren.default_executor()
fut = self.wrenexec.call_async(f, "Fun exception")
with pytest.raises(Exception) as execinfo:
res = fut.result()
assert 'Fun exception' in str(execinfo.value)
|
pywren/pywren
|
tests/test_pickle.py
|
Python
|
apache-2.0
| 5,292
|
import ovlib.verb
from ovlib.dispatcher import dispatcher, command, Dispatcher
from ovlib.wrapper import ObjectWrapper, ListObjectWrapper, wrapper
from ovirtsdk4.types import VnicProfile
from ovirtsdk4.writers import VnicProfileWriter
from ovirtsdk4.services import VnicProfilesService, VnicProfileService, AssignedVnicProfileService, AssignedVnicProfilesService
@wrapper(writer_class=VnicProfileWriter, type_class=VnicProfile, service_class=AssignedVnicProfileService)
class AssignedVnicProfileWrapper(ObjectWrapper):
pass
@wrapper(service_class=AssignedVnicProfilesService)
class AssignedVnicProfilesWrapper(ListObjectWrapper):
pass
@wrapper(writer_class=VnicProfileWriter, type_class=VnicProfile, service_class=VnicProfileService)
class VnicProfileWrapper(ObjectWrapper):
pass
@wrapper(service_class=VnicProfilesService, service_root='vnicprofiles')
class VnicProfilesWrapper(ListObjectWrapper):
pass
@dispatcher(object_name='vnicprofile', wrapper=VnicProfileWrapper, list_wrapper=VnicProfilesWrapper,)
class VnicProfileDispatcher(Dispatcher):
pass
@command(VnicProfileDispatcher)
class VnicProfileList(ovlib.verb.List):
pass
@command(VnicProfileDispatcher)
class VnicProfileExport(ovlib.verb.XmlExport):
pass
@command(VnicProfileDispatcher)
class VnicProfileRemove(ovlib.verb.Remove):
pass
|
fbacchella/ovirtcmd
|
ovlib/vnicprofile/__init__.py
|
Python
|
apache-2.0
| 1,344
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for GetTagTemplate
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-datacatalog
# [START datacatalog_v1_generated_DataCatalog_GetTagTemplate_async]
from google.cloud import datacatalog_v1
async def sample_get_tag_template():
# Create a client
client = datacatalog_v1.DataCatalogAsyncClient()
# Initialize request argument(s)
request = datacatalog_v1.GetTagTemplateRequest(
name="name_value",
)
# Make the request
response = await client.get_tag_template(request=request)
# Handle the response
print(response)
# [END datacatalog_v1_generated_DataCatalog_GetTagTemplate_async]
|
googleapis/python-datacatalog
|
samples/generated_samples/datacatalog_v1_generated_data_catalog_get_tag_template_async.py
|
Python
|
apache-2.0
| 1,485
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
import os
import numpy as np
import tensorflow as tf
import niftynet.io.image_loader as image_loader
from tests.niftynet_testcase import NiftyNetTestCase
CASE_NIBABEL_3D = 'testing_data/FLAIR_1023.nii.gz'
CASE_LOGO_2D = 'niftynet-logo.png'
class ImageLoaderTest(NiftyNetTestCase):
def test_nibabel_3d(self):
data = image_loader.load_image_obj(CASE_NIBABEL_3D).get_data()
self.assertAllClose(data.shape, (256, 168, 256))
def load_2d_image(self, loader=None):
data = image_loader.load_image_obj(CASE_LOGO_2D, loader=loader).get_data()
self.assertAllClose(data.shape, (400, 677, 1, 1, 4))
def test_convert_bool(self):
boolarr = np.ones((256, 256, 256), dtype=bool)
img = image_loader.image2nibabel(boolarr)  # conversion from a bool array should not raise
def test_2d_loaders(self):
with self.assertRaisesRegexp(ValueError, ''):
self.load_2d_image('test')
self.load_2d_image()
for _loader in image_loader.AVAILABLE_LOADERS.keys():
print('testing {}'.format(_loader))
if _loader == 'nibabel':
continue # skip nibabel for 2d image
if _loader == 'dummy':
continue # skip the toy example
self.load_2d_image(_loader)
def test_all_data(self):
folder = 'testing_data'
all_files = [
os.path.join(folder, f)
for f in os.listdir(folder)
if os.path.isfile(os.path.join(folder, f))]
for f in all_files:
if f.endswith('nii.gz'):
loaded_shape = image_loader.load_image_obj(f).get_data().shape
print(loaded_shape)
self.assertGreaterEqual(5, len(loaded_shape))
else:
with self.assertRaisesRegexp(ValueError, ''):
image_loader.load_image_obj(f)
if __name__ == "__main__":
tf.test.main()
|
NifTK/NiftyNet
|
tests/image_loader_test.py
|
Python
|
apache-2.0
| 1,953
|
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sqlalchemy as sa
from sqlalchemy.orm import relationship
from savanna.db_new.sqlalchemy import model_base as mb
from savanna.db_new.sqlalchemy import types as st
from savanna.openstack.common import uuidutils
from savanna.utils import crypto
## Helpers
def _generate_unicode_uuid():
return unicode(uuidutils.generate_uuid())
def _id_column():
return sa.Column(sa.String(36),
primary_key=True,
default=_generate_unicode_uuid)
## Main objects: Cluster, NodeGroup, Instance
class Cluster(mb.SavannaBase):
"""Contains all info about cluster."""
__tablename__ = 'clusters'
__table_args__ = (
sa.UniqueConstraint('name', 'tenant_id'),
)
id = _id_column()
name = sa.Column(sa.String(80), nullable=False)
description = sa.Column(sa.Text)
tenant_id = sa.Column(sa.String(36))
plugin_name = sa.Column(sa.String(80), nullable=False)
hadoop_version = sa.Column(sa.String(80), nullable=False)
cluster_configs = sa.Column(st.JsonDictType())
default_image_id = sa.Column(sa.String(36))
anti_affinity = sa.Column(st.JsonListType())
private_key = sa.Column(sa.Text, default=crypto.generate_private_key())
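# Note: generate_private_key() is evaluated once at import time here, so every
# Cluster row created without an explicit private_key would share that single
# default string; passing the callable itself (default=crypto.generate_private_key)
# would let SQLAlchemy generate a fresh key per row, assuming the function can
# be called without arguments.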
user_keypair_id = sa.Column(sa.String(80))
status = sa.Column(sa.String(80))
status_description = sa.Column(sa.String(200))
info = sa.Column(st.JsonDictType())
node_groups = relationship('NodeGroup', cascade="all,delete",
backref='cluster', lazy='joined')
cluster_template_id = sa.Column(sa.String(36),
sa.ForeignKey('cluster_templates.id'))
cluster_template = relationship('ClusterTemplate',
backref="clusters", lazy='joined')
def to_dict(self):
d = super(Cluster, self).to_dict()
d['node_groups'] = [ng.to_dict() for ng in self.node_groups]
return d
class NodeGroup(mb.SavannaBase):
"""Specifies group of nodes within a cluster."""
__tablename__ = 'node_groups'
__table_args__ = (
sa.UniqueConstraint('name', 'cluster_id'),
)
id = _id_column()
name = sa.Column(sa.String(80), nullable=False)
flavor_id = sa.Column(sa.String(36), nullable=False)
image_id = sa.Column(sa.String(36))
node_processes = sa.Column(st.JsonListType())
node_configs = sa.Column(st.JsonDictType())
volumes_per_node = sa.Column(sa.Integer)
volumes_size = sa.Column(sa.Integer)
volume_mount_prefix = sa.Column(sa.String(80))
count = sa.Column(sa.Integer, nullable=False)
instances = relationship('Instance', cascade="all,delete",
backref='node_group',
order_by="Instance.instance_name", lazy='joined')
cluster_id = sa.Column(sa.String(36), sa.ForeignKey('clusters.id'))
node_group_template_id = sa.Column(sa.String(36),
sa.ForeignKey(
'node_group_templates.id'))
node_group_template = relationship('NodeGroupTemplate',
backref="node_groups", lazy='joined')
def to_dict(self):
d = super(NodeGroup, self).to_dict()
d['instances'] = [i.to_dict() for i in self.instances]
return d
class Instance(mb.SavannaBase):
"""An OpenStack instance created for the cluster."""
__tablename__ = 'instances'
__table_args__ = (
sa.UniqueConstraint('instance_id', 'node_group_id'),
)
id = _id_column()
node_group_id = sa.Column(sa.String(36), sa.ForeignKey('node_groups.id'))
instance_id = sa.Column(sa.String(36))
instance_name = sa.Column(sa.String(80), nullable=False)
internal_ip = sa.Column(sa.String(15))
management_ip = sa.Column(sa.String(15))
volumes = sa.Column(st.JsonListType())
## Template objects: ClusterTemplate, NodeGroupTemplate, TemplatesRelation
class ClusterTemplate(mb.SavannaBase):
"""Template for Cluster."""
__tablename__ = 'cluster_templates'
__table_args__ = (
sa.UniqueConstraint('name', 'tenant_id'),
)
id = _id_column()
name = sa.Column(sa.String(80), nullable=False)
description = sa.Column(sa.Text)
cluster_configs = sa.Column(st.JsonDictType())
default_image_id = sa.Column(sa.String(36))
anti_affinity = sa.Column(st.JsonListType())
tenant_id = sa.Column(sa.String(36))
plugin_name = sa.Column(sa.String(80), nullable=False)
hadoop_version = sa.Column(sa.String(80), nullable=False)
node_groups = relationship('TemplatesRelation', cascade="all,delete",
backref='cluster_template', lazy='joined')
def to_dict(self):
d = super(ClusterTemplate, self).to_dict()
d['node_groups'] = [tr.to_dict() for tr in
self.node_groups]
return d
class NodeGroupTemplate(mb.SavannaBase):
"""Template for NodeGroup."""
__tablename__ = 'node_group_templates'
__table_args__ = (
sa.UniqueConstraint('name', 'tenant_id'),
)
id = _id_column()
name = sa.Column(sa.String(80), nullable=False)
description = sa.Column(sa.Text)
tenant_id = sa.Column(sa.String(36))
flavor_id = sa.Column(sa.String(36), nullable=False)
image_id = sa.Column(sa.String(36))
plugin_name = sa.Column(sa.String(80), nullable=False)
hadoop_version = sa.Column(sa.String(80), nullable=False)
node_processes = sa.Column(st.JsonListType())
node_configs = sa.Column(st.JsonDictType())
volumes_per_node = sa.Column(sa.Integer)
volumes_size = sa.Column(sa.Integer)
volume_mount_prefix = sa.Column(sa.String(80))
class TemplatesRelation(mb.SavannaBase):
"""NodeGroupTemplate - ClusterTemplate relationship.
In fact, it's a template of NodeGroup in Cluster.
"""
__tablename__ = 'templates_relations'
id = _id_column()
name = sa.Column(sa.String(80), nullable=False)
flavor_id = sa.Column(sa.String(36), nullable=False)
image_id = sa.Column(sa.String(36))
node_processes = sa.Column(st.JsonListType())
node_configs = sa.Column(st.JsonDictType())
volumes_per_node = sa.Column(sa.Integer)
volumes_size = sa.Column(sa.Integer)
volume_mount_prefix = sa.Column(sa.String(80))
count = sa.Column(sa.Integer, nullable=False)
cluster_template_id = sa.Column(sa.String(36),
sa.ForeignKey('cluster_templates.id'))
node_group_template_id = sa.Column(sa.String(36),
sa.ForeignKey(
'node_group_templates.id'))
node_group_template = relationship('NodeGroupTemplate',
backref="templates_relations",
lazy='joined')
## EDP objects: DataSource, JobOrigin, Job, Job Execution
class DataSource(mb.SavannaBase):
"""DataSource - represent a diffident types of data source,
e.g. Swift, Cassandra etc.
"""
__tablename__ = 'data_sources'
__table_args__ = (
sa.UniqueConstraint('name', 'tenant_id'),
)
id = _id_column()
tenant_id = sa.Column(sa.String(36))
name = sa.Column(sa.String(80), nullable=False)
description = sa.Column(sa.Text())
type = sa.Column(sa.String(80), nullable=False)
url = sa.Column(sa.String(256), nullable=False)
credentials = sa.Column(st.JsonDictType())
class Job(mb.SavannaBase):
"""Job - represent a job object, to start job
user should provide a valid data input and output.
"""
__tablename__ = 'jobs'
__table_args__ = (
sa.UniqueConstraint('name', 'tenant_id'),
)
id = _id_column()
tenant_id = sa.Column(sa.String(36))
name = sa.Column(sa.String(80), nullable=False)
description = sa.Column(sa.Text())
type = sa.Column(sa.String(80), nullable=False)
# job_origin_id = sa.Column(sa.String(36),
# sa.ForeignKey('job_origins.id'))
input_type = sa.Column(sa.String(80), nullable=False)
output_type = sa.Column(sa.String(80), nullable=False)
class JobExecution(mb.SavannaBase):
"""JobExecution - represent a job execution of specific cluster
"""
__tablename__ = 'job_executions'
id = _id_column()
tenant_id = sa.Column(sa.String(36))
job_id = sa.Column(sa.String(36),
sa.ForeignKey('jobs.id'))
input_id = sa.Column(sa.String(36),
sa.ForeignKey('data_sources.id'))
output_id = sa.Column(sa.String(36),
sa.ForeignKey('data_sources.id'))
start_time = sa.Column(sa.Date())
end_time = sa.Column(sa.Date())
cluster_id = sa.Column(sa.String(36),
sa.ForeignKey('clusters.id'))
progress = sa.Column(sa.Float)
oozie_job_id = sa.Column(sa.String(100))
return_code = sa.Column(sa.String(80))
|
rnirmal/savanna
|
savanna/db_new/sqlalchemy/models.py
|
Python
|
apache-2.0
| 9,551
|
# Generated by Django 2.1.8 on 2019-05-02 23:22
import datetime
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc
import django.utils.timezone
import gwells.db_comments.model_mixins
import wells.data_migrations
class Migration(migrations.Migration):
dependencies = [
('wells', '0001_squashed_0079_auto_20190506_1959'),
]
operations = [
migrations.CreateModel(
name='WellDisinfectedCode',
fields=[
('create_user', models.CharField(max_length=60)),
('create_date', models.DateTimeField(default=django.utils.timezone.now)),
('update_user', models.CharField(max_length=60)),
('update_date', models.DateTimeField(default=django.utils.timezone.now)),
('display_order', models.PositiveIntegerField()),
('effective_date', models.DateTimeField(default=django.utils.timezone.now)),
('expiry_date', models.DateTimeField(default=datetime.datetime(9999, 12, 31, 23, 59, 59, 999999, tzinfo=utc))),
('well_disinfected_code', models.CharField(editable=False, max_length=100, primary_key=True, serialize=False)),
('description', models.CharField(max_length=100)),
],
options={
'db_table': 'well_disinfected_code',
'ordering': ['display_order', 'description'],
},
bases=(models.Model, gwells.db_comments.model_mixins.DBComments),
),
migrations.RunPython(
code=wells.data_migrations.load_well_disinfected_codes,
reverse_code=wells.data_migrations.unload_well_disinfected_codes,
),
migrations.AddField(
model_name='activitysubmission',
name='well_disinfected_status',
field=models.ForeignKey(blank=True, db_column='well_disinfected_code', null=True, on_delete=django.db.models.deletion.CASCADE, to='wells.WellDisinfectedCode', verbose_name='Well Disinfected Code'),
),
migrations.AddField(
model_name='well',
name='well_disinfected_status',
field=models.ForeignKey(blank=True, db_column='well_disinfected_code', null=True, on_delete=django.db.models.deletion.CASCADE, to='wells.WellDisinfectedCode', verbose_name='Well Disinfected Code'),
),
]
|
bcgov/gwells
|
app/backend/wells/migrations/0080_add_well_disinfect_status.py
|
Python
|
apache-2.0
| 2,423
|
from model.group import Group
import re
from timeit import timeit
def test_group_list(app, db):
print(timeit(lambda: app.group.get_group_list(), number=1))
def clean(group):
return Group(id=group.id, name=re.sub(r'\s+', ' ', group.name.strip()))
print(timeit(lambda: map(clean, db.get_group_list()), number=1000))
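# Note: under Python 3, map() is lazy, so the timeit call above measures the
# DB fetch plus construction of the map object; clean() itself never runs
# unless the result is consumed, e.g. list(map(clean, db.get_group_list())).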
assert False # sorted(ui_list, key=Group.id_or_max) == sorted(db_list, key=Group.id_or_max)
|
ble669/python_training
|
test/test_db_matches_ui.py
|
Python
|
apache-2.0
| 434
|
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument ('-n', '--num', type=float, default=float('-inf'), help='a number of your choice')
args = parser.parse_args()
num = args.num
if num == float('-inf'):
print('You should have made a choice')
elif num < float('inf'):
print('Maybe should have chosen a bigger number')
else:
print('This condition should never be true')
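# Sample invocations (illustrative, not part of the original file):
#     python Conditionals.py          -> You should have made a choice
#     python Conditionals.py -n 5     -> Maybe should have chosen a bigger number
#     python Conditionals.py -n inf   -> This condition should never be true
# argparse parses 'inf' (and 'nan') as floats, and neither of those compares
# less than float('inf'), so the final branch is in fact reachable.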
|
tri2sing/IntroPython
|
Conditionals.py
|
Python
|
apache-2.0
| 468
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Event._update_cms_form'
db.add_column('webinars_event', '_update_cms_form', self.gf('django.db.models.fields.related.ForeignKey')(related_name='+', null=True, to=orm['webinars.CmsForm']), keep_default=False)
def backwards(self, orm):
# Deleting field 'Event._update_cms_form'
db.delete_column('webinars_event', '_update_cms_form_id')
models = {
'webinars.account': {
'Meta': {'object_name': 'Account'},
'account_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.AccountType']"}),
'created_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
'current_sync': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'null': 'True', 'to': "orm['webinars.AccountSync']"}),
'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'extra': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'hub': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Hub']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_sync': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'null': 'True', 'to': "orm['webinars.AccountSync']"}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'prevent_unformed_lead_import': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sync_lock': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
'updated_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'webinars.accountsync': {
'Meta': {'object_name': 'AccountSync'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Account']"}),
'completed_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'debug': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'forced_stop': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.HubSync']", 'null': 'True'}),
'sharded_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'staged_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'started_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'webinars.accountsyncshard': {
'Meta': {'object_name': 'AccountSyncShard'},
'completed_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'created_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
'depth': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent_sync': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.AccountSync']"}),
'section': ('django.db.models.fields.IntegerField', [], {}),
'size': ('django.db.models.fields.IntegerField', [], {}),
'started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'})
},
'webinars.accountsyncstage': {
'Meta': {'object_name': 'AccountSyncStage'},
'completed_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'created_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'max_size': ('django.db.models.fields.IntegerField', [], {}),
'offset': ('django.db.models.fields.IntegerField', [], {}),
'parent_sync': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.AccountSync']"}),
'size': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'})
},
'webinars.accounttype': {
'Meta': {'object_name': 'AccountType'},
'can_api_create_event': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_api_load_event': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_api_register_user': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_api_report_views': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
'extra_username_label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_available': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'listing_priority': ('django.db.models.fields.IntegerField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'updated_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
'username_label': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'webinars.cmsform': {
'Meta': {'object_name': 'CmsForm'},
'guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'primary_key': 'True'}),
'hub': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Hub']"}),
'is_sync_target': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'webinars.event': {
'Meta': {'object_name': 'Event'},
'_attended_criterium_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
'_attended_saved_search_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'_noshow_saved_search_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'_registered_criterium_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
'_registered_saved_search_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'_update_cms_form': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'null': 'True', 'to': "orm['webinars.CmsForm']"}),
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Account']"}),
'attended_campaign_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
'cms_forms': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['webinars.CmsForm']", 'through': "orm['webinars.EventForm']", 'symmetrical': 'False'}),
'created_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
'current_sync': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'null': 'True', 'to': "orm['webinars.EventSync']"}),
'deleted_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '4096'}),
'duration': ('django.db.models.fields.IntegerField', [], {}),
'hashcode': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_sync': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'null': 'True', 'to': "orm['webinars.EventSync']"}),
'mothballed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'noshow_campaign_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
'remote_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'sync_leads_for_all_time': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sync_lock': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
'time_starts_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'timezone_starts_at': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'unknowable_registrants': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'updated_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'})
},
'webinars.eventform': {
'Meta': {'object_name': 'EventForm'},
'cms_form': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.CmsForm']"}),
'converted_at_cutoff': ('sanetime.dj.SaneTimeField', [], {'default': '0'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Event']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_last_modified_at': ('sanetime.dj.SaneTimeField', [], {'default': '0'})
},
'webinars.eventsync': {
'Meta': {'object_name': 'EventSync'},
'completed_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'debug': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Event']"}),
'forced_stop': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.AccountSync']", 'null': 'True'}),
'sharded_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'staged_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'started_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'webinars.eventsyncshard': {
'Meta': {'object_name': 'EventSyncShard'},
'completed_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'created_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
'depth': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent_sync': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.EventSync']"}),
'section': ('django.db.models.fields.IntegerField', [], {}),
'size': ('django.db.models.fields.IntegerField', [], {}),
'started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'})
},
'webinars.hub': {
'Meta': {'object_name': 'Hub'},
'_attended_any_criterium_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
'_attended_any_saved_search_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'_registered_any_criterium_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
'_registered_any_saved_search_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'_timezone': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'created_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
'current_sync': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'null': 'True', 'to': "orm['webinars.HubSync']"}),
'id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'last_sync': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'null': 'True', 'to': "orm['webinars.HubSync']"}),
'sync_lock': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
'uninstalled_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'updated_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'})
},
'webinars.hubspoteventsyncstage': {
'Meta': {'object_name': 'HubSpotEventSyncStage'},
'completed_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'created_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
'event_form': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.EventForm']"}),
'finish_last_modified_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_size': ('django.db.models.fields.IntegerField', [], {}),
'offset': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'parent_sync': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.EventSync']"}),
'size': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'start_last_modified_at': ('sanetime.dj.SaneTimeField', [], {}),
'started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'})
},
'webinars.hubspotregistrantsnapshot': {
'Meta': {'object_name': 'HubSpotRegistrantSnapshot'},
'attended_any': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'attended_for': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'attended_this': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Event']"}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'hashcode': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'initial_form_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'lead_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
'registered_any': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'registered_this': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'stopped_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'})
},
'webinars.hubsync': {
'Meta': {'object_name': 'HubSync'},
'completed_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'debug': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'forced_stop': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hub': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Hub']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'started_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'webinars.landingpage': {
'Meta': {'object_name': 'LandingPage'},
'cms_form': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.CmsForm']"}),
'form_title': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'webinars.registrant': {
'Meta': {'object_name': 'Registrant'},
'attended_for': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'cms_form': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.CmsForm']", 'null': 'True'}),
'created_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
'deleted_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Event']"}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'hashcode': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'lead_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
'remote_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'stopped_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'updated_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'})
},
'webinars.stagedhubspotregistrant': {
'Meta': {'object_name': 'StagedHubSpotRegistrant'},
'attended_any': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'attended_for': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'attended_this': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'converted_at': ('sanetime.dj.SaneTimeField', [], {}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Event']"}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'form_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
'hashcode': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'lead_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
'registered_any': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'registered_this': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'stopped_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'})
},
'webinars.stagedwebexevent': {
'Meta': {'object_name': 'StagedWebexEvent'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Account']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '4096', 'null': 'True'}),
'duration': ('django.db.models.fields.IntegerField', [], {}),
'hashcode': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'time_starts_at': ('sanetime.dj.SaneTimeField', [], {}),
'timezone_starts_at': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'webinars.stagedwebexregistrant': {
'Meta': {'object_name': 'StagedWebexRegistrant'},
'attendee_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'duration': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Event']"}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'hashcode': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'stopped_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'})
},
'webinars.webexeventsnapshot': {
'Meta': {'object_name': 'WebexEventSnapshot'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Account']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '4096', 'null': 'True'}),
'duration': ('django.db.models.fields.IntegerField', [], {}),
'hashcode': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'remote_id': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'time_starts_at': ('sanetime.dj.SaneTimeField', [], {}),
'timezone_starts_at': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'webinars.webexeventsyncstage': {
'Meta': {'object_name': 'WebexEventSyncStage'},
'attendants': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'completed_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'created_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'max_size': ('django.db.models.fields.IntegerField', [], {}),
'offset': ('django.db.models.fields.IntegerField', [], {}),
'parent_sync': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.EventSync']"}),
'size': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'})
},
'webinars.webexregistrantsnapshot': {
'Meta': {'object_name': 'WebexRegistrantSnapshot'},
'attended_for': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Event']"}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'hashcode': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'remote_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'stopped_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'})
}
}
complete_apps = ['webinars']
|
prior/webinars
|
webinars_web/webinars/migrations/0037_auto__add_field_event__update_cms_form.py
|
Python
|
apache-2.0
| 25,544
|
# -*- coding: utf-8 -*-
# File: input_source_base.py
from abc import ABCMeta, abstractmethod
import copy
import six
from six.moves import zip
from contextlib import contextmanager
import tensorflow as tf
from ..utils.argtools import memoized, call_only_once
from ..callbacks.base import CallbackFactory
from ..tfutils.common import get_op_tensor_name
from ..utils import logger
__all__ = ['InputSource', 'remap_input_source']
def get_tensors_inputs(placeholders, tensors, names):
"""
Args:
placeholders (list[Tensor]):
tensors (list[Tensor]): list of tf.Tensor
names (list[str]): names matching the tensors
Returns:
list[Tensor]: inputs to be used with build_graph(),
with the corresponding placeholders replaced by tensors.
"""
assert len(tensors) == len(names), \
"Input tensors {} and input names {} have different length!".format(
tensors, names)
ret = copy.copy(placeholders)
placeholder_names = [p.name for p in placeholders]
for name, tensor in zip(names, tensors):
tensorname = get_op_tensor_name(name)[1]
try:
idx = placeholder_names.index(tensorname)
except ValueError:
logger.error("Name {} is not a model input!".format(tensorname))
raise
ret[idx] = tensor
return ret
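# Minimal sketch (names hypothetical): given placeholders named 'image:0' and
# 'label:0', calling
#     get_tensors_inputs([ph_image, ph_label], [queue_image], ['image'])
# returns [queue_image, ph_label] -- only the named placeholder is swapped for
# the supplied tensor.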
def get_sublist_by_names(lst, names):
"""
Args:
lst (list): list of objects with "name" property.
Returns:
list: a sublist of objects, matching names
"""
orig_names = [p.name for p in lst]
ret = []
for name in names:
try:
idx = orig_names.index(name)
except ValueError:
logger.error("Name {} doesn't appear in lst {}!".format(
name, str(orig_names)))
raise
ret.append(lst[idx])
return ret
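# Tiny sketch (names hypothetical): if lst holds InputDesc objects named
# 'image' and 'label', get_sublist_by_names(lst, ['label']) returns just the
# 'label' entry; an unknown name is logged and the ValueError from .index()
# propagates.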
@six.add_metaclass(ABCMeta)
class InputSource(object):
""" Base class for the abstract InputSource. """
_name_scope = None
_setup_done = False
def get_input_tensors(self):
"""
Returns:
list: A list of tensors corresponding to the inputs of the model,
used as input of :func:`build_graph`.
For non-placeholder tensors, this method should always create and return new tensors when called.
"""
return self._get_input_tensors()
@abstractmethod
def _get_input_tensors(self):
pass
@call_only_once
def setup(self, inputs_desc):
"""
Args:
inputs_desc (list[InputDesc]): list of input desc
Returns:
list[Callback]: extra callbacks needed by this InputSource.
Callbacks of an InputSource cannot use any `trigger*()` method.
"""
self._setup(inputs_desc)
self._setup_done = True
return self.get_callbacks()
def _setup(self, inputs_desc):
pass
def setup_done(self):
"""
Returns:
bool: whether :meth:`setup()` has been called.
"""
return self._setup_done
@memoized
def get_callbacks(self):
"""
An InputSource might need some extra maintenance during training,
which is also done through the Callback interface.
This method returns the callbacks and the return value will be memoized.
All callbacks will be automatically marked as `chief_only=False`,
so they will run on all nodes.
Returns:
list[Callback]: extra callbacks needed by this InputSource.
"""
assert self.setup_done()
ret = [CallbackFactory(
before_train=lambda _: self.reset_state())] + self._get_callbacks()
for r in ret:
r.set_chief_only(False) # no input callbacks should be chief-only
return ret
def _get_callbacks(self):
return []
def reset_state(self):
"""
Initialize/reinitialize this InputSource.
Must be called under a default session.
For training, it will get called once by the trainer in `before_train` callbacks.
For inference, the :class:`InferenceRunner` will call this method each time it is triggered.
"""
self._reset_state()
def _reset_state(self):
pass
def size(self):
"""
Returns:
int: epoch size of the InputSource
"""
return self._size()
def _size(self):
raise NotImplementedError()
@contextmanager
def cached_name_scope(self):
"""
Yield a context under a cached name scope, whose name is the name of
this InputSource class.
"""
if self._name_scope:
with tf.name_scope(self._name_scope):
yield self._name_scope
else:
name = type(self).__name__
with tf.name_scope(name) as ns:
self._name_scope = ns
yield ns
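# Note (illustrative): the first call opens a fresh scope named after the
# concrete subclass (e.g. 'QueueInput/') and caches it; later calls re-enter
# that cached scope, so ops created by this InputSource stay grouped under a
# single name scope instead of 'QueueInput_1/', 'QueueInput_2/', etc.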
class ProxyInputSource(InputSource):
"""
An InputSource which proxy every method to ``self._input``.
"""
def __init__(self, input):
assert isinstance(input, InputSource), input
self._input = input
def _get_input_tensors(self):
return self._input.get_input_tensors()
def _setup(self, inputs_desc):
self._input.setup(inputs_desc)
def _get_callbacks(self):
return self._input.get_callbacks()
def _size(self):
return self._input.size()
def _reset_state(self):
self._input.reset_state()
def remap_input_source(input, names):
"""
When you have some :class:`InputSource` which doesn't match the inputs in
your :class:`ModelDesc`, use `RemapInputSource`.
It produces placeholders for all the inputs in your model,
except that the corresponding ones are replaced with the tensor produced
by the given :class:`InputSource`.
Args:
input(InputSource): a :class:`InputSource`, whose tensors will get mapped.
names(list[str]): list of input names corresponding to the tensors
produced by ``input``.
Returns:
InputSource:
Example:
.. code-block:: python
input1 = QueueInput(ds)
# assume ds produces 'image' and 'label', but the graph takes more
# inputs for some reasons, or takes inputs of a different order:
inputs_desc = [InputDesc(tf.float32, (None,10), 'score'),
InputDesc(tf.float32, (None,20,20,3), 'label'),
InputDesc(tf.int32, (None,), 'image') ]
input2 = remap_input_source(input1, ['image', 'label'])
input2.setup(inputs_desc)
# now, input2.get_input_tensors() will return a placeholder for 'score',
# plus the tensors returned by input1.get_input_tensors()
"""
def __init__(self, input, names):
ProxyInputSource.__init__(self, input)
assert isinstance(names, (list, tuple)), names
self._names = tuple(names)
def _setup(self, inputs):
self._all_placehdrs = [v.build_placeholder_reuse() for v in inputs]
inputs_subset = get_sublist_by_names(inputs, self._names)
self._input.setup(inputs_subset)
def _get_input_tensors(self):
ret = self._input.get_input_tensors()
assert len(ret) == len(self._names)
return get_tensors_inputs(
self._all_placehdrs, ret, self._names)
oldcls = type(input)
# inherit oldcls so that type check in various places would work
cls = type('Remapped' + oldcls.__name__, (ProxyInputSource, oldcls), {
'__init__': __init__,
'_setup': _setup,
'_get_input_tensors': _get_input_tensors})
return cls(input, names)
|
eyaler/tensorpack
|
tensorpack/input_source/input_source_base.py
|
Python
|
apache-2.0
| 7,697
|
import json
import mock
import pytest
from awx.main.models import Credential, CredentialType, Job
from awx.api.versioning import reverse
@pytest.fixture
def ec2_source(inventory, project):
with mock.patch('awx.main.models.unified_jobs.UnifiedJobTemplate.update'):
return inventory.inventory_sources.create(
name='some_source', update_on_project_update=True, source='ec2',
source_project=project, scm_last_revision=project.scm_revision)
@pytest.fixture
def job_template(job_template, project, inventory):
job_template.playbook = 'helloworld.yml'
job_template.project = project
job_template.inventory = inventory
job_template.ask_credential_on_launch = True
job_template.save()
return job_template
@pytest.mark.django_db
@pytest.mark.parametrize('key', ('credential', 'vault_credential'))
def test_credential_access_empty(get, job_template, admin, key):
url = reverse('api:job_template_detail', kwargs={'pk': job_template.pk})
resp = get(url, admin)
assert resp.data[key] is None
assert key not in resp.data['summary_fields']
@pytest.mark.django_db
def test_ssh_credential_access(get, job_template, admin, machine_credential):
job_template.credentials.add(machine_credential)
url = reverse('api:job_template_detail', kwargs={'pk': job_template.pk})
resp = get(url, admin)
assert resp.data['credential'] == machine_credential.pk
assert resp.data['summary_fields']['credential']['credential_type_id'] == machine_credential.pk
assert resp.data['summary_fields']['credential']['kind'] == 'ssh'
@pytest.mark.django_db
@pytest.mark.parametrize('key', ('credential', 'vault_credential', 'cloud_credential', 'network_credential'))
def test_invalid_credential_update(get, patch, job_template, admin, key):
url = reverse('api:job_template_detail', kwargs={'pk': job_template.pk, 'version': 'v1'})
resp = patch(url, {key: 999999}, admin, expect=400)
assert 'Credential 999999 does not exist' in json.loads(resp.content)[key]
@pytest.mark.django_db
def test_ssh_credential_update(get, patch, job_template, admin, machine_credential):
url = reverse('api:job_template_detail', kwargs={'pk': job_template.pk})
patch(url, {'credential': machine_credential.pk}, admin, expect=200)
resp = get(url, admin)
assert resp.data['credential'] == machine_credential.pk
@pytest.mark.django_db
def test_ssh_credential_update_invalid_kind(get, patch, job_template, admin, vault_credential):
url = reverse('api:job_template_detail', kwargs={'pk': job_template.pk})
resp = patch(url, {'credential': vault_credential.pk}, admin, expect=400)
assert 'You must provide an SSH credential.' in resp.content
@pytest.mark.django_db
def test_vault_credential_access(get, job_template, admin, vault_credential):
job_template.credentials.add(vault_credential)
url = reverse('api:job_template_detail', kwargs={'pk': job_template.pk})
resp = get(url, admin)
assert resp.data['vault_credential'] == vault_credential.pk
assert resp.data['summary_fields']['vault_credential']['credential_type_id'] == vault_credential.pk # noqa
assert resp.data['summary_fields']['vault_credential']['kind'] == 'vault'
@pytest.mark.django_db
def test_vault_credential_update(get, patch, job_template, admin, vault_credential):
url = reverse('api:job_template_detail', kwargs={'pk': job_template.pk})
patch(url, {'vault_credential': vault_credential.pk}, admin, expect=200)
resp = get(url, admin)
assert resp.data['vault_credential'] == vault_credential.pk
@pytest.mark.django_db
def test_vault_credential_update_invalid_kind(get, patch, job_template, admin,
machine_credential):
url = reverse('api:job_template_detail', kwargs={'pk': job_template.pk})
resp = patch(url, {'vault_credential': machine_credential.pk}, admin, expect=400)
assert 'You must provide a vault credential.' in resp.content
@pytest.mark.django_db
def test_extra_credentials_filtering(get, job_template, admin,
machine_credential, vault_credential, credential):
job_template.credentials.add(machine_credential)
job_template.credentials.add(vault_credential)
job_template.credentials.add(credential)
url = reverse(
'api:job_template_extra_credentials_list',
kwargs={'version': 'v2', 'pk': job_template.pk}
)
resp = get(url, admin, expect=200)
assert resp.data['count'] == 1
assert resp.data['results'][0]['id'] == credential.pk
@pytest.mark.django_db
def test_extra_credentials_requires_cloud_or_net(get, post, job_template, admin,
machine_credential, vault_credential, credential,
net_credential):
url = reverse(
'api:job_template_extra_credentials_list',
kwargs={'version': 'v2', 'pk': job_template.pk}
)
for cred in (machine_credential, vault_credential):
resp = post(url, {'associate': True, 'id': cred.pk}, admin, expect=400)
assert 'Extra credentials must be network or cloud.' in resp.content
post(url, {'associate': True, 'id': credential.pk}, admin, expect=204)
assert get(url, admin).data['count'] == 1
post(url, {'associate': True, 'id': net_credential.pk}, admin, expect=204)
assert get(url, admin).data['count'] == 2
@pytest.mark.django_db
def test_prevent_multiple_machine_creds(get, post, job_template, admin, machine_credential):
url = reverse(
'api:job_template_credentials_list',
kwargs={'version': 'v2', 'pk': job_template.pk}
)
def _new_cred(name):
return {
'name': name,
'credential_type': machine_credential.credential_type.pk,
'inputs': {
'username': 'bob',
'password': 'secret',
}
}
post(url, _new_cred('First Cred'), admin, expect=201)
assert get(url, admin).data['count'] == 1
resp = post(url, _new_cred('Second Cred'), admin, expect=400)
assert 'Cannot assign multiple Machine credentials.' in resp.content
@pytest.mark.django_db
@pytest.mark.parametrize('kind', ['scm', 'insights'])
def test_invalid_credential_type_at_launch(get, post, job_template, admin, kind):
cred_type = CredentialType.defaults[kind]()
cred_type.save()
cred = Credential(
name='Some Cred',
credential_type=cred_type,
inputs={
'username': 'bob',
'password': 'secret',
}
)
cred.save()
url = reverse('api:job_template_launch', kwargs={'pk': job_template.pk})
resp = post(url, {'credentials': [cred.pk]}, admin, expect=400)
assert 'Cannot assign a Credential of kind `{}`'.format(kind) in resp.data.get('credentials', [])
assert Job.objects.count() == 0
@pytest.mark.django_db
def test_prevent_multiple_machine_creds_at_launch(get, post, job_template, admin, machine_credential):
other_cred = Credential(credential_type=machine_credential.credential_type, name="Second",
inputs={'username': 'bob'})
other_cred.save()
creds = [machine_credential.pk, other_cred.pk]
url = reverse('api:job_template_launch', kwargs={'pk': job_template.pk})
resp = post(url, {'credentials': creds}, admin)
assert 'Cannot assign multiple Machine credentials.' in resp.content
@pytest.mark.django_db
def test_extra_credentials_unique_by_kind(get, post, job_template, admin,
credentialtype_aws):
url = reverse(
'api:job_template_extra_credentials_list',
kwargs={'version': 'v2', 'pk': job_template.pk}
)
def _new_cred(name):
return {
'name': name,
'credential_type': credentialtype_aws.pk,
'inputs': {
'username': 'bob',
'password': 'secret',
}
}
post(url, _new_cred('First Cred'), admin, expect=201)
assert get(url, admin).data['count'] == 1
resp = post(url, _new_cred('Second Cred'), admin, expect=400)
assert 'Cannot assign multiple Amazon Web Services credentials.' in resp.content
@pytest.mark.django_db
def test_ssh_credential_at_launch(get, post, job_template, admin, machine_credential):
url = reverse('api:job_template_launch', kwargs={'pk': job_template.pk})
pk = post(url, {'credential': machine_credential.pk}, admin, expect=201).data['job']
summary_fields = get(reverse('api:job_detail', kwargs={'pk': pk}), admin).data['summary_fields']
assert len(summary_fields['credentials']) == 1
@pytest.mark.django_db
def test_vault_credential_at_launch(get, post, job_template, admin, vault_credential):
url = reverse('api:job_template_launch', kwargs={'pk': job_template.pk})
pk = post(url, {'vault_credential': vault_credential.pk}, admin, expect=201).data['job']
summary_fields = get(reverse('api:job_detail', kwargs={'pk': pk}), admin).data['summary_fields']
assert len(summary_fields['credentials']) == 1
@pytest.mark.django_db
def test_extra_credentials_at_launch(get, post, job_template, admin, credential):
url = reverse('api:job_template_launch', kwargs={'pk': job_template.pk})
pk = post(url, {'extra_credentials': [credential.pk]}, admin, expect=201).data['job']
summary_fields = get(reverse('api:job_detail', kwargs={'pk': pk}), admin).data['summary_fields']
assert len(summary_fields['credentials']) == 1
@pytest.mark.django_db
def test_modify_ssh_credential_at_launch(get, post, job_template, admin,
machine_credential, vault_credential, credential):
job_template.credentials.add(vault_credential)
job_template.credentials.add(credential)
url = reverse('api:job_template_launch', kwargs={'pk': job_template.pk})
pk = post(url, {'credential': machine_credential.pk}, admin, expect=201).data['job']
summary_fields = get(reverse('api:job_detail', kwargs={'pk': pk}), admin).data['summary_fields']
assert len(summary_fields['credentials']) == 3
@pytest.mark.django_db
def test_modify_vault_credential_at_launch(get, post, job_template, admin,
machine_credential, vault_credential, credential):
job_template.credentials.add(machine_credential)
job_template.credentials.add(credential)
url = reverse('api:job_template_launch', kwargs={'pk': job_template.pk})
pk = post(url, {'vault_credential': vault_credential.pk}, admin, expect=201).data['job']
summary_fields = get(reverse('api:job_detail', kwargs={'pk': pk}), admin).data['summary_fields']
assert len(summary_fields['credentials']) == 3
@pytest.mark.django_db
def test_modify_extra_credentials_at_launch(get, post, job_template, admin,
machine_credential, vault_credential, credential):
job_template.credentials.add(machine_credential)
job_template.credentials.add(vault_credential)
url = reverse('api:job_template_launch', kwargs={'pk': job_template.pk})
pk = post(url, {'extra_credentials': [credential.pk]}, admin, expect=201).data['job']
summary_fields = get(reverse('api:job_detail', kwargs={'pk': pk}), admin).data['summary_fields']
assert len(summary_fields['credentials']) == 3
@pytest.mark.django_db
def test_overwrite_ssh_credential_at_launch(get, post, job_template, admin, machine_credential):
job_template.credentials.add(machine_credential)
new_cred = machine_credential
new_cred.pk = None
new_cred.save()
url = reverse('api:job_template_launch', kwargs={'pk': job_template.pk})
pk = post(url, {'credential': new_cred.pk}, admin, expect=201).data['job']
summary_fields = get(reverse('api:job_detail', kwargs={'pk': pk}), admin).data['summary_fields']
assert len(summary_fields['credentials']) == 1
assert summary_fields['credentials'][0]['id'] == new_cred.pk
@pytest.mark.django_db
def test_ssh_password_prompted_at_launch(get, post, job_template, admin, machine_credential):
job_template.credentials.add(machine_credential)
machine_credential.inputs['password'] = 'ASK'
machine_credential.save()
url = reverse('api:job_template_launch', kwargs={'pk': job_template.pk})
resp = get(url, admin)
assert 'ssh_password' in resp.data['passwords_needed_to_start']
@pytest.mark.django_db
def test_prompted_credential_replaced_on_launch(get, post, job_template, admin, machine_credential):
# If a JT has a credential that needs a password, but the launch POST
    # specifies a credential that does not require any passwords
cred2 = Credential(name='second-cred', inputs=machine_credential.inputs,
credential_type=machine_credential.credential_type)
cred2.inputs['password'] = 'ASK'
cred2.save()
job_template.credentials.add(cred2)
url = reverse('api:job_template_launch', kwargs={'pk': job_template.pk})
resp = post(url, {}, admin, expect=400)
resp = post(url, {'credentials': [machine_credential.pk]}, admin, expect=201)
assert 'job' in resp.data
@pytest.mark.django_db
def test_ssh_credential_with_password_at_launch(get, post, job_template, admin, machine_credential):
machine_credential.inputs['password'] = 'ASK'
machine_credential.save()
url = reverse('api:job_template_launch', kwargs={'pk': job_template.pk})
resp = post(url, {'credentials': [machine_credential.pk]}, admin, expect=400)
assert resp.data['passwords_needed_to_start'] == ['ssh_password']
with mock.patch.object(Job, 'signal_start') as signal_start:
resp = post(url, {
'credentials': [machine_credential.pk],
'ssh_password': 'testing123'
}, admin, expect=201)
signal_start.assert_called_with(ssh_password='testing123')
@pytest.mark.django_db
def test_vault_password_prompted_at_launch(get, post, job_template, admin, vault_credential):
job_template.credentials.add(vault_credential)
vault_credential.inputs['vault_password'] = 'ASK'
vault_credential.save()
url = reverse('api:job_template_launch', kwargs={'pk': job_template.pk})
resp = get(url, admin)
assert 'vault_password' in resp.data['passwords_needed_to_start']
@pytest.mark.django_db
def test_vault_credential_with_password_at_launch(get, post, job_template, admin, vault_credential):
vault_credential.inputs['vault_password'] = 'ASK'
vault_credential.save()
url = reverse('api:job_template_launch', kwargs={'pk': job_template.pk})
resp = post(url, {'credentials': [vault_credential.pk]}, admin, expect=400)
assert resp.data['passwords_needed_to_start'] == ['vault_password']
with mock.patch.object(Job, 'signal_start') as signal_start:
resp = post(url, {
'credentials': [vault_credential.pk],
'vault_password': 'testing123'
}, admin, expect=201)
signal_start.assert_called_with(vault_password='testing123')
@pytest.mark.django_db
def test_extra_creds_prompted_at_launch(get, post, job_template, admin, net_credential):
url = reverse('api:job_template_launch', kwargs={'pk': job_template.pk})
resp = post(url, {'extra_credentials': [net_credential.pk]}, admin, expect=201)
summary_fields = get(
reverse('api:job_detail', kwargs={'pk': resp.data['job']}),
admin
).data['summary_fields']
assert len(summary_fields['credentials']) == 1
@pytest.mark.django_db
def test_invalid_mixed_credentials_specification(get, post, job_template, admin, net_credential):
url = reverse('api:job_template_launch', kwargs={'pk': job_template.pk})
post(url=url, data={'credentials': [net_credential.pk], 'extra_credentials': [net_credential.pk]},
user=admin, expect=400)
@pytest.mark.django_db
def test_rbac_default_credential_usage(get, post, job_template, alice, machine_credential):
job_template.credentials.add(machine_credential)
job_template.execute_role.members.add(alice)
# alice can launch; she's not adding any _new_ credentials, and she has
# execute access to the JT
url = reverse('api:job_template_launch', kwargs={'pk': job_template.pk})
post(url, {'credential': machine_credential.pk}, alice, expect=201)
# make (copy) a _new_ SSH cred
new_cred = Credential.objects.create(
name=machine_credential.name,
credential_type=machine_credential.credential_type,
inputs=machine_credential.inputs
)
# alice is attempting to launch with a *different* SSH cred, but
# she does not have access to it, so she cannot launch
url = reverse('api:job_template_launch', kwargs={'pk': job_template.pk})
post(url, {'credential': new_cred.pk}, alice, expect=403)
    # if alice gains access to the credential, she *can* launch
new_cred.use_role.members.add(alice)
url = reverse('api:job_template_launch', kwargs={'pk': job_template.pk})
post(url, {'credential': new_cred.pk}, alice, expect=201)
@pytest.mark.django_db
def test_inventory_source_deprecated_credential(get, patch, admin, ec2_source, credential):
url = reverse('api:inventory_source_detail', kwargs={'pk': ec2_source.pk})
patch(url, {'credential': credential.pk}, admin, expect=200)
resp = get(url, admin, expect=200)
assert json.loads(resp.content)['credential'] == credential.pk
@pytest.mark.django_db
def test_inventory_source_invalid_deprecated_credential(patch, admin, ec2_source, credential):
url = reverse('api:inventory_source_detail', kwargs={'pk': ec2_source.pk})
resp = patch(url, {'credential': 999999}, admin, expect=400)
assert 'Credential 999999 does not exist' in resp.content
@pytest.mark.django_db
def test_deprecated_credential_activity_stream(patch, admin_user, machine_credential, job_template):
job_template.credentials.add(machine_credential)
starting_entries = job_template.activitystream_set.count()
# no-op patch
patch(
job_template.get_absolute_url(),
admin_user,
data={'credential': machine_credential.pk},
expect=200
)
# no-op should not produce activity stream entries
assert starting_entries == job_template.activitystream_set.count()
@pytest.mark.django_db
def test_multi_vault_preserved_on_put(get, put, admin_user, job_template, vault_credential):
'''
    A PUT request will necessarily specify the deprecated fields, and because each
    deprecated field is a singleton while the `credentials` relation can hold many,
    it is easy to accidentally drop the credentials not specified in the PUT data
'''
vault2 = Credential.objects.create(
name='second-vault',
credential_type=vault_credential.credential_type,
inputs={'vault_password': 'foo', 'vault_id': 'foo'}
)
job_template.credentials.add(vault_credential, vault2)
assert job_template.credentials.count() == 2 # sanity check
r = get(job_template.get_absolute_url(), admin_user, expect=200)
# should be a no-op PUT request
put(
job_template.get_absolute_url(),
admin_user,
data=r.data,
expect=200
)
assert job_template.credentials.count() == 2
|
wwitzel3/awx
|
awx/main/tests/functional/api/test_deprecated_credential_assignment.py
|
Python
|
apache-2.0
| 19,249
|
#!/usr/bin/env python
#
# Copyright 2015 British Broadcasting Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""\
The :class:`CIIServer` class implements a CII server that can be plugged into the cherrypy
web server engine.
To create a CII Server, first create and mount the server in a cherrypy web server. Then
you can start the cherrypy server and the CII server will start to accept connections from
clients.
While the server is running, update the CII state maintained by that server and instruct it when to push
updates to all connected clients.
An :doc:`example <examples>` server is provided in this package.
Using CII Server
----------------
1. Imports and initialisation
'''''''''''''''''''''''''''''
To run a CII server, you must import both ws4py's cherrypy server and the `dvbcss.protocol.server.cii` module.
When the `dvbcss.protocol.server.cii` module is imported, it will register as a "tool" with cherrypy, so it must
be imported after cherrypy is imported.
Next, subscribe the ws4py websocket plugin to cherrypy.
.. code-block:: python
import cherrypy
from ws4py.server.cherrypyserver import WebSocketPlugin
from dvbcss.protocol.server.cii import CIIServer
# initialise the ws4py websocket plugin
WebSocketPlugin(cherrypy.engine).subscribe()
2. Create and mount the CII server
''''''''''''''''''''''''''''''''''
You can now create an instance of a CIIServer and mount it into the cherrypy server at a path of your choosing.
The configuration for that path must turn on the "dvb_cii" tool and pass a "handler_cls" argument whose value is the
handler class that the CIIServer instance provides via the :data:`CIIServer.handler` attribute.
For example, to create a CIIServer mounted at the URL path "/cii":
.. code-block:: python
# create CII Server
ciiServer = CIIServer(maxConnectionsAllowed=2)
# bind it to the URL path /cii in the cherrypy server
class Root(object):
@cherrypy.expose
def cii(self):
pass
# construct the configuration for this path, providing the handler and turning on the tool hook
cfg = {"/cii": {'tools.dvb_cii.on': True,
'tools.dvb_cii.handler_cls': ciiServer.handler
}
}
cherrypy.tree.mount(Root(), "/", config=cfg)
3. Start cherrypy running
'''''''''''''''''''''''''
Start cherrypy running and our CII server will start to accept connections from clients:
.. code-block:: python
# configure cherrypy to serve on port 7681
cherrypy.config.update({"server.socket_port":7681})
# activate cherrypy web server (non blocking)
cherrypy.engine.start()
The cherrypy engine runs in a background thread when the cherrypy engine is started.
4. Setting CII state and pushing it to connected clients
''''''''''''''''''''''''''''''''''''''''''''''''''''''''
The :data:`CIIServer.cii` is a CII message object representing the CII state. Your code can read and alter the attributes of this
message object to update the server side state.
When a client first connects, a CII message object will automatically be sent to that client to send it the current CII state.
Your code does not need to do this.
If you update the CII state then you need to ask the CII server to push a change to all connected clients.
To do this call the :func:`CIIServer.updateClients`
method. By default this will only push changes to CII state, and will not send a message at all if there is no change.
However this behaviour can be overridden.
.. code-block:: python
ciiServer.cii.contentId = "dvb://233a.1004.1080"
ciiServer.cii.contentIdStatus = "partial"
ciiServer.updateClients()
...
ciiServer.cii.contentId = "dvb://233a.1004.1080;21af~20131004T1015Z--PT01H00M"
ciiServer.cii.contentIdStatus = "final"
ciiServer.updateClients()
Intelligently setting the host and port in tsUrl, teUrl and wcUrl properties
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
:class:`CIIServer` has built in support to help in situations where it is difficult to determine the host and port
to which clients are connecting in order to contact the CII Server, or where CIIServer might be contacted via more than
one network interface.
At initialisation, pass the optional `rewriteHostPort` argument, setting it to a list of properties for which you want
it to fix the host/port in URLs. Then within the CII, put `{{host}}` and `{{port}}` in place of the host and port number. The
CIIServer will then automatically substitute this in the properties you have listed.
For example:
.. code-block:: python
ciiServer = CIIServer(rewriteHostPort=['tsUrl','wcUrl'])
ciiServer.cii = CII(
tsUrl='ws://{{host}}:{{port}}/ts',
wcUrl='udp://{{host}}:6677'
)
This will be done transparently and individually for each connected client. The `cii` property of the CII server will
contain the `{{host}}` and `{{port}}` patterns before the substitution takes place.
What does CIIServer do for you and what does it not?
----------------------------------------------------
:class:`CIIServer` handles the connection and disconnection of clients without requiring any further intervention.
It ensures the current state in its :data:`~CIIServer.cii` property is sent, in a CII message, to the client as soon as it connects.
The role of your code is to update the :data:`~CIIServer.cii` object as state changes, and to tell the CIIServer
when it is time to push those changes to any connected clients by calling the :func:`~CIIServer.updateClients` method.
"""
import cherrypy
from dvbcss.protocol.server import WSServerTool
from dvbcss.protocol.server import WSServerBase
from dvbcss.protocol.cii import CII
from dvbcss.protocol import OMIT
cherrypy.tools.dvb_cii = WSServerTool()
class CIIServer(WSServerBase):
"""\
The CIIServer class implements a server for the CSS-CII protocol. It transparently manages
the connection and disconnection of clients and provides an interface for simply setting the
CII state and requesting that it be pushed to any connected clients.
Must be used in conjunction with a cherrypy web server:
1. Ensure the ws4py :class:`~ws4py.server.cherrypyserver.WebSocketPlugin` is subscribed, to the cherrypy server. E.g.
.. code-block:: python
WebSocketPlugin(cherrypy.engine).subscribe()
2. Mount the instance onto a particular URL path on a cherrypy web server. Set the config
properties for the URL it is to be mounted at as follows:
.. code-block:: python
{ 'tools.dvb_cii.on' : True,
'tools.dvb_cii.handler_cls': myCiiServerInstance.handler }
Update the :data:`cii` property with the CII state information and call the :func:`updateClients` method to propagate state changes to
any connected clients.
When the server is "disabled" it will refuse attempts to connect by sending the HTTP status response 403 "Forbidden".
When the server has reached its connection limit, it will refuse attempts to connect by sending the HTTP status response 503 "Service unavailable".
This object provides properties:
* :data:`enabled` (read/write) controls whether this server is enabled or not
* :data:`cii` (read/write) the CII state that is being shared to connected clients
To allow for servers serving multiple network interfaces, or where the IP address of the interface is not easy to determine, CII Server can be
asked to automatically substitute the host and port with the one that the client connected to. Specify the list of property names for which this
    should happen as an optional `rewriteHostPort` argument when initialising the CIIServer, then use `{{host}}` and `{{port}}` within those properties.
"""
connectionIdPrefix = "cii"
loggingName = "dvb-css.protocol.server.cii.CIIServer"
getDefaultConnectionData = lambda self: { "prevCII" : CII() } # default state for a new connection - no CII info transferred to client yet
def __init__(self, maxConnectionsAllowed=-1, enabled=True, initialCII = CII(protocolVersion="1.1"), rewriteHostPort=[]):
"""\
**Initialisation takes the following parameters:**
:param maxConnectionsAllowed: (int, default=-1) Maximum number of concurrent connections to be allowed, or -1 to allow as many connections as resources allow.
:param enabled: (bool, default=True) Whether the endpoint is initially enabled (True) or disabled (False)
:param initialCII: (:class:`dvbcss.protocol.cii.CII`, default=CII(protocolVersion="1.1")) Initial value of CII state.
:param rewriteHostPort: (list) List of CII property names for which the sub-string '{{host}}' '{{port}}' will be replaced with the host and port that the client connected to.
"""
super(CIIServer,self).__init__(maxConnectionsAllowed=maxConnectionsAllowed, enabled=enabled)
self.cii = initialCII.copy()
self._rewriteHostPort = rewriteHostPort[:]
"""\
A :class:`dvbcss.protocol.cii.CII` message object representing current CII state.
Set the attributes of this object to update that state.
When :func:`updateClients` is called, it is this state that will be sent to connected clients.
"""
def _customiseCii(self, webSock):
cii = self.cii.copy()
host = webSock.local_address[0]
port = str(webSock.local_address[1])
for propName in cii.definedProperties():
if propName in self._rewriteHostPort:
propVal = getattr(cii,propName).replace("{{host}}", host).replace("{{port}}", port)
setattr(cii, propName, propVal)
return cii
def updateClients(self, sendOnlyDiff=True,sendIfEmpty=False):
"""\
Send update of current CII state from the :data:`CIIServer.cii` object to all connected clients.
:param sendOnlyDiff: (bool, default=True) Send only the properties in the CII state that have changed since last time a message was sent. Set to False to send the entire message.
:param sendIfEmpty: (bool, default=False) Set to True to force that a CII message be sent, even if it will be empty (e.g. no change since last time)
        By default this method will only send a CII message to clients informing them of the differences in state since the last time a message was sent to them.
If no properties have changed at all, then no message will be sent.
The two optional arguments allow you to change this behaviour. For example, to force the messages sent to include all properties, even if they have not changed:
.. code-block:: python
myCiiServer.updateClients(sendOnlyDiff=False)
To additionally force it to send even if the CII state held at this server has no values for any of the properties:
.. code-block:: python
myCiiServer.updateClients(sendOnlyDiff=False, sendIfEmpty=True)
"""
connections = self.getConnections()
for webSock in connections:
self.log.debug("Sending CII to connection "+webSock.id())
connectionData = connections[webSock]
prevCII = connectionData["prevCII"]
# perform rewrite substitutions, if any
cii = self._customiseCii(webSock)
# work out whether we are sending the full CII or a diff
if sendOnlyDiff:
diff = CII.diff(prevCII, cii)
toSend = diff
# enforce requirement that contentId must be accompanied by contentIdStatus
if diff.contentId != OMIT:
toSend.contentIdStatus = cii.contentIdStatus
else:
toSend = cii
            # only send if forced to, or if the message to send is not empty (all OMITs)
if sendIfEmpty or toSend.definedProperties():
webSock.send(toSend.pack())
connectionData["prevCII"] = cii.copy()
def onClientConnect(self, webSock):
"""If you override this method you must call the base class implementation."""
self.log.info("Sending initial CII message for connection "+webSock.id())
cii = self._customiseCii(webSock)
webSock.send(cii.pack())
self.getConnections()[webSock]["prevCII"] = cii.copy()
def onClientDisconnect(self, webSock, connectionData):
"""If you override this method you must call the base class implementation."""
pass
def onClientMessage(self, webSock, message):
"""If you override this method you must call the base class implementation."""
self.log.info("Received unexpected message on connection"+webSock.id()+" : "+str(message))
|
bbc/pydvbcss
|
dvbcss/protocol/server/cii.py
|
Python
|
apache-2.0
| 13,575
|
#!/usr/bin/python
# Copyright 2015 Sam Yaple
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
DOCUMENTATION = '''
---
module: kolla_docker
short_description: Module for controlling Docker
description:
    - A module for controlling Docker as used by Kolla.
options:
common_options:
description:
- A dict containing common params such as login info
required: False
type: dict
default: dict()
action:
description:
- The action the module should take
required: True
type: str
choices:
- compare_image
- create_volume
- get_container_env
- get_container_state
- pull_image
- remove_container
- remove_volume
- restart_container
- start_container
- stop_container
api_version:
description:
- The version of the api for docker-py to use when contacting docker
required: False
type: str
default: auto
auth_email:
description:
- The email address used to authenticate
required: False
type: str
auth_password:
description:
- The password used to authenticate
required: False
type: str
auth_registry:
description:
- The registry to authenticate to
required: False
type: str
auth_username:
description:
- The username used to authenticate
required: False
type: str
detach:
description:
- Detach from the container after it is created
required: False
default: True
type: bool
name:
description:
- Name of the container or volume to manage
required: False
type: str
environment:
description:
- The environment to set for the container
required: False
type: dict
image:
description:
- Name of the docker image
required: False
type: str
ipc_mode:
description:
- Set docker ipc namespace
required: False
type: str
default: None
choices:
- host
labels:
description:
- List of labels to apply to container
required: False
type: dict
default: dict()
pid_mode:
description:
- Set docker pid namespace
required: False
type: str
default: None
choices:
- host
privileged:
description:
- Set the container to privileged
required: False
default: False
type: bool
remove_on_exit:
description:
- When not detaching from container, remove on successful exit
required: False
default: True
type: bool
restart_policy:
description:
- Determine what docker does when the container exits
required: False
type: str
choices:
- never
- on-failure
- always
restart_retries:
description:
- How many times to attempt a restart if restart_policy is set
type: int
default: 10
volumes:
description:
- Set volumes for docker to use
required: False
type: list
volumes_from:
description:
- Name or id of container(s) to use volumes from
    required: False
type: list
author: Sam Yaple
'''
EXAMPLES = '''
- hosts: kolla_docker
tasks:
- name: Start container
kolla_docker:
image: ubuntu
name: test_container
action: start_container
- name: Remove container
kolla_docker:
name: test_container
action: remove_container
- name: Pull image without starting container
kolla_docker:
        action: pull_image
image: private-registry.example.com:5000/ubuntu
- name: Create named volume
action: create_volume
name: name_of_volume
- name: Remove named volume
action: remove_volume
name: name_of_volume
'''
import json
import os
import docker
class DockerWorker(object):
def __init__(self, module):
self.module = module
self.params = self.module.params
self.changed = False
# TLS not fully implemented
# tls_config = self.generate_tls()
options = {
'version': self.params.get('api_version')
}
self.dc = docker.Client(**options)
def generate_tls(self):
tls = {'verify': self.params.get('tls_verify')}
        tls_cert = self.params.get('tls_cert')
        tls_key = self.params.get('tls_key')
        tls_cacert = self.params.get('tls_cacert')
        if tls['verify']:
            if tls_cert:
                self.check_file(tls_cert)
                self.check_file(tls_key)
                tls['client_cert'] = (tls_cert, tls_key)
            if tls_cacert:
                self.check_file(tls_cacert)
                tls['verify'] = tls_cacert
return docker.tls.TLSConfig(**tls)
def check_file(self, path):
if not os.path.isfile(path):
self.module.fail_json(
failed=True,
msg='There is no file at "{}"'.format(path)
)
if not os.access(path, os.R_OK):
self.module.fail_json(
failed=True,
msg='Permission denied for file at "{}"'.format(path)
)
def check_image(self):
find_image = ':'.join(self.parse_image())
for image in self.dc.images():
for image_name in image['RepoTags']:
if image_name == find_image:
return image
def check_volume(self):
for vol in self.dc.volumes()['Volumes'] or list():
if vol['Name'] == self.params.get('name'):
return vol
def check_container(self):
find_name = '/{}'.format(self.params.get('name'))
for cont in self.dc.containers(all=True):
if find_name in cont['Names']:
return cont
def get_container_info(self):
container = self.check_container()
if not container:
return None
return self.dc.inspect_container(self.params.get('name'))
def check_container_differs(self):
container_info = self.get_container_info()
return (
self.compare_image(container_info) or
self.compare_ipc_mode(container_info) or
self.compare_labels(container_info) or
self.compare_privileged(container_info) or
self.compare_pid_mode(container_info) or
self.compare_volumes(container_info) or
self.compare_volumes_from(container_info) or
self.compare_environment(container_info)
)
def compare_ipc_mode(self, container_info):
new_ipc_mode = self.params.get('ipc_mode')
current_ipc_mode = container_info['HostConfig'].get('IpcMode')
if not current_ipc_mode:
current_ipc_mode = None
if new_ipc_mode != current_ipc_mode:
return True
def compare_pid_mode(self, container_info):
new_pid_mode = self.params.get('pid_mode')
current_pid_mode = container_info['HostConfig'].get('PidMode')
if not current_pid_mode:
current_pid_mode = None
if new_pid_mode != current_pid_mode:
return True
def compare_privileged(self, container_info):
new_privileged = self.params.get('privileged')
current_privileged = container_info['HostConfig']['Privileged']
if new_privileged != current_privileged:
return True
def compare_image(self, container_info=None):
container_info = container_info or self.get_container_info()
if not container_info:
return True
new_image = self.check_image()
current_image = container_info['Image']
if not new_image:
return True
if new_image['Id'] != current_image:
return True
def compare_labels(self, container_info):
new_labels = self.params.get('labels')
current_labels = container_info['Config'].get('Labels', dict())
image_labels = self.check_image().get('Labels', dict())
for k, v in image_labels.iteritems():
if k in new_labels:
if v != new_labels[k]:
return True
else:
del current_labels[k]
if new_labels != current_labels:
return True
def compare_volumes_from(self, container_info):
new_vols_from = self.params.get('volumes_from')
current_vols_from = container_info['HostConfig'].get('VolumesFrom')
if not new_vols_from:
new_vols_from = list()
if not current_vols_from:
current_vols_from = list()
if set(current_vols_from).symmetric_difference(set(new_vols_from)):
return True
def compare_volumes(self, container_info):
volumes, binds = self.generate_volumes()
current_vols = container_info['Config'].get('Volumes')
current_binds = container_info['HostConfig'].get('Binds')
if not volumes:
volumes = list()
if not current_vols:
current_vols = list()
if not current_binds:
current_binds = list()
if set(volumes).symmetric_difference(set(current_vols)):
return True
new_binds = list()
if binds:
for k, v in binds.iteritems():
new_binds.append("{}:{}:{}".format(k, v['bind'], v['mode']))
if set(new_binds).symmetric_difference(set(current_binds)):
return True
def compare_environment(self, container_info):
if self.params.get('environment'):
current_env = dict()
for kv in container_info['Config'].get('Env', list()):
k, v = kv.split('=', 1)
current_env.update({k: v})
for k, v in self.params.get('environment').iteritems():
if k not in current_env:
return True
if current_env[k] != v:
return True
def parse_image(self):
full_image = self.params.get('image')
if '/' in full_image:
registry, image = full_image.split('/', 1)
else:
image = full_image
if ':' in image:
return full_image.rsplit(':', 1)
else:
return full_image, 'latest'
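        # Illustrative examples of the parsing above (image names are hypothetical):
        #   'ubuntu'                         -> ('ubuntu', 'latest')
        #   'registry:5000/kolla/ubuntu:2.0' -> ('registry:5000/kolla/ubuntu', '2.0')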
def pull_image(self):
if self.params.get('auth_username'):
self.dc.login(
username=self.params.get('auth_username'),
password=self.params.get('auth_password'),
registry=self.params.get('auth_registry'),
email=self.params.get('auth_email')
)
image, tag = self.parse_image()
statuses = [
json.loads(line.strip()) for line in self.dc.pull(
repository=image, tag=tag, stream=True
)
]
for status in reversed(statuses):
if 'error' in status:
if status['error'].endswith('not found'):
self.module.fail_json(
msg="The requested image does not exist: {}:{}".format(
image, tag),
failed=True
)
else:
self.module.fail_json(
msg="Unknown error message: {}".format(
status['error']),
failed=True
)
if status and status.get('status'):
# NOTE(SamYaple): This allows us to use v1 and v2 docker
# registries. Eventually docker will stop supporting v1
# registries and when that happens we can remove this.
if 'legacy registry' in status['status']:
continue
elif 'Downloaded newer image for' in status['status']:
self.changed = True
return
elif 'Image is up to date for' in status['status']:
return
else:
self.module.fail_json(
msg="Unknown status message: {}".format(
status['status']),
failed=True
)
def remove_container(self):
if self.check_container():
self.changed = True
self.dc.remove_container(
container=self.params.get('name'),
force=True
)
def generate_volumes(self):
volumes = self.params.get('volumes')
if not volumes:
return None, None
vol_list = list()
vol_dict = dict()
for vol in volumes:
if ':' not in vol:
vol_list.append(vol)
continue
split_vol = vol.split(':')
if (len(split_vol) == 2
and ('/' not in split_vol[0] or '/' in split_vol[1])):
split_vol.append('rw')
vol_list.append(split_vol[1])
vol_dict.update({
split_vol[0]: {
'bind': split_vol[1],
'mode': split_vol[2]
}
})
return vol_list, vol_dict
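        # Illustrative example (hypothetical entries): a 'volumes' list such as
        #   ['kolla_logs:/var/log/kolla/:rw', '/etc/localtime:/etc/localtime:ro']
        # produces
        #   vol_list == ['/var/log/kolla/', '/etc/localtime']
        #   vol_dict == {'kolla_logs': {'bind': '/var/log/kolla/', 'mode': 'rw'},
        #                '/etc/localtime': {'bind': '/etc/localtime', 'mode': 'ro'}}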
def build_host_config(self, binds):
options = {
'network_mode': 'host',
'ipc_mode': self.params.get('ipc_mode'),
'pid_mode': self.params.get('pid_mode'),
'privileged': self.params.get('privileged'),
'volumes_from': self.params.get('volumes_from')
}
if self.params.get('restart_policy') in ['on-failure', 'always']:
options['restart_policy'] = {
'Name': self.params.get('restart_policy'),
'MaximumRetryCount': self.params.get('restart_retries')
}
if binds:
options['binds'] = binds
return self.dc.create_host_config(**options)
def _format_env_vars(self):
env = self.params.get('environment')
return {k: "" if env[k] is None else env[k] for k in env}
def build_container_options(self):
volumes, binds = self.generate_volumes()
return {
'detach': self.params.get('detach'),
'environment': self._format_env_vars(),
'host_config': self.build_host_config(binds),
'labels': self.params.get('labels'),
'image': self.params.get('image'),
'name': self.params.get('name'),
'volumes': volumes,
'tty': True
}
def create_container(self):
self.changed = True
options = self.build_container_options()
self.dc.create_container(**options)
def start_container(self):
if not self.check_image():
self.pull_image()
container = self.check_container()
if container and self.check_container_differs():
self.remove_container()
container = self.check_container()
if not container:
self.create_container()
container = self.check_container()
if not container['Status'].startswith('Up '):
self.changed = True
self.dc.start(container=self.params.get('name'))
# We do not want to detach so we wait around for container to exit
if not self.params.get('detach'):
rc = self.dc.wait(self.params.get('name'))
if rc != 0:
self.module.fail_json(
failed=True,
changed=True,
msg="Container exited with non-zero return code"
)
if self.params.get('remove_on_exit'):
self.remove_container()
def get_container_env(self):
name = self.params.get('name')
info = self.get_container_info()
if not info:
self.module.fail_json(msg="No such container: {}".format(name))
else:
envs = dict()
for env in info['Config']['Env']:
if '=' in env:
key, value = env.split('=', 1)
else:
key, value = env, ''
envs[key] = value
self.module.exit_json(**envs)
def get_container_state(self):
name = self.params.get('name')
info = self.get_container_info()
if not info:
self.module.fail_json(msg="No such container: {}".format(name))
else:
self.module.exit_json(**info['State'])
def stop_container(self):
name = self.params.get('name')
container = self.check_container()
if not container['Status'].startswith('Exited '):
self.changed = True
self.dc.stop(name)
def restart_container(self):
name = self.params.get('name')
info = self.get_container_info()
if not info:
self.module.fail_json(
msg="No such container: {}".format(name))
else:
self.changed = True
self.dc.restart(name)
def create_volume(self):
if not self.check_volume():
self.changed = True
self.dc.create_volume(name=self.params.get('name'), driver='local')
def remove_volume(self):
if self.check_volume():
self.changed = True
try:
self.dc.remove_volume(name=self.params.get('name'))
except docker.errors.APIError as e:
if e.response.status_code == 409:
self.module.fail_json(
failed=True,
msg="Volume named '{}' is currently in-use".format(
self.params.get('name')
)
)
raise
def generate_module():
argument_spec = dict(
common_options=dict(required=False, type='dict', default=dict()),
        action=dict(required=True, type='str', choices=['compare_image',
'create_volume',
'get_container_env',
'get_container_state',
'pull_image',
'remove_container',
'remove_volume',
'restart_container',
'start_container',
'stop_container']),
api_version=dict(required=False, type='str', default='auto'),
auth_email=dict(required=False, type='str'),
auth_password=dict(required=False, type='str'),
auth_registry=dict(required=False, type='str'),
auth_username=dict(required=False, type='str'),
detach=dict(required=False, type='bool', default=True),
labels=dict(required=False, type='dict', default=dict()),
name=dict(required=False, type='str'),
environment=dict(required=False, type='dict'),
image=dict(required=False, type='str'),
ipc_mode=dict(required=False, type='str', choices=['host']),
pid_mode=dict(required=False, type='str', choices=['host']),
privileged=dict(required=False, type='bool', default=False),
remove_on_exit=dict(required=False, type='bool', default=True),
restart_policy=dict(required=False, type='str', choices=['no',
'never',
'on-failure',
'always']),
restart_retries=dict(required=False, type='int', default=10),
tls_verify=dict(required=False, type='bool', default=False),
tls_cert=dict(required=False, type='str'),
tls_key=dict(required=False, type='str'),
tls_cacert=dict(required=False, type='str'),
volumes=dict(required=False, type='list'),
volumes_from=dict(required=False, type='list')
)
required_together = [
['tls_cert', 'tls_key']
]
return AnsibleModule(
argument_spec=argument_spec,
required_together=required_together
)
def generate_nested_module():
module = generate_module()
    # We unnest the common dict and then update it with the other options
new_args = module.params.get('common_options')
new_args.update(module._load_params()[0])
module.params = new_args
# Override ARGS to ensure new args are used
global MODULE_ARGS
global MODULE_COMPLEX_ARGS
MODULE_ARGS = ''
MODULE_COMPLEX_ARGS = json.dumps(module.params)
# Reprocess the args now that the common dict has been unnested
return generate_module()
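# Illustrative example of the unnesting above (values are hypothetical): with
#   common_options: {'api_version': 'auto', 'auth_registry': 'registry:5000'}
#   image: 'registry:5000/ubuntu'
# the keys of common_options become top-level params alongside 'image' before
# the module arguments are re-parsed by generate_module().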
def main():
module = generate_nested_module()
# TODO(SamYaple): Replace with required_if when Ansible 2.0 lands
if (module.params.get('action') in ['pull_image', 'start_container']
and not module.params.get('image')):
module.fail_json(
msg="missing required arguments: image",
failed=True
)
# TODO(SamYaple): Replace with required_if when Ansible 2.0 lands
if (module.params.get('action') != 'pull_image'
and not module.params.get('name')):
module.fail_json(
msg="missing required arguments: name",
failed=True
)
try:
dw = DockerWorker(module)
# TODO(inc0): We keep it bool to have ansible deal with consistent
# types. If we ever add method that will have to return some
# meaningful data, we need to refactor all methods to return dicts.
result = bool(getattr(dw, module.params.get('action'))())
module.exit_json(changed=dw.changed, result=result)
except Exception as e:
module.exit_json(failed=True, changed=True, msg=repr(e))
# import module snippets
from ansible.module_utils.basic import * # noqa
if __name__ == '__main__':
main()
|
rthallisey/kolla-kubernetes-personal
|
kolla/ansible/library/kolla_docker.py
|
Python
|
apache-2.0
| 22,596
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pytest fixture."""
import sys
from absl import flags
import pytest
@pytest.fixture(scope='session', autouse=True)
def parse_flags():
# Only pass the first item, because pytest flags shouldn't be parsed as absl
# flags.
flags.FLAGS(sys.argv[:1])
|
google/python-spanner-orm
|
spanner_orm/tests/conftest.py
|
Python
|
apache-2.0
| 833
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Script for mass-commenting Jenkins test triggers on a Beam PR."""
import itertools
import os
import socket
import sys
import time
import traceback
import re
import requests
from datetime import datetime
COMMENTS_TO_ADD = [
"Run Release Gradle Build",
"Run Go PostCommit",
"Run Java PostCommit",
"Run Java Flink PortableValidatesRunner Batch",
"Run Java Flink PortableValidatesRunner Streaming",
"Run Apex ValidatesRunner",
"Run Dataflow ValidatesRunner",
"Run Flink ValidatesRunner",
"Run Gearpump ValidatesRunner",
"Run Samza ValidatesRunner",
"Run Spark ValidatesRunner",
"Run Java Spark PortableValidatesRunner Batch",
"Run Python Spark ValidatesRunner",
"Run Python Dataflow ValidatesContainer",
"Run Python Dataflow ValidatesRunner",
"Run Python 3.5 Flink ValidatesRunner",
"Run Python 2 PostCommit",
"Run Python 3.5 PostCommit",
"Run Python 3.6 PostCommit",
"Run Python 3.7 PostCommit",
"Run SQL PostCommit",
"Run Go PreCommit",
"Run Java PreCommit",
"Run Java_Examples_Dataflow PreCommit",
"Run JavaPortabilityApi PreCommit",
"Run Portable_Python PreCommit",
"Run PythonLint PreCommit",
"Run Python PreCommit",
"Run Python DockerBuild PreCommit"
]
def executeGHGraphqlQuery(accessToken, query):
'''Runs graphql query on GitHub.'''
url = 'https://api.github.com/graphql'
headers = {'Authorization': 'Bearer %s' % accessToken}
r = requests.post(url=url, json={'query': query}, headers=headers)
return r.json()
def getSubjectId(accessToken, prNumber):
query = '''
query FindPullRequestID {
repository(owner:"apache", name:"beam") {
pullRequest(number:%s) {
id
}
}
}
''' % prNumber
response = executeGHGraphqlQuery(accessToken, query)
return response['data']['repository']['pullRequest']['id']
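# Illustrative shape of the GraphQL response consumed above (the id value is
# hypothetical):
#   {"data": {"repository": {"pullRequest": {"id": "MDExOlB1bGxSZXF1ZXN0MTIz"}}}}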
def fetchGHData(accessToken, subjectId, commentBody):
    '''Posts a single comment on the pull request via the GitHub GraphQL API and returns the JSON response.'''
query = '''
mutation AddPullRequestComment {
addComment(input:{subjectId:"%s",body: "%s"}) {
commentEdge {
node {
createdAt
body
}
}
subject {
id
}
}
}
''' % (subjectId, commentBody)
return executeGHGraphqlQuery(accessToken, query)
def postComments(accessToken, subjectId):
'''
    Main workhorse method. Posts each trigger phrase in COMMENTS_TO_ADD as a comment on the pull request.
'''
for commentBody in COMMENTS_TO_ADD:
jsonData = fetchGHData(accessToken, subjectId, commentBody)
print(jsonData)
def probeGitHubIsUp():
'''
Returns True if GitHub responds to simple queries. Else returns False.
'''
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex(('github.com', 443))
    return result == 0
################################################################################
if __name__ == '__main__':
'''
    This script is supposed to be invoked directly.
    However, for testing purposes and to allow importing,
    the working code is wrapped in this module check.
'''
print("Started.")
if not probeGitHubIsUp():
print("GitHub is unavailable, skipping fetching data.")
exit()
print("GitHub is available start fetching data.")
accessToken = input("Enter your Github access token: ")
pr = input("Enter the Beam PR number to test (e.g. 11403): ")
subjectId = getSubjectId(accessToken, pr)
postComments(accessToken, subjectId)
print("Fetched data.")
print('Done.')
|
iemejia/incubator-beam
|
release/src/main/scripts/mass_comment.py
|
Python
|
apache-2.0
| 4,252
|
import socket
import platform
import multiprocessing
from distutils.version import LooseVersion
SocketBase = socket.socket
MpPipe = multiprocessing.Pipe
MpQueue = multiprocessing.Queue
MpProcess = multiprocessing.Process
ipdb_nl_async = True
commit_barrier = 0
# save uname() on startup time: it is not so
# highly possible that the kernel will be
# changed in runtime, while calling uname()
# every time is a bit expensive
uname = platform.uname()
machine = platform.machine()
arch = platform.architecture()[0]
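# illustrative: uname[2] == '4.15.0-91-generic' gives kernel == [4, 15, 0];
# the [:3] slice keeps only the major/minor/patch components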
kernel = LooseVersion(uname[2]).version[:3]
|
drzaeus77/pyroute2
|
pyroute2/config/__init__.py
|
Python
|
apache-2.0
| 559
|
"""empty message
Revision ID: 4acdc03c38b0
Revises: 1d85516e9a94
Create Date: 2014-08-12 17:21:41.726000
"""
# revision identifiers, used by Alembic.
revision = '4acdc03c38b0'
down_revision = '1d85516e9a94'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('article', sa.Column('click', sa.Integer(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('article', 'click')
### end Alembic commands ###
|
alwayssayyes/practice-tutti
|
migrations/versions/4acdc03c38b0_.py
|
Python
|
apache-2.0
| 605
|
# -*- coding: utf-8 -*-
def command():
return "attach-load-balancer"
def init_argument(parser):
parser.add_argument("--load-balancer-no", required=True)
parser.add_argument("--instance-no", required=True)
def execute(requester, args):
load_balancer_no = args.load_balancer_no
instance_no = args.instance_no
parameters = {}
parameters["LoadBalancerNo"] = load_balancer_no
parameters["InstanceNo"] = instance_no
return requester.execute("/AttachLoadBalancer", parameters)
|
primecloud-controller-org/pcc-cli
|
src/pcc/api/lb/attach_load_balancer.py
|
Python
|
apache-2.0
| 511
|
# -*- coding: utf-8 -*-
#
# This file is part of django-taggit-autocomplete-modified.
#
# django-taggit-autocomplete-modified provides autocomplete functionality
# to the tags form field of django-taggit.
#
# Development Web Site:
# - http://www.codetrax.org/projects/django-taggit-autocomplete-modified
# Public Source Code Repository:
# - https://source.codetrax.org/hgroot/django-taggit-autocomplete-modified
#
# Copyright 2011 George Notaras <gnot [at] g-loaded.eu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from taggit.forms import TagField
from taggit.managers import TaggableManager
from widgets import TagAutocomplete
class TaggableManagerAutocomplete(TaggableManager):
def formfield(self, form_class=TagField, **kwargs):
field = super(TaggableManagerAutocomplete, self).formfield(form_class, **kwargs)
field.widget = TagAutocomplete()
return field
if 'south' in settings.INSTALLED_APPS:
try:
from south.modelsinspector import add_ignored_fields
except ImportError:
pass
else:
add_ignored_fields(["^taggit_autocomplete_modified\.managers"])
|
onepercentclub/django-taggit-autocomplete-modified
|
src/taggit_autocomplete_modified/managers.py
|
Python
|
apache-2.0
| 1,736
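A minimal sketch of attaching the manager defined above to a model; the `Bookmark` model and its fields are invented for illustration, the import path follows the record's file path, and the rest is standard django-taggit usage.

# Hypothetical model using the autocomplete-enabled manager.
from django.db import models
from taggit_autocomplete_modified.managers import TaggableManagerAutocomplete

class Bookmark(models.Model):
    url = models.URLField()
    # formfield() above swaps in the TagAutocomplete widget for this field.
    tags = TaggableManagerAutocomplete(blank=True)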
|
#!/usr/bin/env python
#
# Copyright 2017 Robot Garden, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
############################################################
# Node to monitor magnetic wheel sensor
# Sensor provides 2 digital outputs to UP GPIO:
# 1) moving: true if the wheel is rotating
# 2) direction: true if wheel direction is forward
# Note: the direction signal from the sensor is not valid for the first
# approx 1/2 wheel rotation when starting from a stopped condition
#
# This node publishes on topic 'wheel' a String message with values:
# * 'Not moving'
# * 'Moving forward'
# * 'Moving reverse'
# * 'Moving direction unknown'
############################################################
import rospy
import rospkg
import RPi.GPIO as GPIO
import time
from std_msgs.msg import String
def wheel_sensor():
NODE_FREQ = 20 # Hz
rospy.init_node('wheel_sensor', anonymous=True)
pubWheel = rospy.Publisher('wheel', String, queue_size=10)
rate = rospy.Rate(NODE_FREQ)
moving_gpio_pin = 29
direction_gpio_pin = 31
GPIO.setmode(GPIO.BOARD)
GPIO.setup(moving_gpio_pin, GPIO.IN)
GPIO.setup(direction_gpio_pin, GPIO.IN)
    DIRECTION_BLACKOUT = 0.5 # seconds after standing start when direction signal is invalid
    # initialize the timers so the first pass through the loop is safe even
    # if the wheel is already moving when the node starts
    start_time = time.time()
    elapsed_time = 0
    while not rospy.is_shutdown():
try:
if not GPIO.input(moving_gpio_pin):
pubWheel.publish("Not moving")
start_time = time.time() # gets reset every 1/NODE_FREQ sec
elapsed_time = 0
else:
if elapsed_time > DIRECTION_BLACKOUT:
if GPIO.input(direction_gpio_pin):
pubWheel.publish("Moving forward")
else:
pubWheel.publish("Moving reverse")
else:
pubWheel.publish("Moving direction unknown")
elapsed_time = time.time() - start_time # how long have we been moving
rate.sleep()
        except Exception as e:
# print e
#Revert all GPIO pins to their normal states (i.e. input = safe)
GPIO.cleanup()
# break
if __name__ == '__main__':
# Start the node
try:
wheel_sensor()
except rospy.ROSInterruptException:
#Revert all GPIO pins to their normal states (i.e. input = safe)
GPIO.cleanup()
pass
|
ProgrammingRobotsStudyGroup/robo_magellan
|
scripts/wheel_sensor.py
|
Python
|
apache-2.0
| 2,941
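A minimal companion listener for the `wheel` topic described in the header comment above; the node and callback names are invented, and a standard ROS 1 Python environment is assumed.

# Hypothetical subscriber for the String messages published by wheel_sensor.
import rospy
from std_msgs.msg import String

def on_wheel_state(msg):
    rospy.loginfo("wheel state: %s", msg.data)

if __name__ == '__main__':
    rospy.init_node('wheel_listener', anonymous=True)
    rospy.Subscriber('wheel', String, on_wheel_state)
    rospy.spin()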
|
from django.apps import AppConfig
class PurchaseorderManageConfig(AppConfig):
name = 'PurchaseOrder_manage'
|
Pandaaaa906/ChemErpSystem
|
PurchaseOrder_manage/apps.py
|
Python
|
apache-2.0
| 114
|
'''
Created on Dec 23, 2014
@author: Debanjan Mahata
'''
from twython import TwythonStreamer
from TwitterAuthentication import keyList
from time import sleep
from random import randint
import EventInfoProcess.EventTweetProcess as tweetProcess
import EventInfoProcess.EventTweetClassifier as tweetClassify
import DataPreparation.TweetProcess as tp
from pymongo import MongoClient
#connecting to MongoDB database
mongoObj = MongoClient()
#setting the MongoDB database
db = mongoObj[""]
#setting the collection in the database for storing the Tweets
collection = db[""]
class MyStreamer(TwythonStreamer):
def extractTweetInfo(self,tweet):
tweetInfoExtractObj = tp.TweetInfoExtract(tweet)
tweetFeatures = tweetInfoExtractObj.features
return tweetFeatures
def assignInfoScore(self,tweet):
tweetObj = tweetProcess.TweetProcess(tweet)
tweetClassifyObj = tweetClassify.TweetScore()
tweetClassifyObj.setTweetInstance(tweetObj.getDataInstanceForClassification())
tweetInfoScore = tweetClassifyObj.getTweetPosScore()
return tweetInfoScore
def on_success(self, data):
if data["lang"] == "en":
dataDict = {"rawData":None,"extractedFeatures":None,"initInfoScore":0.0}
tweet = data
infoScore = self.assignInfoScore(tweet)
tweetFeatures = self.extractTweetInfo(tweet)
dataDict["rawData"] = data
dataDict["extractedFeatures"] = tweetFeatures
dataDict["initInfoScore"] = infoScore
collection.insert(dataDict)
# Want to disconnect after the first result?
# self.disconnect()
def on_error(self, status_code, data):
sleep(randint(1,60))
keys = keyList[randint(0,12)]
stream = MyStreamer(keys["APP_KEY"],keys["APP_SECRET"],keys["OAUTH_TOKEN"],keys["OAUTH_TOKEN_SECRET"])
stream.statuses.filter(track="")
## Requires Authentication as of Twitter API v1.1
while True:
try:
keys = keyList[randint(0,12)]
stream = MyStreamer(keys["APP_KEY"],keys["APP_SECRET"],keys["OAUTH_TOKEN"],keys["OAUTH_TOKEN_SECRET"])
stream.statuses.filter(track='')
except:
continue
|
dxmahata/InformativeTweetCollection
|
DataCollection/TwitterEventDataCollection.py
|
Python
|
apache-2.0
| 2,321
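The streamer above indexes `keyList` with `randint(0, 12)` and reads four credential fields from each entry, so the imported `TwitterAuthentication.keyList` is presumably a list of at least 13 dicts shaped like the sketch below; the placeholder values are obviously invented.

# Hypothetical shape of TwitterAuthentication.keyList assumed by the code above.
keyList = [
    {
        "APP_KEY": "consumer-key-%d" % i,
        "APP_SECRET": "consumer-secret-%d" % i,
        "OAUTH_TOKEN": "access-token-%d" % i,
        "OAUTH_TOKEN_SECRET": "access-token-secret-%d" % i,
    }
    for i in range(13)
]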
|
# -*- coding: utf-8 -*-
#
# Copyright 2016-2021 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Topic distribution auxiliary functions
"""
import sys
import bigml.api
from bigml.topicmodel import TopicModel
from bigml.io import UnicodeWriter
import bigmler.utils as u
import bigmler.checkpoint as c
from bigmler.tst_reader import TstReader as TestReader
from bigmler.resourcesapi.common import NORMAL_FORMAT, FULL_FORMAT
from bigmler.resourcesapi.batch_topic_distributions import \
create_batch_topic_distribution
# symbol used in failing topic distribution
NO_DISTRIBUTION = "-"
def use_prediction_headers(test_reader, fields, args):
"""Uses header information from the test file in the prediction output
If --prediction-fields is used, retrieves the fields to exclude
from the test input in the --prediction-info full format, that includes
them all by default.
"""
exclude = []
headers = []
if (args.prediction_info == FULL_FORMAT or
args.prediction_fields is not None):
# Try to retrieve headers from the test file
if test_reader.has_headers():
input_headers = test_reader.raw_headers
else:
# if no headers are found in the test file we assume it has the
# same model input_field structure removing the objective field
input_headers = [fields[field]['name'] for field in
fields.fields_columns]
if args.prediction_fields is not None:
prediction_fields = list(map(str.strip,
args.prediction_fields.split(',')))
# Filter input_headers adding only those chosen by the user
number_of_headers = len(input_headers)
for index in range(0, number_of_headers):
if not input_headers[index] in prediction_fields:
exclude.append(index)
exclude = sorted(list(set(exclude)), reverse=True)
for index in exclude:
del input_headers[index]
input_headers.extend(headers)
headers = input_headers
return exclude, headers
def write_topic_distribution(topic_distribution_resource, output=sys.stdout,
prediction_info=NORMAL_FORMAT, input_data=None,
exclude=None):
"""Writes the final topic distribution prediction to the required output
The format of the output depends on the `prediction_info` value.
There's a brief format, that writes only the predicted value,
and a full data format that writes first the input data
used to predict followed by the topic distribution.
"""
row = []
# input data is added if prediction format is BRIEF (no confidence) or FULL
if prediction_info != NORMAL_FORMAT:
if input_data is None:
input_data = []
row = input_data
if exclude:
for index in exclude:
del row[index]
topic_probabilities = [topic['probability'] \
for topic in topic_distribution_resource]
row.extend(topic_probabilities)
try:
output.writerow(row)
except AttributeError:
try:
output.write(row)
except AttributeError:
raise AttributeError("You should provide a writeable object")
def topic_distribution_to_row(topic_distribution_resource):
"""Returns a csv row to store main topic distribution info in csv files.
"""
return [topic_distribution_resource['object']['topic_distribution']]
def local_topic_distribution(topic_models, test_reader, output, args,
exclude=None, headers=None):
"""Get local topic model and issue topic distribution prediction
"""
# Only one topic model at present
local_topic_model = TopicModel(topic_models[0], api=args.retrieve_api_)
if args.prediction_header:
headers.extend([topic['name'] for topic in local_topic_model.topics])
output.writerow(headers)
for input_data in test_reader:
input_data_dict = test_reader.dict(input_data, filtering=False)
try:
topic_distribution_info = local_topic_model.distribution(
input_data_dict)
except Exception:
topic_distribution_info = []
write_topic_distribution(topic_distribution_info,
output,
args.prediction_info, input_data, exclude)
def topic_distribution(topic_models, fields, args, session_file=None):
"""Computes a topic distribution for each entry in the `test_set`.
"""
test_set = args.test_set
test_set_header = args.test_header
output = args.predictions
test_reader = TestReader(test_set, test_set_header, fields,
None,
test_separator=args.test_separator)
with UnicodeWriter(output, lineterminator="\n") as output:
# columns to exclude if input_data is added to the prediction field
exclude, headers = use_prediction_headers(
test_reader, fields, args)
        # Local topic distributions: topic distributions are computed
        # locally using the topic models' distribution() method
message = u.dated("Creating local topic distributions.\n")
u.log_message(message, log_file=session_file, console=args.verbosity)
local_topic_distribution(topic_models, test_reader, output,
args, exclude=exclude, headers=headers)
test_reader.close()
def remote_topic_distribution( \
topic_model, test_dataset, batch_topic_distribution_args, args, \
api, resume, prediction_file=None, session_file=None, \
path=None, log=None):
"""Computes a topic distributioin for each entry in the `test_set`.
Predictions are computed remotely using the batch topic distribution call.
"""
topic_model_id = bigml.api.get_topic_model_id(topic_model)
# if resuming, try to extract dataset form log files
if resume:
message = u.dated("Batch topic distribution not found. Resuming.\n")
resume, batch_topic_distribution = c.checkpoint(
c.is_batch_topic_distribution_created, path, debug=args.debug,
message=message, log_file=session_file, console=args.verbosity)
if not resume:
batch_topic_distribution = create_batch_topic_distribution(
topic_model_id, test_dataset, batch_topic_distribution_args,
args, api, session_file=session_file, path=path, log=log)
if not args.no_csv:
file_name = api.download_batch_topic_distribution( \
batch_topic_distribution,
prediction_file)
if file_name is None:
sys.exit("Failed downloading CSV.")
if args.to_dataset:
batch_topic_distribution = bigml.api.check_resource( \
batch_topic_distribution, api=api)
new_dataset = bigml.api.get_dataset_id(
batch_topic_distribution['object']['output_dataset_resource'])
if new_dataset is not None:
message = u.dated("Batch topic distribution dataset created: %s\n"
% u.get_url(new_dataset))
u.log_message(message, log_file=session_file,
console=args.verbosity)
u.log_created_resources("batch_topic_distribution_dataset",
path, new_dataset, mode='a')
|
bigmlcom/bigmler
|
bigmler/topicdistribution.py
|
Python
|
apache-2.0
| 7,994
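A small isolated sketch of `write_topic_distribution`, using a plain `csv.writer` as the output object (it provides the `writerow()` the function expects) and a hand-made distribution; the topic names, probabilities, and input text are invented, while the import paths come from this record and the module's own imports.

# Illustrative call only; the data below is made up.
import csv
import sys
from bigmler.topicdistribution import write_topic_distribution
from bigmler.resourcesapi.common import FULL_FORMAT

fake_distribution = [{"name": "Topic 00", "probability": 0.61},
                     {"name": "Topic 01", "probability": 0.39}]
writer = csv.writer(sys.stdout, lineterminator="\n")
# With FULL_FORMAT the input row is echoed before the probabilities:
# some input text,0.61,0.39
write_topic_distribution(fake_distribution, output=writer,
                         prediction_info=FULL_FORMAT,
                         input_data=["some input text"])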
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import unittest
from adynaton.ldap import LightweightDirectoryAccessProtocolServer
class TestLightweightDirectoryAccessProtocolServer(unittest.TestCase):
"""
Test for Object
"""
def setUp(self):
"""
Setup for unit tests
"""
self.theclass = LightweightDirectoryAccessProtocolServer()
def test_status(self):
"""
Method Test
"""
self.assertTrue(self.theclass.status())
|
lathama/Adynaton
|
adynaton/unittests/test_LightweightDirectoryAccessProtocolServer.py
|
Python
|
apache-2.0
| 1,218
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Big Switch Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mandeep Dhami, Big Switch Networks, Inc.
# @author: Sumit Naiksatam, sumitnaiksatam@gmail.com, Big Switch Networks, Inc.
"""
Neutron REST Proxy Plug-in for Big Switch and FloodLight Controllers.
NeutronRestProxy provides a generic neutron plugin that translates all plugin
function calls to equivalent authenticated REST calls to a set of redundant
external network controllers. It also keeps persistent store for all neutron
state to allow for re-sync of the external controller(s), if required.
The local state on the plugin also allows for local response and fast-fail
semantics where it can be determined based on the local persistent store.
Network controller specific code is decoupled from this plugin and expected
to reside on the controller itself (via the REST interface).
This allows for:
- independent authentication and redundancy schemes between neutron and the
network controller
- independent upgrade/development cycles between neutron and the controller
as it limits the proxy code upgrade requirement to neutron release cycle
and the controller specific code upgrade requirement to controller code
- ability to sync the controller with neutron for independent recovery/reset
External REST API used by proxy is the same API as defined for neutron (JSON
subset) with some additional parameters (gateway on network-create and macaddr
on port-attach) on an additional PUT to do a bulk dump of all persistent data.
"""
import copy
import functools
import httplib
import re
import eventlet
from oslo.config import cfg
from sqlalchemy.orm import exc as sqlexc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.api import extensions as neutron_extensions
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.common import constants as const
from neutron.common import exceptions
from neutron.common import rpc as q_rpc
from neutron.common import topics
from neutron.common import utils
from neutron import context as qcontext
from neutron.db import agents_db
from neutron.db import agentschedulers_db
from neutron.db import allowedaddresspairs_db as addr_pair_db
from neutron.db import api as db
from neutron.db import db_base_plugin_v2
from neutron.db import dhcp_rpc_base
from neutron.db import external_net_db
from neutron.db import extradhcpopt_db
from neutron.db import l3_db
from neutron.db import models_v2
from neutron.db import securitygroups_db as sg_db
from neutron.db import securitygroups_rpc_base as sg_rpc_base
from neutron.extensions import allowedaddresspairs as addr_pair
from neutron.extensions import external_net
from neutron.extensions import extra_dhcp_opt as edo_ext
from neutron.extensions import l3
from neutron.extensions import portbindings
from neutron import manager
from neutron.openstack.common import excutils
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import rpc
from neutron.plugins.bigswitch import config as pl_config
from neutron.plugins.bigswitch.db import porttracker_db
from neutron.plugins.bigswitch import extensions
from neutron.plugins.bigswitch import routerrule_db
from neutron.plugins.bigswitch import servermanager
from neutron.plugins.bigswitch.version import version_string_with_vcs
LOG = logging.getLogger(__name__)
SYNTAX_ERROR_MESSAGE = _('Syntax error in server config file, aborting plugin')
METADATA_SERVER_IP = '169.254.169.254'
class AgentNotifierApi(rpc.proxy.RpcProxy,
sg_rpc.SecurityGroupAgentRpcApiMixin):
BASE_RPC_API_VERSION = '1.1'
def __init__(self, topic):
super(AgentNotifierApi, self).__init__(
topic=topic, default_version=self.BASE_RPC_API_VERSION)
self.topic_port_update = topics.get_topic_name(
topic, topics.PORT, topics.UPDATE)
def port_update(self, context, port):
self.fanout_cast(context,
self.make_msg('port_update',
port=port),
topic=self.topic_port_update)
class RestProxyCallbacks(sg_rpc_base.SecurityGroupServerRpcCallbackMixin,
dhcp_rpc_base.DhcpRpcCallbackMixin):
RPC_API_VERSION = '1.1'
def create_rpc_dispatcher(self):
return q_rpc.PluginRpcDispatcher([self,
agents_db.AgentExtRpcCallback()])
def get_port_from_device(self, device):
port_id = re.sub(r"^tap", "", device)
port = self.get_port_and_sgs(port_id)
if port:
port['device'] = device
return port
def get_port_and_sgs(self, port_id):
"""Get port from database with security group info."""
LOG.debug(_("get_port_and_sgs() called for port_id %s"), port_id)
session = db.get_session()
sg_binding_port = sg_db.SecurityGroupPortBinding.port_id
with session.begin(subtransactions=True):
query = session.query(
models_v2.Port,
sg_db.SecurityGroupPortBinding.security_group_id
)
query = query.outerjoin(sg_db.SecurityGroupPortBinding,
models_v2.Port.id == sg_binding_port)
query = query.filter(models_v2.Port.id.startswith(port_id))
port_and_sgs = query.all()
if not port_and_sgs:
return
port = port_and_sgs[0][0]
plugin = manager.NeutronManager.get_plugin()
port_dict = plugin._make_port_dict(port)
port_dict['security_groups'] = [
sg_id for port_, sg_id in port_and_sgs if sg_id]
port_dict['security_group_rules'] = []
port_dict['security_group_source_groups'] = []
port_dict['fixed_ips'] = [ip['ip_address']
for ip in port['fixed_ips']]
return port_dict
class NeutronRestProxyV2Base(db_base_plugin_v2.NeutronDbPluginV2,
external_net_db.External_net_db_mixin,
routerrule_db.RouterRule_db_mixin):
supported_extension_aliases = ["binding"]
servers = None
def _get_all_data(self, get_ports=True, get_floating_ips=True,
get_routers=True):
admin_context = qcontext.get_admin_context()
networks = []
# this method is used by the ML2 driver so it can't directly invoke
# the self.get_(ports|networks) methods
plugin = manager.NeutronManager.get_plugin()
all_networks = plugin.get_networks(admin_context) or []
for net in all_networks:
mapped_network = self._get_mapped_network_with_subnets(net)
flips_n_ports = mapped_network
if get_floating_ips:
flips_n_ports = self._get_network_with_floatingips(
mapped_network)
if get_ports:
ports = []
net_filter = {'network_id': [net.get('id')]}
net_ports = plugin.get_ports(admin_context,
filters=net_filter) or []
for port in net_ports:
mapped_port = self._map_state_and_status(port)
mapped_port['attachment'] = {
'id': port.get('device_id'),
'mac': port.get('mac_address'),
}
mapped_port = self._extend_port_dict_binding(admin_context,
mapped_port)
ports.append(mapped_port)
flips_n_ports['ports'] = ports
if flips_n_ports:
networks.append(flips_n_ports)
data = {'networks': networks}
if get_routers:
routers = []
all_routers = self.get_routers(admin_context) or []
for router in all_routers:
interfaces = []
mapped_router = self._map_state_and_status(router)
router_filter = {
'device_owner': [const.DEVICE_OWNER_ROUTER_INTF],
'device_id': [router.get('id')]
}
router_ports = self.get_ports(admin_context,
filters=router_filter) or []
for port in router_ports:
net_id = port.get('network_id')
subnet_id = port['fixed_ips'][0]['subnet_id']
intf_details = self._get_router_intf_details(admin_context,
net_id,
subnet_id)
interfaces.append(intf_details)
mapped_router['interfaces'] = interfaces
routers.append(mapped_router)
data.update({'routers': routers})
return data
def _send_all_data(self, send_ports=True, send_floating_ips=True,
send_routers=True, timeout=None,
triggered_by_tenant=None):
"""Pushes all data to network ctrl (networks/ports, ports/attachments).
        This gives the controller an option to re-sync its persistent store
with neutron's current view of that data.
"""
data = self._get_all_data(send_ports, send_floating_ips, send_routers)
data['triggered_by_tenant'] = triggered_by_tenant
errstr = _("Unable to update remote topology: %s")
return self.servers.rest_action('PUT', servermanager.TOPOLOGY_PATH,
data, errstr, timeout=timeout)
def _get_network_with_floatingips(self, network, context=None):
if context is None:
context = qcontext.get_admin_context()
net_id = network['id']
net_filter = {'floating_network_id': [net_id]}
fl_ips = self.get_floatingips(context,
filters=net_filter) or []
network['floatingips'] = fl_ips
return network
def _get_all_subnets_json_for_network(self, net_id, context=None):
if context is None:
context = qcontext.get_admin_context()
# start a sub-transaction to avoid breaking parent transactions
with context.session.begin(subtransactions=True):
subnets = self._get_subnets_by_network(context,
net_id)
subnets_details = []
if subnets:
for subnet in subnets:
subnet_dict = self._make_subnet_dict(subnet)
mapped_subnet = self._map_state_and_status(subnet_dict)
subnets_details.append(mapped_subnet)
return subnets_details
def _get_mapped_network_with_subnets(self, network, context=None):
# if context is not provided, admin context is used
if context is None:
context = qcontext.get_admin_context()
network = self._map_state_and_status(network)
subnets = self._get_all_subnets_json_for_network(network['id'],
context)
network['subnets'] = subnets
for subnet in (subnets or []):
if subnet['gateway_ip']:
# FIX: For backward compatibility with wire protocol
network['gateway'] = subnet['gateway_ip']
break
else:
network['gateway'] = ''
network[external_net.EXTERNAL] = self._network_is_external(
context, network['id'])
# include ML2 segmentation types
network['segmentation_types'] = getattr(self, "segmentation_types", "")
return network
def _send_create_network(self, network, context=None):
tenant_id = network['tenant_id']
mapped_network = self._get_mapped_network_with_subnets(network,
context)
self.servers.rest_create_network(tenant_id, mapped_network)
def _send_update_network(self, network, context=None):
net_id = network['id']
tenant_id = network['tenant_id']
mapped_network = self._get_mapped_network_with_subnets(network,
context)
net_fl_ips = self._get_network_with_floatingips(mapped_network,
context)
self.servers.rest_update_network(tenant_id, net_id, net_fl_ips)
def _send_delete_network(self, network, context=None):
net_id = network['id']
tenant_id = network['tenant_id']
self.servers.rest_delete_network(tenant_id, net_id)
def _map_state_and_status(self, resource):
resource = copy.copy(resource)
resource['state'] = ('UP' if resource.pop('admin_state_up',
True) else 'DOWN')
resource.pop('status', None)
return resource
def _warn_on_state_status(self, resource):
if resource.get('admin_state_up', True) is False:
LOG.warning(_("Setting admin_state_up=False is not supported "
"in this plugin version. Ignoring setting for "
"resource: %s"), resource)
if 'status' in resource:
if resource['status'] != const.NET_STATUS_ACTIVE:
LOG.warning(_("Operational status is internally set by the "
"plugin. Ignoring setting status=%s."),
resource['status'])
def _get_router_intf_details(self, context, intf_id, subnet_id):
# we will use the network id as interface's id
net_id = intf_id
network = self.get_network(context, net_id)
subnet = self.get_subnet(context, subnet_id)
mapped_network = self._get_mapped_network_with_subnets(network)
mapped_subnet = self._map_state_and_status(subnet)
data = {
'id': intf_id,
"network": mapped_network,
"subnet": mapped_subnet
}
return data
def _extend_port_dict_binding(self, context, port):
cfg_vif_type = cfg.CONF.NOVA.vif_type.lower()
if not cfg_vif_type in (portbindings.VIF_TYPE_OVS,
portbindings.VIF_TYPE_IVS):
LOG.warning(_("Unrecognized vif_type in configuration "
"[%s]. Defaulting to ovs."),
cfg_vif_type)
cfg_vif_type = portbindings.VIF_TYPE_OVS
# In ML2, the host_id is already populated
if portbindings.HOST_ID in port:
hostid = port[portbindings.HOST_ID]
elif 'id' in port:
hostid = porttracker_db.get_port_hostid(context, port['id'])
else:
hostid = None
if hostid:
port[portbindings.HOST_ID] = hostid
override = self._check_hostvif_override(hostid)
if override:
cfg_vif_type = override
port[portbindings.VIF_TYPE] = cfg_vif_type
sg_enabled = sg_rpc.is_firewall_enabled()
port[portbindings.VIF_DETAILS] = {
# TODO(rkukura): Replace with new VIF security details
portbindings.CAP_PORT_FILTER:
'security-group' in self.supported_extension_aliases,
portbindings.OVS_HYBRID_PLUG: sg_enabled
}
return port
def _check_hostvif_override(self, hostid):
for v in cfg.CONF.NOVA.vif_types:
if hostid in getattr(cfg.CONF.NOVA, "node_override_vif_" + v, []):
return v
return False
def _get_port_net_tenantid(self, context, port):
net = super(NeutronRestProxyV2Base,
self).get_network(context, port["network_id"])
return net['tenant_id']
def async_port_create(self, tenant_id, net_id, port):
try:
self.servers.rest_create_port(tenant_id, net_id, port)
except servermanager.RemoteRestError as e:
# 404 should never be received on a port create unless
# there are inconsistencies between the data in neutron
# and the data in the backend.
# Run a sync to get it consistent.
if (cfg.CONF.RESTPROXY.auto_sync_on_failure and
e.status == httplib.NOT_FOUND and
servermanager.NXNETWORK in e.reason):
LOG.error(_("Iconsistency with backend controller "
"triggering full synchronization."))
# args depend on if we are operating in ML2 driver
# or as the full plugin
topoargs = self.servers.get_topo_function_args
self._send_all_data(
send_ports=topoargs['get_ports'],
send_floating_ips=topoargs['get_floating_ips'],
send_routers=topoargs['get_routers'],
triggered_by_tenant=tenant_id
)
# If the full sync worked, the port will be created
# on the controller so it can be safely marked as active
else:
# Any errors that don't result in a successful auto-sync
# require that the port be placed into the error state.
LOG.error(
_("NeutronRestProxyV2: Unable to create port: %s"), e)
try:
self._set_port_status(port['id'], const.PORT_STATUS_ERROR)
except exceptions.PortNotFound:
# If port is already gone from DB and there was an error
# creating on the backend, everything is already consistent
pass
return
new_status = (const.PORT_STATUS_ACTIVE if port['state'] == 'UP'
else const.PORT_STATUS_DOWN)
try:
self._set_port_status(port['id'], new_status)
except exceptions.PortNotFound:
# This port was deleted before the create made it to the controller
# so it now needs to be deleted since the normal delete request
            # would have deleted a non-existent port.
self.servers.rest_delete_port(tenant_id, net_id, port['id'])
# NOTE(kevinbenton): workaround for eventlet/mysql deadlock
@utils.synchronized('bsn-port-barrier')
def _set_port_status(self, port_id, status):
session = db.get_session()
try:
port = session.query(models_v2.Port).filter_by(id=port_id).one()
port['status'] = status
session.flush()
except sqlexc.NoResultFound:
raise exceptions.PortNotFound(port_id=port_id)
def put_context_in_serverpool(f):
@functools.wraps(f)
def wrapper(self, context, *args, **kwargs):
# core plugin: context is top level object
# ml2: keeps context in _plugin_context
self.servers.set_context(getattr(context, '_plugin_context', context))
return f(self, context, *args, **kwargs)
return wrapper
class NeutronRestProxyV2(NeutronRestProxyV2Base,
addr_pair_db.AllowedAddressPairsMixin,
extradhcpopt_db.ExtraDhcpOptMixin,
agentschedulers_db.DhcpAgentSchedulerDbMixin,
sg_rpc_base.SecurityGroupServerRpcMixin):
_supported_extension_aliases = ["external-net", "router", "binding",
"router_rules", "extra_dhcp_opt", "quotas",
"dhcp_agent_scheduler", "agent",
"security-group", "allowed-address-pairs"]
@property
def supported_extension_aliases(self):
if not hasattr(self, '_aliases'):
aliases = self._supported_extension_aliases[:]
sg_rpc.disable_security_group_extension_by_config(aliases)
self._aliases = aliases
return self._aliases
def __init__(self):
super(NeutronRestProxyV2, self).__init__()
LOG.info(_('NeutronRestProxy: Starting plugin. Version=%s'),
version_string_with_vcs())
pl_config.register_config()
self.evpool = eventlet.GreenPool(cfg.CONF.RESTPROXY.thread_pool_size)
# Include the Big Switch Extensions path in the api_extensions
neutron_extensions.append_api_extensions_path(extensions.__path__)
self.add_meta_server_route = cfg.CONF.RESTPROXY.add_meta_server_route
# init network ctrl connections
self.servers = servermanager.ServerPool()
self.servers.get_topo_function = self._get_all_data
self.servers.get_topo_function_args = {'get_ports': True,
'get_floating_ips': True,
'get_routers': True}
self.network_scheduler = importutils.import_object(
cfg.CONF.network_scheduler_driver
)
# setup rpc for security and DHCP agents
self._setup_rpc()
if cfg.CONF.RESTPROXY.sync_data:
self._send_all_data()
LOG.debug(_("NeutronRestProxyV2: initialization done"))
def _setup_rpc(self):
self.conn = rpc.create_connection(new=True)
self.topic = topics.PLUGIN
self.notifier = AgentNotifierApi(topics.AGENT)
# init dhcp agent support
self._dhcp_agent_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
self.agent_notifiers[const.AGENT_TYPE_DHCP] = (
self._dhcp_agent_notifier
)
self.callbacks = RestProxyCallbacks()
self.dispatcher = self.callbacks.create_rpc_dispatcher()
self.conn.create_consumer(self.topic, self.dispatcher,
fanout=False)
# Consume from all consumers in a thread
self.conn.consume_in_thread()
@put_context_in_serverpool
def create_network(self, context, network):
"""Create a network.
Network represents an L2 network segment which can have a set of
subnets and ports associated with it.
:param context: neutron api request context
:param network: dictionary describing the network
:returns: a sequence of mappings with the following signature:
{
"id": UUID representing the network.
"name": Human-readable name identifying the network.
"tenant_id": Owner of network. NOTE: only admin user can specify
a tenant_id other than its own.
"admin_state_up": Sets admin state of network.
if down, network does not forward packets.
"status": Indicates whether network is currently operational
(values are "ACTIVE", "DOWN", "BUILD", and "ERROR")
"subnets": Subnets associated with this network.
}
:raises: RemoteRestError
"""
LOG.debug(_("NeutronRestProxyV2: create_network() called"))
self._warn_on_state_status(network['network'])
with context.session.begin(subtransactions=True):
self._ensure_default_security_group(
context,
network['network']["tenant_id"]
)
# create network in DB
new_net = super(NeutronRestProxyV2, self).create_network(context,
network)
self._process_l3_create(context, new_net, network['network'])
# create network on the network controller
self._send_create_network(new_net, context)
# return created network
return new_net
@put_context_in_serverpool
def update_network(self, context, net_id, network):
"""Updates the properties of a particular Virtual Network.
:param context: neutron api request context
:param net_id: uuid of the network to update
:param network: dictionary describing the updates
:returns: a sequence of mappings with the following signature:
{
"id": UUID representing the network.
"name": Human-readable name identifying the network.
"tenant_id": Owner of network. NOTE: only admin user can
specify a tenant_id other than its own.
"admin_state_up": Sets admin state of network.
if down, network does not forward packets.
"status": Indicates whether network is currently operational
(values are "ACTIVE", "DOWN", "BUILD", and "ERROR")
"subnets": Subnets associated with this network.
}
:raises: exceptions.NetworkNotFound
:raises: RemoteRestError
"""
LOG.debug(_("NeutronRestProxyV2.update_network() called"))
self._warn_on_state_status(network['network'])
session = context.session
with session.begin(subtransactions=True):
new_net = super(NeutronRestProxyV2, self).update_network(
context, net_id, network)
self._process_l3_update(context, new_net, network['network'])
# update network on network controller
self._send_update_network(new_net, context)
return new_net
# NOTE(kevinbenton): workaround for eventlet/mysql deadlock
@utils.synchronized('bsn-port-barrier')
@put_context_in_serverpool
def delete_network(self, context, net_id):
"""Delete a network.
:param context: neutron api request context
:param id: UUID representing the network to delete.
:returns: None
:raises: exceptions.NetworkInUse
:raises: exceptions.NetworkNotFound
:raises: RemoteRestError
"""
LOG.debug(_("NeutronRestProxyV2: delete_network() called"))
# Validate args
orig_net = super(NeutronRestProxyV2, self).get_network(context, net_id)
filter = {'network_id': [net_id]}
ports = self.get_ports(context, filters=filter)
# check if there are any tenant owned ports in-use
auto_delete_port_owners = db_base_plugin_v2.AUTO_DELETE_PORT_OWNERS
only_auto_del = all(p['device_owner'] in auto_delete_port_owners
for p in ports)
if not only_auto_del:
raise exceptions.NetworkInUse(net_id=net_id)
with context.session.begin(subtransactions=True):
ret_val = super(NeutronRestProxyV2, self).delete_network(context,
net_id)
self._send_delete_network(orig_net, context)
return ret_val
@put_context_in_serverpool
def create_port(self, context, port):
"""Create a port, which is a connection point of a device
(e.g., a VM NIC) to attach to a L2 Neutron network.
:param context: neutron api request context
:param port: dictionary describing the port
:returns:
{
"id": uuid represeting the port.
"network_id": uuid of network.
"tenant_id": tenant_id
"mac_address": mac address to use on this port.
"admin_state_up": Sets admin state of port. if down, port
does not forward packets.
"status": dicates whether port is currently operational
(limit values to "ACTIVE", "DOWN", "BUILD", and "ERROR")
"fixed_ips": list of subnet ID"s and IP addresses to be used on
this port
"device_id": identifies the device (e.g., virtual server) using
this port.
}
:raises: exceptions.NetworkNotFound
:raises: exceptions.StateInvalid
:raises: RemoteRestError
"""
LOG.debug(_("NeutronRestProxyV2: create_port() called"))
# Update DB in new session so exceptions rollback changes
with context.session.begin(subtransactions=True):
self._ensure_default_security_group_on_port(context, port)
sgids = self._get_security_groups_on_port(context, port)
# set port status to pending. updated after rest call completes
port['port']['status'] = const.PORT_STATUS_BUILD
dhcp_opts = port['port'].get(edo_ext.EXTRADHCPOPTS, [])
new_port = super(NeutronRestProxyV2, self).create_port(context,
port)
self._process_port_create_security_group(context, new_port, sgids)
if (portbindings.HOST_ID in port['port']
and 'id' in new_port):
host_id = port['port'][portbindings.HOST_ID]
porttracker_db.put_port_hostid(context, new_port['id'],
host_id)
new_port[addr_pair.ADDRESS_PAIRS] = (
self._process_create_allowed_address_pairs(
context, new_port,
port['port'].get(addr_pair.ADDRESS_PAIRS)))
self._process_port_create_extra_dhcp_opts(context, new_port,
dhcp_opts)
new_port = self._extend_port_dict_binding(context, new_port)
net = super(NeutronRestProxyV2,
self).get_network(context, new_port["network_id"])
if self.add_meta_server_route:
if new_port['device_owner'] == const.DEVICE_OWNER_DHCP:
destination = METADATA_SERVER_IP + '/32'
self._add_host_route(context, destination, new_port)
# create on network ctrl
mapped_port = self._map_state_and_status(new_port)
self.evpool.spawn_n(self.async_port_create, net["tenant_id"],
new_port["network_id"], mapped_port)
self.notify_security_groups_member_updated(context, new_port)
return new_port
def get_port(self, context, id, fields=None):
with context.session.begin(subtransactions=True):
port = super(NeutronRestProxyV2, self).get_port(context, id,
fields)
self._extend_port_dict_binding(context, port)
return self._fields(port, fields)
def get_ports(self, context, filters=None, fields=None):
with context.session.begin(subtransactions=True):
ports = super(NeutronRestProxyV2, self).get_ports(context, filters,
fields)
for port in ports:
self._extend_port_dict_binding(context, port)
return [self._fields(port, fields) for port in ports]
@put_context_in_serverpool
def update_port(self, context, port_id, port):
"""Update values of a port.
:param context: neutron api request context
:param id: UUID representing the port to update.
:param port: dictionary with keys indicating fields to update.
:returns: a mapping sequence with the following signature:
{
"id": uuid represeting the port.
"network_id": uuid of network.
"tenant_id": tenant_id
"mac_address": mac address to use on this port.
"admin_state_up": sets admin state of port. if down, port
does not forward packets.
"status": dicates whether port is currently operational
(limit values to "ACTIVE", "DOWN", "BUILD", and "ERROR")
"fixed_ips": list of subnet ID's and IP addresses to be used on
this port
"device_id": identifies the device (e.g., virtual server) using
this port.
}
:raises: exceptions.StateInvalid
:raises: exceptions.PortNotFound
:raises: RemoteRestError
"""
LOG.debug(_("NeutronRestProxyV2: update_port() called"))
self._warn_on_state_status(port['port'])
# Validate Args
orig_port = super(NeutronRestProxyV2, self).get_port(context, port_id)
with context.session.begin(subtransactions=True):
# Update DB
new_port = super(NeutronRestProxyV2,
self).update_port(context, port_id, port)
ctrl_update_required = False
if addr_pair.ADDRESS_PAIRS in port['port']:
ctrl_update_required |= (
self.update_address_pairs_on_port(context, port_id, port,
orig_port, new_port))
if 'fixed_ips' in port['port']:
self._check_fixed_ips_and_address_pairs_no_overlap(
context, new_port)
self._update_extra_dhcp_opts_on_port(context, port_id, port,
new_port)
old_host_id = porttracker_db.get_port_hostid(context,
orig_port['id'])
if (portbindings.HOST_ID in port['port']
and 'id' in new_port):
host_id = port['port'][portbindings.HOST_ID]
porttracker_db.put_port_hostid(context, new_port['id'],
host_id)
if old_host_id != host_id:
ctrl_update_required = True
if (new_port.get("device_id") != orig_port.get("device_id") and
orig_port.get("device_id")):
ctrl_update_required = True
if ctrl_update_required:
# tenant_id must come from network in case network is shared
net_tenant_id = self._get_port_net_tenantid(context, new_port)
new_port = self._extend_port_dict_binding(context, new_port)
mapped_port = self._map_state_and_status(new_port)
self.servers.rest_update_port(net_tenant_id,
new_port["network_id"],
mapped_port)
agent_update_required = self.update_security_group_on_port(
context, port_id, port, orig_port, new_port)
agent_update_required |= self.is_security_group_member_updated(
context, orig_port, new_port)
# return new_port
return new_port
# NOTE(kevinbenton): workaround for eventlet/mysql deadlock
@utils.synchronized('bsn-port-barrier')
@put_context_in_serverpool
def delete_port(self, context, port_id, l3_port_check=True):
"""Delete a port.
:param context: neutron api request context
:param id: UUID representing the port to delete.
:raises: exceptions.PortInUse
:raises: exceptions.PortNotFound
:raises: exceptions.NetworkNotFound
:raises: RemoteRestError
"""
LOG.debug(_("NeutronRestProxyV2: delete_port() called"))
# if needed, check to see if this is a port owned by
        # an l3-router. If so, we should prevent deletion.
if l3_port_check:
self.prevent_l3_port_deletion(context, port_id)
with context.session.begin(subtransactions=True):
router_ids = self.disassociate_floatingips(
context, port_id, do_notify=False)
self._delete_port_security_group_bindings(context, port_id)
port = super(NeutronRestProxyV2, self).get_port(context, port_id)
# Tenant ID must come from network in case the network is shared
tenid = self._get_port_net_tenantid(context, port)
self._delete_port(context, port_id)
self.servers.rest_delete_port(tenid, port['network_id'], port_id)
# now that we've left db transaction, we are safe to notify
self.notify_routers_updated(context, router_ids)
@put_context_in_serverpool
def create_subnet(self, context, subnet):
LOG.debug(_("NeutronRestProxyV2: create_subnet() called"))
self._warn_on_state_status(subnet['subnet'])
with context.session.begin(subtransactions=True):
# create subnet in DB
new_subnet = super(NeutronRestProxyV2,
self).create_subnet(context, subnet)
net_id = new_subnet['network_id']
orig_net = super(NeutronRestProxyV2,
self).get_network(context, net_id)
# update network on network controller
self._send_update_network(orig_net, context)
return new_subnet
@put_context_in_serverpool
def update_subnet(self, context, id, subnet):
LOG.debug(_("NeutronRestProxyV2: update_subnet() called"))
self._warn_on_state_status(subnet['subnet'])
with context.session.begin(subtransactions=True):
# update subnet in DB
new_subnet = super(NeutronRestProxyV2,
self).update_subnet(context, id, subnet)
net_id = new_subnet['network_id']
orig_net = super(NeutronRestProxyV2,
self).get_network(context, net_id)
# update network on network controller
self._send_update_network(orig_net, context)
return new_subnet
# NOTE(kevinbenton): workaround for eventlet/mysql deadlock
@utils.synchronized('bsn-port-barrier')
@put_context_in_serverpool
def delete_subnet(self, context, id):
LOG.debug(_("NeutronRestProxyV2: delete_subnet() called"))
orig_subnet = super(NeutronRestProxyV2, self).get_subnet(context, id)
net_id = orig_subnet['network_id']
with context.session.begin(subtransactions=True):
# delete subnet in DB
super(NeutronRestProxyV2, self).delete_subnet(context, id)
orig_net = super(NeutronRestProxyV2, self).get_network(context,
net_id)
# update network on network controller - exception will rollback
self._send_update_network(orig_net, context)
def _get_tenant_default_router_rules(self, tenant):
rules = cfg.CONF.ROUTER.tenant_default_router_rule
defaultset = []
tenantset = []
for rule in rules:
items = rule.split(':')
if len(items) == 5:
(tenantid, source, destination, action, nexthops) = items
elif len(items) == 4:
(tenantid, source, destination, action) = items
nexthops = ''
else:
continue
parsedrule = {'source': source,
'destination': destination, 'action': action,
'nexthops': nexthops.split(',')}
if parsedrule['nexthops'][0] == '':
parsedrule['nexthops'] = []
if tenantid == '*':
defaultset.append(parsedrule)
if tenantid == tenant:
tenantset.append(parsedrule)
if tenantset:
return tenantset
return defaultset
@put_context_in_serverpool
def create_router(self, context, router):
LOG.debug(_("NeutronRestProxyV2: create_router() called"))
self._warn_on_state_status(router['router'])
tenant_id = self._get_tenant_id_for_create(context, router["router"])
# set default router rules
rules = self._get_tenant_default_router_rules(tenant_id)
router['router']['router_rules'] = rules
with context.session.begin(subtransactions=True):
# create router in DB
new_router = super(NeutronRestProxyV2, self).create_router(context,
router)
mapped_router = self._map_state_and_status(new_router)
self.servers.rest_create_router(tenant_id, mapped_router)
# return created router
return new_router
@put_context_in_serverpool
def update_router(self, context, router_id, router):
LOG.debug(_("NeutronRestProxyV2.update_router() called"))
self._warn_on_state_status(router['router'])
orig_router = super(NeutronRestProxyV2, self).get_router(context,
router_id)
tenant_id = orig_router["tenant_id"]
with context.session.begin(subtransactions=True):
new_router = super(NeutronRestProxyV2,
self).update_router(context, router_id, router)
router = self._map_state_and_status(new_router)
# update router on network controller
self.servers.rest_update_router(tenant_id, router, router_id)
# return updated router
return new_router
# NOTE(kevinbenton): workaround for eventlet/mysql deadlock.
# delete_router ends up calling _delete_port instead of delete_port.
@utils.synchronized('bsn-port-barrier')
@put_context_in_serverpool
def delete_router(self, context, router_id):
LOG.debug(_("NeutronRestProxyV2: delete_router() called"))
with context.session.begin(subtransactions=True):
orig_router = self._get_router(context, router_id)
tenant_id = orig_router["tenant_id"]
# Ensure that the router is not used
router_filter = {'router_id': [router_id]}
fips = self.get_floatingips_count(context.elevated(),
filters=router_filter)
if fips:
raise l3.RouterInUse(router_id=router_id)
device_owner = l3_db.DEVICE_OWNER_ROUTER_INTF
device_filter = {'device_id': [router_id],
'device_owner': [device_owner]}
ports = self.get_ports_count(context.elevated(),
filters=device_filter)
if ports:
raise l3.RouterInUse(router_id=router_id)
ret_val = super(NeutronRestProxyV2,
self).delete_router(context, router_id)
# delete from network ctrl
self.servers.rest_delete_router(tenant_id, router_id)
return ret_val
@put_context_in_serverpool
def add_router_interface(self, context, router_id, interface_info):
LOG.debug(_("NeutronRestProxyV2: add_router_interface() called"))
# Validate args
router = self._get_router(context, router_id)
tenant_id = router['tenant_id']
with context.session.begin(subtransactions=True):
# create interface in DB
new_intf_info = super(NeutronRestProxyV2,
self).add_router_interface(context,
router_id,
interface_info)
port = self._get_port(context, new_intf_info['port_id'])
net_id = port['network_id']
subnet_id = new_intf_info['subnet_id']
# we will use the port's network id as interface's id
interface_id = net_id
intf_details = self._get_router_intf_details(context,
interface_id,
subnet_id)
# create interface on the network controller
self.servers.rest_add_router_interface(tenant_id, router_id,
intf_details)
return new_intf_info
@put_context_in_serverpool
def remove_router_interface(self, context, router_id, interface_info):
LOG.debug(_("NeutronRestProxyV2: remove_router_interface() called"))
# Validate args
router = self._get_router(context, router_id)
tenant_id = router['tenant_id']
# we will first get the interface identifier before deleting in the DB
if not interface_info:
msg = _("Either subnet_id or port_id must be specified")
raise exceptions.BadRequest(resource='router', msg=msg)
if 'port_id' in interface_info:
port = self._get_port(context, interface_info['port_id'])
interface_id = port['network_id']
elif 'subnet_id' in interface_info:
subnet = self._get_subnet(context, interface_info['subnet_id'])
interface_id = subnet['network_id']
else:
msg = _("Either subnet_id or port_id must be specified")
raise exceptions.BadRequest(resource='router', msg=msg)
with context.session.begin(subtransactions=True):
# remove router in DB
del_ret = super(NeutronRestProxyV2,
self).remove_router_interface(context,
router_id,
interface_info)
# create router on the network controller
self.servers.rest_remove_router_interface(tenant_id, router_id,
interface_id)
return del_ret
@put_context_in_serverpool
def create_floatingip(self, context, floatingip):
LOG.debug(_("NeutronRestProxyV2: create_floatingip() called"))
with context.session.begin(subtransactions=True):
# create floatingip in DB
new_fl_ip = super(NeutronRestProxyV2,
self).create_floatingip(context, floatingip)
# create floatingip on the network controller
try:
if 'floatingip' in self.servers.get_capabilities():
self.servers.rest_create_floatingip(
new_fl_ip['tenant_id'], new_fl_ip)
else:
self._send_floatingip_update(context)
except servermanager.RemoteRestError as e:
with excutils.save_and_reraise_exception():
LOG.error(
_("NeutronRestProxyV2: Unable to create remote "
"floating IP: %s"), e)
# return created floating IP
return new_fl_ip
@put_context_in_serverpool
def update_floatingip(self, context, id, floatingip):
LOG.debug(_("NeutronRestProxyV2: update_floatingip() called"))
with context.session.begin(subtransactions=True):
# update floatingip in DB
new_fl_ip = super(NeutronRestProxyV2,
self).update_floatingip(context, id, floatingip)
# update network on network controller
if 'floatingip' in self.servers.get_capabilities():
self.servers.rest_update_floatingip(new_fl_ip['tenant_id'],
new_fl_ip, id)
else:
self._send_floatingip_update(context)
return new_fl_ip
@put_context_in_serverpool
def delete_floatingip(self, context, id):
LOG.debug(_("NeutronRestProxyV2: delete_floatingip() called"))
with context.session.begin(subtransactions=True):
# delete floating IP in DB
old_fip = super(NeutronRestProxyV2, self).get_floatingip(context,
id)
super(NeutronRestProxyV2, self).delete_floatingip(context, id)
# update network on network controller
if 'floatingip' in self.servers.get_capabilities():
self.servers.rest_delete_floatingip(old_fip['tenant_id'], id)
else:
self._send_floatingip_update(context)
@put_context_in_serverpool
def disassociate_floatingips(self, context, port_id, do_notify=True):
LOG.debug(_("NeutronRestProxyV2: diassociate_floatingips() called"))
router_ids = super(NeutronRestProxyV2, self).disassociate_floatingips(
context, port_id, do_notify=do_notify)
self._send_floatingip_update(context)
return router_ids
def _send_floatingip_update(self, context):
try:
ext_net_id = self.get_external_network_id(context)
if ext_net_id:
# Use the elevated state of the context for the ext_net query
admin_context = context.elevated()
ext_net = super(NeutronRestProxyV2,
self).get_network(admin_context, ext_net_id)
# update external network on network controller
self._send_update_network(ext_net, admin_context)
except exceptions.TooManyExternalNetworks:
# get_external_network can raise errors when multiple external
# networks are detected, which isn't supported by the Plugin
LOG.error(_("NeutronRestProxyV2: too many external networks"))
def _add_host_route(self, context, destination, port):
subnet = {}
for fixed_ip in port['fixed_ips']:
subnet_id = fixed_ip['subnet_id']
nexthop = fixed_ip['ip_address']
subnet['host_routes'] = [{'destination': destination,
'nexthop': nexthop}]
updated_subnet = self.update_subnet(context,
subnet_id,
{'subnet': subnet})
payload = {'subnet': updated_subnet}
self._dhcp_agent_notifier.notify(context, payload,
'subnet.update.end')
LOG.debug(_("Adding host route: "))
LOG.debug(_("Destination:%(dst)s nexthop:%(next)s"),
{'dst': destination, 'next': nexthop})
|
sajuptpm/neutron-ipam
|
neutron/plugins/bigswitch/plugin.py
|
Python
|
apache-2.0
| 50,723
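The `tenant_default_router_rule` entries consumed by `_get_tenant_default_router_rules()` above follow a colon-separated `tenant:source:destination:action[:nexthops]` layout; the standalone sketch below restates just that parsing step for illustration and is not part of the plugin, with example rule strings that are invented.

# Standalone restatement of the router-rule string format handled above.
def parse_router_rule(rule):
    items = rule.split(':')
    if len(items) == 5:
        tenant, source, destination, action, nexthops = items
    elif len(items) == 4:
        tenant, source, destination, action = items
        nexthops = ''
    else:
        return None, None
    hops = [hop for hop in nexthops.split(',') if hop]
    return tenant, {'source': source, 'destination': destination,
                    'action': action, 'nexthops': hops}

# '*' rules apply to every tenant unless a tenant-specific rule exists.
print(parse_router_rule('*:any:any:permit'))
print(parse_router_rule('tenant-1:10.0.0.0/24:any:permit:10.0.0.1'))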
|
import re
from datetime import datetime
import os
import time
import subprocess
from ajenti.api import *
from ajenti.plugins.main.api import SectionPlugin
from ajenti.plugins.services.api import ServiceMultiplexor
from ajenti.ui import on
from ajenti.ui.binder import Binder
from ajenti.util import platform_select
from reconfigure.configs.base import Reconfig
from reconfigure.parsers import SSVParser
from reconfigure.builders import BoundBuilder
from reconfigure.nodes import Node, PropertyNode
from reconfigure.items.bound import BoundData
open_ntpd_conf = '/etc/openntpd/ntpd.conf'
ntpd_conf = '/etc/ntp.conf'
def isopenntpd():
return not ServiceMultiplexor.get().get_one('openntpd') is None
class NTPDData(BoundData):
pass
class ServerData(BoundData):
def template(self):
return Node(
'line',
Node('token', PropertyNode('value', 'server')),
Node('token', PropertyNode('value', '0.pool.ntp.org')),
)
NTPDData.bind_collection('servers', selector=lambda x: x.children[0].get('value').value == 'server',
item_class=ServerData)
ServerData.bind_property('value', 'address', path=lambda x: x.children[1])
class NTPDConfig(Reconfig):
def __init__(self, **kwargs):
k = {
'parser': SSVParser(),
'builder': BoundBuilder(NTPDData),
}
k.update(kwargs)
Reconfig.__init__(self, **k)
@plugin
class NTPDPlugin(SectionPlugin):
def get_tz_debian(self):
return open('/etc/timezone').read().strip()
def get_tz_nondebian(self):
return os.path.realpath('/etc/localtime')[len('/usr/share/zoneinfo/'):] if os.path.islink(
'/etc/localtime') else ''
def set_tz_debian(self, timezone):
open('/etc/timezone', 'w').write(timezone + '\n')
def set_tz_nondebian(self, timezone):
tz = os.path.join('/usr/share/zoneinfo/', timezone)
if os.path.exists('/etc/localtime'):
os.unlink('/etc/localtime')
os.symlink(tz, '/etc/localtime')
openntpd = isopenntpd()
service_name = platform_select(
default='openntpd' if openntpd else 'ntp',
centos='openntpd' if openntpd else 'ntpd',
mageia='ntpd',
)
get_tz = platform_select(
debian=get_tz_debian,
default=get_tz_nondebian,
)
set_tz = platform_select(
debian=set_tz_debian,
default=set_tz_nondebian,
)
def init(self):
self.title = _('Date & Time')
self.icon = 'time'
self.category = _('System')
self.append(self.ui.inflate('ntpd:main'))
self.find('servicebar').name = self.service_name
self.find('servicebar').reload()
self.config = NTPDConfig(path=platform_select(
default=open_ntpd_conf if self.openntpd else ntpd_conf,
centos='/usr/local/etc/ntpd.conf' if self.openntpd else ntpd_conf,
freebsd='/usr/local/etc/ntpd.conf' if self.openntpd else ntpd_conf,
))
self.binder = Binder(None, self)
self.available_zones = []
for d, dirs, files in os.walk('/usr/share/zoneinfo', followlinks=False):
for f in files:
if f != 'zone.tab':
self.available_zones.append(os.path.join(d, f))
self.available_zones = [x[len('/usr/share/zoneinfo/'):] for x in self.available_zones]
self.available_zones.sort()
self.find('servers').new_item = lambda c: ServerData()
def on_page_load(self):
self.refresh()
def refresh(self):
self.config.load()
self.now = int(time.time())
self.timezone = self.get_tz()
self.binder.setup(self).populate()
@on('set', 'click')
def on_set(self):
self.binder.update()
d = datetime.fromtimestamp(self.now)
s = d.strftime('%m%d%H%M%Y')
self.set_tz(self.timezone)
subprocess.call(['date', s])
self.refresh()
@on('sync', 'click')
def on_sync(self):
self.binder.update()
if len(self.config.tree.servers) == 0:
self.context.notify('error', _('No servers defined'))
return
server = self.config.tree.servers[0].address
output = subprocess.check_output(['ntpdate', '-u', server])
self.context.notify('info', _('Done'))
self.context.notify('info', output)
self.refresh()
@on('save', 'click')
def save(self):
self.binder.update()
self.config.save()
self.refresh()
self.context.notify('info', _('Saved'))
ServiceMultiplexor.get().get_one(self.service_name).restart()
|
lupyuen/RaspberryPiImage
|
usr/share/pyshared/ajenti/plugins/ntpd/main.py
|
Python
|
apache-2.0
| 4,668
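The plugin above reads the timezone from `/etc/timezone` on Debian-style systems and from the `/etc/localtime` symlink elsewhere; below is a read-only standalone sketch of that lookup, with the Debian/non-Debian split collapsed into one function and Linux paths assumed.

# Standalone, read-only sketch of the two timezone lookups used above.
import os

def current_timezone():
    if os.path.isfile('/etc/timezone'):        # Debian-style
        with open('/etc/timezone') as handle:
            return handle.read().strip()
    if os.path.islink('/etc/localtime'):       # symlink into the zoneinfo tree
        return os.path.realpath('/etc/localtime')[len('/usr/share/zoneinfo/'):]
    return ''

if __name__ == '__main__':
    print(current_timezone())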
|
#!/usr/bin/python
#
# Uploads artifacts to S3.
# Produces a universe, and uploads it to S3.
# If running in jenkins ($WORKSPACE is defined), writes $WORKSPACE/stub-universe.properties
#
# Env:
# S3_BUCKET (default: infinity-artifacts)
# S3_DIR_PATH (default: autodelete7d)
# S3_URL (default: s3://${S3_BUCKET}/${S3_DIR_PATH}/<pkg_name>/<random>
# ARTIFACT_DIR (default: ...s3.amazonaws.com...)
# Base HTTP dir to use when rendering links
import logging
import os
import os.path
import random
import string
import sys
import time
import github_update
import universe_builder
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG, format="%(message)s")
class AWSPublisher(object):
def __init__(
self,
package_name,
input_dir_path,
artifact_paths,
package_version = 'stub-universe'):
self._dry_run = os.environ.get('DRY_RUN', '')
self._pkg_name = package_name
self._pkg_version = package_version
self._input_dir_path = input_dir_path
self._aws_region = os.environ.get('AWS_UPLOAD_REGION', '')
s3_bucket = os.environ.get('S3_BUCKET', 'infinity-artifacts')
s3_dir_path = os.environ.get('S3_DIR_PATH', 'autodelete7d')
dir_name = '{}-{}'.format(
time.strftime("%Y%m%d-%H%M%S"),
''.join([random.SystemRandom().choice(string.ascii_letters + string.digits) for i in range(16)]))
# sample s3_directory: 'infinity-artifacts/autodelete7d/kafka/20160815-134747-S6vxd0gRQBw43NNy'
self._s3_directory = os.environ.get(
'S3_URL',
's3://{}/{}/{}/{}'.format(
s3_bucket,
s3_dir_path,
self._pkg_name,
dir_name))
self._http_directory = os.environ.get(
'ARTIFACT_DIR',
'https://{}.s3.amazonaws.com/{}/{}/{}'.format(
s3_bucket,
s3_dir_path,
self._pkg_name,
dir_name))
self._github_updater = github_update.GithubStatusUpdater('upload:{}'.format(package_name))
if not os.path.isdir(input_dir_path):
err = 'Provided package path is not a directory: {}'.format(input_dir_path)
self._github_updater.update('error', err)
raise Exception(err)
# check if aws cli tools are installed
cmd = "aws --version"
ret = os.system(cmd)
if not ret == 0:
err = 'Required AWS cli tools not installed.'
self._github_updater.update('error', err)
raise Exception(err)
self._artifact_paths = []
for artifact_path in artifact_paths:
            if not os.path.isfile(artifact_path):
                err = 'Provided package path is not a file: {} (full list: {})'.format(artifact_path, artifact_paths)
                self._github_updater.update('error', err)
                raise Exception(err)
            for prior_path in self._artifact_paths:
                if os.path.basename(prior_path) == os.path.basename(artifact_path):
                    err = 'Duplicate filename between "{}" and "{}". Artifact filenames must be unique.'.format(prior_path, artifact_path)
                    self._github_updater.update('error', err)
                    raise Exception(err)
self._artifact_paths.append(artifact_path)
def _upload_artifact(self, filepath):
filename = os.path.basename(filepath)
if self._aws_region:
cmd = 'aws s3 --region={} cp --acl public-read {} {}/{} 1>&2'.format(
self._aws_region, filepath, self._s3_directory, filename)
else:
cmd = 'aws s3 cp --acl public-read {} {}/{} 1>&2'.format(
filepath, self._s3_directory, filename)
if self._dry_run:
logger.info('[DRY RUN] {}'.format(cmd))
ret = 0
else:
logger.info(cmd)
ret = os.system(cmd)
if not ret == 0:
err = 'Failed to upload {} to S3'.format(filename)
self._github_updater.update('error', err)
raise Exception(err)
return '{}/{}'.format(self._http_directory, filename)
def _spam_universe_url(self, universe_url):
# write jenkins properties file to $WORKSPACE/<pkg_version>.properties:
jenkins_workspace_path = os.environ.get('WORKSPACE', '')
if jenkins_workspace_path:
properties_file = open(os.path.join(jenkins_workspace_path, '{}.properties'.format(self._pkg_version)), 'w')
properties_file.write('STUB_UNIVERSE_URL={}\n'.format(universe_url))
properties_file.write('STUB_UNIVERSE_S3_DIR={}\n'.format(self._s3_directory))
properties_file.flush()
properties_file.close()
# write URL to provided text file path:
universe_url_path = os.environ.get('UNIVERSE_URL_PATH', '')
if universe_url_path:
universe_url_file = open(universe_url_path, 'w')
universe_url_file.write('{}\n'.format(universe_url))
universe_url_file.flush()
universe_url_file.close()
num_artifacts = len(self._artifact_paths)
if num_artifacts > 1:
suffix = 's'
else:
suffix = ''
self._github_updater.update(
'success',
'Uploaded stub universe and {} artifact{}'.format(num_artifacts, suffix),
universe_url)
def upload(self):
'''generates a unique directory, then uploads artifacts and a new stub universe to that directory'''
try:
universe_path = universe_builder.UniversePackageBuilder(
self._pkg_name, self._pkg_version,
self._input_dir_path, self._http_directory, self._artifact_paths).build_zip()
except Exception as e:
err = 'Failed to create stub universe: {}'.format(str(e))
self._github_updater.update('error', err)
raise
# print universe url early
universe_url = self._upload_artifact(universe_path)
logger.info('---')
logger.info('Built and uploaded stub universe:')
logger.info(universe_url)
logger.info('---')
logger.info('Uploading {} artifacts:'.format(len(self._artifact_paths)))
for path in self._artifact_paths:
self._upload_artifact(path)
self._spam_universe_url(universe_url)
# print to stdout, while the rest is all stderr:
print(universe_url)
logger.info('---')
logger.info('Install your package using the following commands:')
logger.info('dcos package repo remove {}-aws'.format(self._pkg_name))
logger.info('dcos package repo add --index=0 {}-aws {}'.format(self._pkg_name, universe_url))
logger.info('dcos package install --yes {}'.format(self._pkg_name))
return universe_url
def print_help(argv):
logger.info('Syntax: {} <package-name> <template-package-dir> [artifact files ...]'.format(argv[0]))
logger.info(' Example: $ {} kafka /path/to/universe/jsons/ /path/to/artifact1.zip /path/to/artifact2.zip /path/to/artifact3.zip'.format(argv[0]))
logger.info('In addition, environment variables named \'TEMPLATE_SOME_PARAMETER\' will be inserted against the provided package template (with params of the form \'{{some-parameter}}\')')
def main(argv):
if len(argv) < 3:
print_help(argv)
return 1
# the package name:
package_name = argv[1]
# local path where the package template is located:
package_dir_path = argv[2].rstrip('/')
# artifact paths (to upload along with stub universe)
artifact_paths = argv[3:]
logger.info('''###
Package: {}
Template path: {}
Artifacts: {}
###'''.format(package_name, package_dir_path, ','.join(artifact_paths)))
AWSPublisher(package_name, package_dir_path, artifact_paths).upload()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
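# --- Editor's note: illustrative usage sketch, not part of the original script.
# The package name, template directory and artifact path below are placeholders.
# Equivalent to: python publish_aws.py kafka /path/to/universe/jsons build/foo.zip
#
#   publisher = AWSPublisher(
#       'kafka',                            # package name
#       '/path/to/universe/jsons',          # directory holding the package template
#       ['build/foo.zip'])                  # artifacts uploaded next to the stub universe
#   stub_universe_url = publisher.upload()  # URL is also printed to stdout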
|
tobilg/dcos-commons
|
tools/publish_aws.py
|
Python
|
apache-2.0
| 7,896
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
JSON related utilities.
This module provides a few things:
1) A handy function for getting an object down to something that can be
JSON serialized. See to_primitive().
2) Wrappers around loads() and dumps(). The dumps() wrapper will
automatically use to_primitive() for you if needed.
3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson
is available.
'''
import datetime
import functools
import inspect
import itertools
import json
import types
import xmlrpclib
import six
from keystoneclient.openstack.common import timeutils
_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod,
inspect.isfunction, inspect.isgeneratorfunction,
inspect.isgenerator, inspect.istraceback, inspect.isframe,
inspect.iscode, inspect.isbuiltin, inspect.isroutine,
inspect.isabstract]
_simple_types = (types.NoneType, int, basestring, bool, float, long)
def to_primitive(value, convert_instances=False, convert_datetime=True,
level=0, max_depth=3):
"""Convert a complex object into primitives.
Handy for JSON serialization. We can optionally handle instances,
but since this is a recursive function, we could have cyclical
data structures.
To handle cyclical data structures we could track the actual objects
visited in a set, but not all objects are hashable. Instead we just
track the depth of the object inspections and don't go too deep.
Therefore, convert_instances=True is lossy ... be aware.
"""
# handle obvious types first - order of basic types determined by running
# full tests on nova project, resulting in the following counts:
# 572754 <type 'NoneType'>
# 460353 <type 'int'>
# 379632 <type 'unicode'>
# 274610 <type 'str'>
# 199918 <type 'dict'>
# 114200 <type 'datetime.datetime'>
# 51817 <type 'bool'>
# 26164 <type 'list'>
# 6491 <type 'float'>
# 283 <type 'tuple'>
# 19 <type 'long'>
if isinstance(value, _simple_types):
return value
if isinstance(value, datetime.datetime):
if convert_datetime:
return timeutils.strtime(value)
else:
return value
# value of itertools.count doesn't get caught by nasty_type_tests
# and results in infinite loop when list(value) is called.
if type(value) == itertools.count:
return six.text_type(value)
# FIXME(vish): Workaround for LP bug 852095. Without this workaround,
# tests that raise an exception in a mocked method that
# has a @wrap_exception with a notifier will fail. If
# we up the dependency to 0.5.4 (when it is released) we
# can remove this workaround.
if getattr(value, '__module__', None) == 'mox':
return 'mock'
if level > max_depth:
return '?'
# The try block may not be necessary after the class check above,
# but just in case ...
try:
recursive = functools.partial(to_primitive,
convert_instances=convert_instances,
convert_datetime=convert_datetime,
level=level,
max_depth=max_depth)
if isinstance(value, dict):
return dict((k, recursive(v)) for k, v in value.iteritems())
elif isinstance(value, (list, tuple)):
return [recursive(lv) for lv in value]
# It's not clear why xmlrpclib created their own DateTime type, but
# for our purposes, make it a datetime type which is explicitly
# handled
if isinstance(value, xmlrpclib.DateTime):
value = datetime.datetime(*tuple(value.timetuple())[:6])
if convert_datetime and isinstance(value, datetime.datetime):
return timeutils.strtime(value)
elif hasattr(value, 'iteritems'):
return recursive(dict(value.iteritems()), level=level + 1)
elif hasattr(value, '__iter__'):
return recursive(list(value))
elif convert_instances and hasattr(value, '__dict__'):
# Likely an instance of something. Watch for cycles.
# Ignore class member vars.
return recursive(value.__dict__, level=level + 1)
else:
if any(test(value) for test in _nasty_type_tests):
return six.text_type(value)
return value
except TypeError:
# Class objects are tricky since they may define something like
# __iter__ defined but it isn't callable as list().
return six.text_type(value)
def dumps(value, default=to_primitive, **kwargs):
return json.dumps(value, default=default, **kwargs)
def loads(s):
return json.loads(s)
def load(s):
return json.load(s)
try:
import anyjson
except ImportError:
pass
else:
anyjson._modules.append((__name__, 'dumps', TypeError,
'loads', ValueError, 'load'))
anyjson.force_implementation(__name__)
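# --- Editor's note: illustrative example, not part of the original module ---
# dumps() falls back to to_primitive() for values json.dumps cannot handle
# natively, so datetimes and odd objects serialize instead of raising TypeError.
# A minimal sketch using only names imported above:
#
#   payload = {'when': datetime.datetime(2013, 1, 2, 3, 4, 5),
#              'counter': itertools.count()}
#   print dumps(payload)   # datetime rendered via timeutils.strtime(),
#                          # the counter via six.text_type()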
|
ioram7/keystone-federado-pgid2013
|
build/python-keystoneclient/keystoneclient/openstack/common/jsonutils.py
|
Python
|
apache-2.0
| 5,983
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
from lxml import etree
import six
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.integrated import integrated_helpers
class NoMatch(test.TestingException):
pass
class ApiSampleTestBase(integrated_helpers._IntegratedTestBase):
ctype = 'json'
all_extensions = False
extension_name = None
def _pretty_data(self, data):
if self.ctype == 'json':
data = jsonutils.dumps(jsonutils.loads(data), sort_keys=True,
indent=4)
else:
if data is None:
# Likely from missing XML file.
return ""
xml = etree.XML(data)
data = etree.tostring(xml, encoding="UTF-8",
xml_declaration=True, pretty_print=True)
return '\n'.join(line.rstrip() for line in data.split('\n')).strip()
def _objectify(self, data):
if not data:
return {}
if self.ctype == 'json':
# NOTE(vish): allow non-quoted replacements to survive json
data = re.sub(r'([^"])%\((.+)\)s([^"])', r'\1"%(int:\2)s"\3', data)
return jsonutils.loads(data)
else:
def to_dict(node):
ret = {}
if node.items():
ret.update(dict(node.items()))
if node.text:
ret['__content__'] = node.text
if node.tag:
ret['__tag__'] = node.tag
if node.nsmap:
ret['__nsmap__'] = node.nsmap
for element in node:
ret.setdefault(node.tag, [])
ret[node.tag].append(to_dict(element))
return ret
return to_dict(etree.fromstring(data))
@classmethod
def _get_sample_path(cls, name, dirname, suffix=''):
parts = [dirname]
parts.append('api_samples')
if cls.all_extensions:
parts.append('all_extensions')
if cls.extension_name:
alias = importutils.import_class(cls.extension_name).alias
parts.append(alias)
parts.append(name + "." + cls.ctype + suffix)
return os.path.join(*parts)
@classmethod
def _get_sample(cls, name):
dirname = os.path.dirname(os.path.abspath(__file__))
dirname = os.path.normpath(os.path.join(dirname, "../../../doc"))
return cls._get_sample_path(name, dirname)
@classmethod
def _get_template(cls, name):
dirname = os.path.dirname(os.path.abspath(__file__))
return cls._get_sample_path(name, dirname, suffix='.tpl')
def _read_template(self, name):
template = self._get_template(name)
with open(template) as inf:
return inf.read().strip()
def _write_template(self, name, data):
with open(self._get_template(name), 'w') as outf:
outf.write(data)
def _write_sample(self, name, data):
with open(self._get_sample(name), 'w') as outf:
outf.write(data)
def _compare_result(self, subs, expected, result, result_str):
matched_value = None
if isinstance(expected, dict):
if not isinstance(result, dict):
raise NoMatch(_('%(result_str)s: %(result)s is not a dict.')
% {'result_str': result_str, 'result': result})
ex_keys = sorted(expected.keys())
res_keys = sorted(result.keys())
if ex_keys != res_keys:
ex_delta = []
res_delta = []
for key in ex_keys:
if key not in res_keys:
ex_delta.append(key)
for key in res_keys:
if key not in ex_keys:
res_delta.append(key)
raise NoMatch(
_('Dictionary key mismatch:\n'
'Extra key(s) in template:\n%(ex_delta)s\n'
'Extra key(s) in %(result_str)s:\n%(res_delta)s\n') %
{'ex_delta': ex_delta, 'result_str': result_str,
'res_delta': res_delta})
for key in ex_keys:
res = self._compare_result(subs, expected[key], result[key],
result_str)
matched_value = res or matched_value
elif isinstance(expected, list):
if not isinstance(result, list):
raise NoMatch(
_('%(result_str)s: %(result)s is not a list.') %
{'result_str': result_str, 'result': result})
expected = expected[:]
extra = []
for res_obj in result:
for i, ex_obj in enumerate(expected):
try:
matched_value = self._compare_result(subs, ex_obj,
res_obj,
result_str)
del expected[i]
break
except NoMatch:
pass
else:
extra.append(res_obj)
error = []
if expected:
error.append(_('Extra list items in template:'))
error.extend([repr(o) for o in expected])
if extra:
error.append(_('Extra list items in %(result_str)s:') %
{'result_str': result_str})
error.extend([repr(o) for o in extra])
if error:
raise NoMatch('\n'.join(error))
elif isinstance(expected, six.string_types) and '%' in expected:
# NOTE(vish): escape stuff for regex
for char in '[]<>?':
expected = expected.replace(char, '\\%s' % char)
# NOTE(vish): special handling of subs that are not quoted. We are
# expecting an int but we had to pass in a string
# so the json would parse properly.
if expected.startswith("%(int:"):
result = str(result)
expected = expected.replace('int:', '')
expected = expected % subs
expected = '^%s$' % expected
match = re.match(expected, result)
if not match:
raise NoMatch(
_('Values do not match:\n'
'Template: %(expected)s\n%(result_str)s: %(result)s') %
{'expected': expected, 'result_str': result_str,
'result': result})
try:
matched_value = match.group('id')
except IndexError:
if match.groups():
matched_value = match.groups()[0]
else:
if isinstance(expected, six.string_types):
# NOTE(danms): Ignore whitespace in this comparison
expected = expected.strip()
result = result.strip()
if expected != result:
raise NoMatch(
_('Values do not match:\n'
'Template: %(expected)s\n%(result_str)s: '
'%(result)s') % {'expected': expected,
'result_str': result_str,
'result': result})
return matched_value
def generalize_subs(self, subs, vanilla_regexes):
"""Give the test a chance to modify subs after the server response
was verified, and before the on-disk doc/api_samples file is checked.
This may be needed by some tests to convert exact matches expected
from the server into pattern matches to verify what is in the
sample file.
If there are no changes to be made, subs is returned unharmed.
"""
return subs
def _verify_response(self, name, subs, response, exp_code):
self.assertEqual(response.status, exp_code)
response_data = response.read()
response_data = self._pretty_data(response_data)
if not os.path.exists(self._get_template(name)):
self._write_template(name, response_data)
template_data = response_data
else:
template_data = self._read_template(name)
if (self.generate_samples and
not os.path.exists(self._get_sample(name))):
self._write_sample(name, response_data)
sample_data = response_data
else:
with file(self._get_sample(name)) as sample:
sample_data = sample.read()
try:
template_data = self._objectify(template_data)
response_data = self._objectify(response_data)
response_result = self._compare_result(subs, template_data,
response_data, "Response")
# NOTE(danms): replace some of the subs with patterns for the
# doc/api_samples check, which won't have things like the
# correct compute host name. Also let the test do some of its
# own generalization, if necessary
vanilla_regexes = self._get_regexes()
subs['compute_host'] = vanilla_regexes['host_name']
subs['id'] = vanilla_regexes['id']
subs = self.generalize_subs(subs, vanilla_regexes)
sample_data = self._objectify(sample_data)
self._compare_result(subs, template_data, sample_data, "Sample")
return response_result
except NoMatch:
raise
def _get_host(self):
return 'http://openstack.example.com'
def _get_glance_host(self):
return 'http://glance.openstack.example.com'
def _get_regexes(self):
if self.ctype == 'json':
text = r'(\\"|[^"])*'
else:
text = r'[^<]*'
return {
# NOTE(treinish): Could result in a false positive, but it
# shouldn't be an issue for this case.
'timestamp': '\d{4}-[0,1]\d-[0-3]\d[ ,T]'
'\d{2}:\d{2}:\d{2}'
'(Z|(\+|-)\d{2}:\d{2}|\.\d{6}|)',
'password': '[0-9a-zA-Z]{1,12}',
'ip': '[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}',
'ip6': '([0-9a-zA-Z]{1,4}:){1,7}:?[0-9a-zA-Z]{1,4}',
'id': '(?P<id>[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}'
'-[0-9a-f]{4}-[0-9a-f]{12})',
'uuid': '[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}'
'-[0-9a-f]{4}-[0-9a-f]{12}',
'reservation_id': 'r-[0-9a-zA-Z]{8}',
'private_key': '-----BEGIN RSA PRIVATE KEY-----'
'[a-zA-Z0-9\n/+=]*'
'-----END RSA PRIVATE KEY-----',
'public_key': 'ssh-rsa[ a-zA-Z0-9/+=]*'
'Generated by Nova',
'fingerprint': '([0-9a-f]{2}:){15}[0-9a-f]{2}',
'host': self._get_host(),
'host_name': '[0-9a-z]{32}',
'glance_host': self._get_glance_host(),
'compute_host': self.compute.host,
'text': text,
'int': '[0-9]+',
}
def _get_response(self, url, method, body=None, strip_version=False):
headers = {}
headers['Content-Type'] = 'application/' + self.ctype
headers['Accept'] = 'application/' + self.ctype
return self.api.api_request(url, body=body, method=method,
headers=headers, strip_version=strip_version)
def _do_get(self, url, strip_version=False):
return self._get_response(url, 'GET', strip_version=strip_version)
def _do_post(self, url, name, subs, method='POST'):
body = self._read_template(name) % subs
sample = self._get_sample(name)
if self.generate_samples and not os.path.exists(sample):
self._write_sample(name, body)
return self._get_response(url, method, body)
def _do_put(self, url, name, subs):
return self._do_post(url, name, subs, method='PUT')
def _do_delete(self, url):
return self._get_response(url, 'DELETE')
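# --- Editor's note: illustrative usage, not part of the original test base ---
# Subclasses drive this roughly as below; the template/sample names and the
# 'servers' URL are examples, not guaranteed to exist in every test class:
#
#   subs = self._get_regexes()
#   response = self._do_post('servers', 'server-post-req', subs)
#   self._verify_response('server-post-resp', subs, response, 202)
#
# Templates are strings with %(name)s placeholders: they are filled from `subs`
# when posted, and compiled into anchored regexes (see _compare_result and
# _get_regexes) when the response and the on-disk api_samples are checked.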
|
sacharya/nova
|
nova/tests/integrated/api_samples_test_base.py
|
Python
|
apache-2.0
| 13,038
|
#
# Autogenerated by Thrift Compiler (0.9.3)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
from ttypes import *
AIRAVATA_API_VERSION = "0.17.0"
|
gouravshenoy/airavata
|
airavata-api/airavata-client-sdks/airavata-python-sdk/src/main/resources/lib/apache/airavata/api/constants.py
|
Python
|
apache-2.0
| 276
|
from .test_helper import argv_kiwi_tests
import sys
import mock
import random
from azure.common import AzureMissingResourceHttpError
from collections import namedtuple
from datetime import datetime
from mock import patch
from pytest import raises
from azurectl.account.service import AzureAccount
from azurectl.config.parser import Config
from azurectl.instance.data_disk import DataDisk
import azurectl
from azurectl.defaults import Defaults
from azurectl.azurectl_exceptions import (
AzureDataDiskAttachError,
AzureDataDiskCreateError,
AzureDataDiskDeleteError,
AzureDataDiskNoAvailableLun,
AzureDataDiskShowError
)
class TestDataDisk:
def setup(self):
account = AzureAccount(
Config(
region_name='East US 2', filename='../data/config'
)
)
self.service = mock.Mock()
account.get_management_service = mock.Mock(return_value=self.service)
account.get_blob_service_host_base = mock.Mock(
return_value='test.url'
)
account.storage_key = mock.Mock()
# now that that's done, instantiate a DataDisk with the account
self.data_disk = DataDisk(account)
# asynchronous API operations return a request object
self.my_request = mock.Mock(request_id=Defaults.unify_id(42))
# variables used in multiple tests
self.cloud_service_name = 'mockcloudservice'
self.instance_name = 'mockcloudserviceinstance1'
self.lun = 0
self.host_caching = 'ReadWrite'
self.disk_filename = 'mockcloudserviceinstance1-data-disk-0.vhd'
self.disk_name = 'mockcloudserviceinstance1-data-disk-0'
self.disk_url = (
'https://' +
account.storage_name() +
'.blob.' +
account.get_blob_service_host_base() + '/' +
account.storage_container() + '/' +
self.disk_filename
)
self.disk_label = 'Mock data disk'
self.disk_size = 42
self.timestamp = datetime.utcnow()
self.time_string = datetime.isoformat(self.timestamp).replace(':', '_')
self.account = account
def test_attach_error(self):
self.service.add_data_disk.side_effect = Exception
with raises(AzureDataDiskAttachError):
self.data_disk.attach(
self.disk_name,
self.cloud_service_name,
self.instance_name,
self.disk_label,
self.lun,
self.host_caching
)
@patch('azurectl.instance.data_disk.Storage')
def test_create_error_on_add_disk(self, mock_storage):
self.service.add_disk.side_effect = Exception
with raises(AzureDataDiskCreateError):
self.data_disk.create(
identifier=self.instance_name,
disk_size_in_gb=self.disk_size,
label=self.disk_label
)
@patch('azurectl.instance.data_disk.Storage')
def test_create_error_on_vhd_upload(self, mock_storage):
mock_storage.side_effect = Exception
with raises(AzureDataDiskCreateError):
self.data_disk.create(
identifier=self.instance_name, disk_size_in_gb=self.disk_size
)
def test_delete_error(self):
self.service.delete_disk.side_effect = Exception
with raises(AzureDataDiskDeleteError):
self.data_disk.delete(self.disk_name)
def test_detach_error(self):
self.service.delete_data_disk.side_effect = Exception
with raises(AzureDataDiskDeleteError):
self.data_disk.detach(self.lun, self.cloud_service_name)
def test_show_attached_error(self):
self.service.get_data_disk.side_effect = Exception
with raises(AzureDataDiskShowError):
self.data_disk.show_attached(
self.cloud_service_name, self.instance_name, self.lun
)
def test_show_attached_no_raise_for_all_lun_list(self):
self.service.get_data_disk.side_effect = Exception
result = self.data_disk.show_attached(
self.cloud_service_name
)
assert result == []
def test_show_error(self):
self.service.get_disk.side_effect = Exception
with raises(AzureDataDiskShowError):
self.data_disk.show(self.disk_name)
def test_no_available_lun_exception(self):
self.service.get_data_disk.side_effect = iter([
self.__create_mock_data_disk(i) for i in range(16)
])
with raises(AzureDataDiskNoAvailableLun):
self.data_disk._DataDisk__get_first_available_lun(
self.cloud_service_name, self.instance_name
)
@patch('azurectl.instance.data_disk.datetime')
def test_generate_filename(self, mock_timestamp):
mock_timestamp.utcnow = mock.Mock(return_value=self.timestamp)
mock_timestamp.isoformat = mock.Mock(return_value=self.time_string)
expected = '%s-data-disk-%s.vhd' % (
self.instance_name,
self.time_string
)
result = self.data_disk._DataDisk__generate_filename(
identifier=self.instance_name
)
assert result == expected
def test_get_first_available_lun(self):
self.service.get_data_disk.side_effect = iter([
self.__create_mock_data_disk(0),
self.__create_mock_data_disk(1),
AzureMissingResourceHttpError('NOT FOUND', 404)
])
result = self.data_disk._DataDisk__get_first_available_lun(
self.cloud_service_name, self.instance_name
)
assert self.service.get_data_disk.call_count == 3
assert result == 2 # 0 and 1 are taken
@patch('azurectl.instance.data_disk.datetime')
@patch('azurectl.instance.data_disk.Storage')
def test_create(self, mock_storage, mock_datetime):
self.service.add_disk.return_value = self.my_request
mock_datetime.isoformat.return_value = '0'
time_now = mock.Mock()
time_now.strftime.return_value = 1471858765
mock_datetime.now = mock.Mock(
return_value=time_now
)
result = self.data_disk.create(
identifier=self.instance_name,
disk_size_in_gb=self.disk_size,
label=self.disk_label
)
mock_storage.assert_called_once_with(
self.account, self.account.storage_container()
)
self.service.add_disk.assert_called_once_with(
media_link=self.disk_url,
name=self.data_disk.data_disk_name.replace('.vhd', ''),
label=self.disk_label,
has_operating_system=False,
os='Linux',
)
@patch('azurectl.instance.data_disk.Storage')
def test_sizes_on_create(self, mock_storage_class):
mock_storage = mock.Mock()
mock_storage_class.return_value = mock_storage
# size in GB * bytes/GB + 512 bytes for the footer
blob_size_in_bytes = self.disk_size * 1073741824 + 512
self.data_disk._DataDisk__generate_vhd_footer = mock.Mock(
return_value='mock-footer'
)
self.data_disk._DataDisk__generate_filename = mock.Mock(
return_value='mock-filename'
)
self.data_disk.create(
identifier=self.instance_name,
disk_size_in_gb=self.disk_size,
label=self.disk_label
)
self.data_disk._DataDisk__generate_vhd_footer.assert_called_once_with(
self.disk_size
)
mock_storage.upload_empty_image.assert_called_once_with(
blob_size_in_bytes, 'mock-footer', 'mock-filename'
)
def test_show(self):
self.service.get_disk.return_value = self.__create_mock_disk()
expected = self.__create_expected_disk_output()
result = self.data_disk.show(self.disk_name)
self.service.get_disk.assert_called_once_with(
self.disk_name
)
assert result == expected
def test_show_attached(self):
self.service.get_data_disk.return_value = self.__create_mock_data_disk(
self.lun
)
expected = self.__create_expected_data_disk_output(self.lun)
result = self.data_disk.show_attached(
self.cloud_service_name, self.instance_name, self.lun
)
self.service.get_data_disk.assert_called_once_with(
self.cloud_service_name,
self.cloud_service_name,
self.instance_name,
self.lun
)
assert result == expected
def test_list(self):
self.service.list_disks.return_value = [self.__create_mock_disk()]
expected = self.__create_expected_disk_list_output()
result = self.data_disk.list()
self.service.list_disks.assert_called_once_with()
assert result == expected
def test_list_empty(self):
self.service.list_disks.side_effect = Exception
result = self.data_disk.list()
self.service.list_disks.assert_called_once_with()
assert result == []
def test_attach(self):
self.service.add_data_disk.return_value = self.my_request
result = self.data_disk.attach(
self.disk_name,
self.cloud_service_name,
self.instance_name,
self.disk_label,
self.lun,
self.host_caching
)
assert result == self.my_request.request_id
self.service.add_data_disk.assert_called_once_with(
self.cloud_service_name,
self.cloud_service_name,
self.instance_name,
self.lun,
host_caching=self.host_caching,
disk_label=self.disk_label,
disk_name=self.disk_name
)
@patch('azurectl.instance.data_disk.datetime')
def test_attach_without_lun(self, mock_datetime):
# mock no data disks attached has to result in lun 0 assigned later
self.service.get_data_disk.side_effect = AzureMissingResourceHttpError(
'NOT FOUND', 404
)
mock_datetime.isoformat.return_value = '0'
self.service.add_data_disk.return_value = self.my_request
result = self.data_disk.attach(
self.disk_name,
self.cloud_service_name
)
self.service.add_data_disk.assert_called_once_with(
self.cloud_service_name,
self.cloud_service_name,
self.cloud_service_name,
0,
disk_name=self.disk_name
)
def test_attach_by_blob_name(self):
# should send disk_name and source_media_link in order
# to create a new data-disk
self.service.add_data_disk.return_value = self.my_request
self.service.list_disks.return_value = []
result = self.data_disk.attach(
None,
self.cloud_service_name,
lun=0,
blob_name=self.disk_filename
)
self.service.add_data_disk.assert_called_once_with(
self.cloud_service_name,
self.cloud_service_name,
self.cloud_service_name,
0,
disk_name=self.disk_name,
source_media_link=self.disk_url
)
def test_find_data_disk_name_for_blob_name(self):
mock_disks = [
self.__create_mock_disk()
]
result = self.data_disk._DataDisk__find_existing_disk_name_for_blob_name(
self.disk_filename,
mock_disks
)
assert result == self.disk_name
def test_attach_by_blob_name_with_existing_data_disk(self):
# should find a disk_name associated with blob_name and use it
self.service.add_data_disk.return_value = self.my_request
mock_disks = [
self.__create_mock_disk()
]
self.service.list_disks.return_value = mock_disks
result = self.data_disk.attach(
None,
self.cloud_service_name,
lun=0,
blob_name=self.disk_filename
)
self.service.add_data_disk.assert_called_once_with(
self.cloud_service_name,
self.cloud_service_name,
self.cloud_service_name,
0,
disk_name=self.disk_name
)
def test_attach_by_disk_name_and_blob_name(self):
# should create a new data-disk with supplied disk_name and
# source_media_link set to blob_name url
self.service.add_data_disk.return_value = self.my_request
result = self.data_disk.attach(
self.disk_name,
self.cloud_service_name,
lun=0,
blob_name=self.disk_filename
)
self.service.add_data_disk.assert_called_once_with(
self.cloud_service_name,
self.cloud_service_name,
self.cloud_service_name,
0,
disk_name=self.disk_name,
source_media_link=self.disk_url
)
def test_disk_name_or_blob_name_is_required(self):
with raises(AzureDataDiskAttachError):
self.data_disk.attach(
None, self.cloud_service_name, lun=0, blob_name=None
)
def test_detach(self):
self.service.delete_data_disk.return_value = self.my_request
result = self.data_disk.detach(
self.lun, self.cloud_service_name, self.instance_name
)
self.service.delete_data_disk.assert_called_once_with(
self.cloud_service_name,
self.cloud_service_name,
self.instance_name,
self.lun,
delete_vhd=False
)
assert result == self.my_request.request_id
def test_detach_no_instance_name(self):
self.service.delete_data_disk.return_value = self.my_request
result = self.data_disk.detach(
self.lun, self.cloud_service_name
)
self.service.delete_data_disk.assert_called_once_with(
self.cloud_service_name,
self.cloud_service_name,
self.cloud_service_name,
self.lun,
delete_vhd=False
)
assert result == self.my_request.request_id
def test_delete(self):
self.service.delete_disk.return_value = self.my_request
result = self.data_disk.delete(self.disk_name)
self.service.delete_disk.assert_called_once_with(
self.disk_name, delete_vhd=True
)
def __create_mock_data_disk(self, lun):
data_disk_type = namedtuple(
'data_disk_type', [
'host_caching', 'disk_label', 'disk_name', 'lun',
'logical_disk_size_in_gb', 'media_link', 'source_media_link'
]
)
return data_disk_type(
host_caching=self.host_caching,
disk_label=self.disk_label,
disk_name=self.disk_name,
lun=lun,
logical_disk_size_in_gb=self.disk_size,
media_link=self.disk_url,
source_media_link=''
)
def __create_mock_disk(self):
disk_type = namedtuple(
'disk_type', [
'affinity_group', 'attached_to', 'has_operating_system',
'is_corrupted', 'location', 'logical_disk_size_in_gb',
'label', 'media_link', 'name', 'os', 'source_image_name'
]
)
attach_info_type = namedtuple(
'attach_info_type', [
'hosted_service_name', 'deployment_name', 'role_name'
]
)
return disk_type(
affinity_group='',
attached_to=attach_info_type(
hosted_service_name='',
deployment_name='',
role_name=''
),
has_operating_system=False,
is_corrupted=False,
location='',
logical_disk_size_in_gb=self.disk_size,
label=self.disk_label,
media_link=self.disk_url,
name=self.disk_name,
os='Linux',
source_image_name=''
)
def __create_expected_data_disk_output(self, lun):
return [
{
'size': '%d GB' % self.disk_size,
'label': self.disk_label,
'disk-url': self.disk_url,
'source-image-url': '',
'lun': lun,
'host-caching': 'ReadWrite'
}
]
def __create_expected_disk_output(self):
return {
'affinity_group': '',
'attached_to': {
'hosted_service_name': '',
'deployment_name': '',
'role_name': ''
},
'has_operating_system': False,
'is_corrupted': False,
'location': '',
'logical_disk_size_in_gb': '%d GB' % self.disk_size,
'label': self.disk_label,
'media_link': self.disk_url,
'name': self.disk_name,
'os': 'Linux',
'source_image_name': ''
}
def __create_expected_disk_list_output(self):
return [
{
'is_attached': True,
'name': self.disk_name
}
]
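# --- Editor's note: illustrative remarks, not part of the original tests ---
# The tests reach DataDisk's private helpers through Python name mangling,
# e.g. self.data_disk._DataDisk__get_first_available_lun(...). The mechanism
# in isolation:
#
#   class C(object):
#       def __secret(self):
#           return 42
#   C()._C__secret()   # -> 42
#
# The blob size asserted in test_sizes_on_create is disk_size_in_gb * 1073741824
# + 512, i.e. the raw data bytes plus the fixed 512-byte VHD footer
# (42 GB -> 45097156608 + 512 = 45097157120 bytes).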
|
SUSE/azurectl
|
test/unit/instance_data_disk_test.py
|
Python
|
apache-2.0
| 17,303
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
This pass adds a barrier before the set of final measurements. Measurements
are considered final if they are followed by no other operations (aside from
other measurements or barriers.)
"""
from qiskit.extensions.standard.barrier import Barrier
from qiskit.transpiler.basepasses import TransformationPass
from qiskit.dagcircuit import DAGCircuit
from ..merge_adjacent_barriers import MergeAdjacentBarriers
class BarrierBeforeFinalMeasurements(TransformationPass):
"""Adds a barrier before final measurements."""
def run(self, dag):
"""Return a circuit with a barrier before last measurements."""
# Collect DAG nodes which are followed only by barriers or other measures.
final_op_types = ['measure', 'barrier']
final_ops = []
for candidate_node in dag.named_nodes(*final_op_types):
is_final_op = True
for _, child_successors in dag.bfs_successors(candidate_node):
if any(suc.type == 'op' and suc.name not in final_op_types
for suc in child_successors):
is_final_op = False
break
if is_final_op:
final_ops.append(candidate_node)
if not final_ops:
return dag
# Create a layer with the barrier and add registers from the original dag.
barrier_layer = DAGCircuit()
for qreg in dag.qregs.values():
barrier_layer.add_qreg(qreg)
for creg in dag.cregs.values():
barrier_layer.add_creg(creg)
final_qubits = {final_op.qargs[0] for final_op in final_ops}
barrier_layer.apply_operation_back(
Barrier(len(final_qubits)), list(final_qubits), [])
# Preserve order of final ops collected earlier from the original DAG.
ordered_final_nodes = [node for node in dag.topological_op_nodes()
if node in set(final_ops)]
# Move final ops to the new layer and append the new layer to the DAG.
for final_node in ordered_final_nodes:
barrier_layer.apply_operation_back(final_node.op,
final_node.qargs,
final_node.cargs)
for final_op in final_ops:
dag.remove_op_node(final_op)
dag.extend_back(barrier_layer)
# Merge the new barrier into any other barriers
adjacent_pass = MergeAdjacentBarriers()
return adjacent_pass.run(dag)
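# --- Editor's note: illustrative sketch, not part of the original pass ---
# Running the pass directly on a DAG (register/circuit names are arbitrary;
# assumes qiskit.converters from the same Qiskit release as this module):
#
#   from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
#   from qiskit.converters import circuit_to_dag, dag_to_circuit
#
#   q, c = QuantumRegister(2, 'q'), ClassicalRegister(2, 'c')
#   qc = QuantumCircuit(q, c)
#   qc.h(q[0]); qc.cx(q[0], q[1]); qc.measure(q, c)
#
#   new_dag = BarrierBeforeFinalMeasurements().run(circuit_to_dag(qc))
#   print(dag_to_circuit(new_dag))   # a barrier now precedes both measurements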
|
QISKit/qiskit-sdk-py
|
qiskit/transpiler/passes/mapping/barrier_before_final_measurements.py
|
Python
|
apache-2.0
| 3,034
|
import os
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from db.db_model import User
from settings import DATABASE_NAME, HOST, PASSWORD, USER
from common.strUtil import md5
__all__ = ['Singleton', 'DBOBJ']
class Singleton(object):
"""
Singleton class
"""
def __init__(self, decorated):
self._decorated = decorated
def instance(self, *args, **kwargs):
try:
return self._instance
except AttributeError:
self._instance = self._decorated(*args, **kwargs)
return self._instance
def __call__(self, *args, **kwargs):
        raise TypeError('Singletons must be accessed through the `instance()` method.')
@Singleton
class DBOBJ(object):
"""
    The DB class should only exist once, which is why it has the @Singleton decorator.
    To create an instance you have to use the instance method:
    db = DBOBJ.instance()
"""
engine = None
session = None
def __init__(self):
ConnectString = "mysql://%s:%s@%s/%s?charset=utf8" % (USER, PASSWORD, HOST, DATABASE_NAME)
self.engine = create_engine(ConnectString, pool_size=100, pool_recycle=3600, echo=False, max_overflow=15)
Session = sessionmaker(bind=self.engine)
self.session = Session()
def instance(self, *args, **kwargs):
"""
        Dummy method, because several IDEs cannot handle singletons in Python
"""
pass
def create_user(user_name, password, is_admin=1, is_active=1):
user = User()
user.user_name = user_name
password = md5(password)
user.password = password
user.is_admin = is_admin
user.is_active = is_active
return user
if __name__ == "__main__":
session = DBOBJ.instance().session
user = create_user('admin', 'qr_code')
session.add(user)
session.commit()
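# --- Editor's note: illustrative usage, not part of the original module ---
# The decorator forces access through instance(); direct construction raises
# TypeError, and repeated instance() calls return the same object:
#
#   db_a = DBOBJ.instance()
#   db_b = DBOBJ.instance()
#   assert db_a is db_b
#   DBOBJ()   # raises TypeError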
|
jinlongwang/app_qr_analysis
|
db/db_session.py
|
Python
|
apache-2.0
| 1,878
|
# Copyright 2022 The DDSP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Module with all the global configurable models for training."""
from ddsp.training.models.autoencoder import Autoencoder
from ddsp.training.models.inverse_synthesis import InverseSynthesis
from ddsp.training.models.midi_autoencoder import MidiAutoencoder
from ddsp.training.models.midi_autoencoder import ZMidiAutoencoder
from ddsp.training.models.model import Model
import gin
_configurable = lambda cls: gin.configurable(cls, module=__name__)
Autoencoder = _configurable(Autoencoder)
InverseSynthesis = _configurable(InverseSynthesis)
MidiAutoencoder = _configurable(MidiAutoencoder)
ZMidiAutoencoder = _configurable(ZMidiAutoencoder)
@gin.configurable
def get_model(model=gin.REQUIRED):
"""Gin configurable function get a 'global' model for use in ddsp_run.py.
Convenience for using the same model in train(), evaluate(), and sample().
Args:
model: An instantiated model, such as 'models.Autoencoder()'.
Returns:
The 'global' model specified in the gin config.
"""
return model
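# --- Editor's note: illustrative gin usage, not part of the original module ---
# These wrappers exist so a .gin file can choose the model for ddsp_run.py,
# along the lines of (Autoencoder here is just an example choice):
#
#   get_model.model = @models.Autoencoder()
#
# Training and evaluation code then call models.get_model() to retrieve the
# same configured instance.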
|
magenta/ddsp
|
ddsp/training/models/__init__.py
|
Python
|
apache-2.0
| 1,613
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import warnings
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import reflection
import six
from conveyor.conveyorheat.common import exception
from conveyor.conveyorheat.common import identifier
from conveyor.conveyorheat.common import template_format
from conveyor.conveyorheat.engine import attributes
from conveyor.conveyorheat.engine import environment
from conveyor.conveyorheat.engine import resource
from conveyor.conveyorheat.engine import scheduler
from conveyor.conveyorheat.engine import stack as parser
from conveyor.conveyorheat.engine import template
from conveyor.conveyorheat.objects import stack as stack_object
from conveyor.conveyorheat.objects import stack_lock
from conveyor.conveyorheat.rpc import api as rpc_api
from conveyor.i18n import _
from conveyor.i18n import _LW
from conveyor.conveyorheat.engine.clients.client_plugin import ExceptionFilter
LOG = logging.getLogger(__name__)
class StackResource(resource.Resource):
"""Allows entire stack to be managed as a resource in a parent stack.
An abstract Resource subclass that allows the management of an entire Stack
as a resource in a parent stack.
"""
# Assume True as this is evaluated before the stack is created
# so there is no way to know for sure without subclass-specific
# template parsing.
requires_deferred_auth = True
def __init__(self, name, json_snippet, stack):
super(StackResource, self).__init__(name, json_snippet, stack)
self._nested = None
self.resource_info = None
def validate(self):
super(StackResource, self).validate()
self.validate_nested_stack()
def validate_nested_stack(self):
try:
name = "%s-%s" % (self.stack.name, self.name)
nested_stack = self._parse_nested_stack(
name,
self.child_template(),
self.child_params())
nested_stack.strict_validate = False
nested_stack.validate()
except AssertionError:
raise
except Exception as ex:
raise exception.StackValidationFailed(
error=_("Failed to validate"),
path=[self.stack.t.get_section_name('resources'), self.name],
message=six.text_type(ex))
def _outputs_to_attribs(self, json_snippet):
outputs = json_snippet.get('Outputs')
if not self.attributes and outputs:
self.attributes_schema = (
attributes.Attributes.schema_from_outputs(outputs))
# Note: it can be updated too and for show return dictionary
# with all available outputs
self.attributes = attributes.Attributes(
self.name, self.attributes_schema,
self._resolve_all_attributes)
def _needs_update(self, after, before, after_props, before_props,
prev_resource, check_init_complete=True):
# Issue an update to the nested stack if the stack resource
# is able to update. If return true, let the individual
# resources in it decide if they need updating.
# FIXME (ricolin): seems currently can not call super here
if self.nested() is None and self.status == self.FAILED:
raise exception.UpdateReplace(self)
if self.nested() and self.state == (self.DELETE, self.FAILED):
raise exception.UpdateReplace(self)
# If stack resource is in CHECK_FAILED state, raise UpdateReplace
# to replace the failed stack.
if self.state == (self.CHECK, self.FAILED):
raise exception.UpdateReplace(self)
if (check_init_complete and
self.nested() is None and
self.action == self.INIT and self.status == self.COMPLETE):
raise exception.UpdateReplace(self)
return True
@scheduler.wrappertask
def update(self, after, before=None, prev_resource=None):
try:
yield super(StackResource, self).update(after, before,
prev_resource)
except StopIteration:
with excutils.save_and_reraise_exception():
stack_identity = identifier.HeatIdentifier(
self.context.tenant_id,
self.physical_resource_name(),
self.resource_id)
self.rpc_client().stack_cancel_update(
self.context,
dict(stack_identity),
cancel_with_rollback=False)
def has_nested(self):
if self.nested() is not None:
return True
return False
def nested(self):
"""Return a Stack object representing the nested (child) stack.
If we catch NotFound exception when loading, return None.
"""
if self._nested is None and self.resource_id is not None:
try:
self._nested = parser.Stack.load(self.context,
self.resource_id)
except exception.NotFound:
return None
return self._nested
def child_template(self):
"""Default implementation to get the child template.
Resources that inherit from StackResource should override this method
with specific details about the template used by them.
"""
raise NotImplementedError()
def child_params(self):
"""Default implementation to get the child params.
Resources that inherit from StackResource should override this method
with specific details about the parameters used by them.
"""
raise NotImplementedError()
def preview(self):
"""Preview a StackResource as resources within a Stack.
This method overrides the original Resource.preview to return a preview
of all the resources contained in this Stack. For this to be possible,
the specific resources need to override both ``child_template`` and
``child_params`` with specific information to allow the stack to be
parsed correctly. If any of these methods is missing, the entire
StackResource will be returned as if it were a regular Resource.
"""
try:
child_template = self.child_template()
params = self.child_params()
except NotImplementedError:
class_name = reflection.get_class_name(self, fully_qualified=False)
LOG.warning(_LW("Preview of '%s' not yet implemented"), class_name)
return self
name = "%s-%s" % (self.stack.name, self.name)
self._nested = self._parse_nested_stack(name, child_template, params)
return self.nested().preview_resources()
def _parse_child_template(self, child_template, child_env):
parsed_child_template = child_template
if isinstance(parsed_child_template, template.Template):
parsed_child_template = parsed_child_template.t
return template.Template(parsed_child_template,
files=self.stack.t.files, env=child_env)
def _parse_nested_stack(self, stack_name, child_template,
child_params, timeout_mins=None,
adopt_data=None):
if timeout_mins is None:
timeout_mins = self.stack.timeout_mins
stack_user_project_id = self.stack.stack_user_project_id
new_nested_depth = self._child_nested_depth()
child_env = environment.get_child_environment(
self.stack.env, child_params,
child_resource_name=self.name,
item_to_remove=self.resource_info)
parsed_template = self._child_parsed_template(child_template,
child_env)
# Note we disable rollback for nested stacks, since they
# should be rolled back by the parent stack on failure
nested = parser.Stack(self.context,
stack_name,
parsed_template,
timeout_mins=timeout_mins,
disable_rollback=True,
parent_resource=self.name,
owner_id=self.stack.id,
user_creds_id=self.stack.user_creds_id,
stack_user_project_id=stack_user_project_id,
adopt_stack_data=adopt_data,
nested_depth=new_nested_depth)
nested.set_parent_stack(self.stack)
return nested
def _child_nested_depth(self):
if self.stack.nested_depth >= cfg.CONF.max_nested_stack_depth:
msg = _("Recursion depth exceeds %d."
) % cfg.CONF.max_nested_stack_depth
raise exception.RequestLimitExceeded(message=msg)
return self.stack.nested_depth + 1
def _child_parsed_template(self, child_template, child_env):
parsed_template = self._parse_child_template(child_template, child_env)
self._validate_nested_resources(parsed_template)
# Don't overwrite the attributes_schema for subclasses that
# define their own attributes_schema.
if not hasattr(type(self), 'attributes_schema'):
self.attributes = None
self._outputs_to_attribs(parsed_template)
return parsed_template
def _validate_nested_resources(self, templ):
if cfg.CONF.max_resources_per_stack == -1:
return
total_resources = (len(templ[templ.RESOURCES]) +
self.stack.total_resources(self.root_stack_id))
if self.nested():
# It's an update and these resources will be deleted
total_resources -= len(self.nested().resources)
if (total_resources > cfg.CONF.max_resources_per_stack):
message = exception.StackResourceLimitExceeded.msg_fmt
raise exception.RequestLimitExceeded(message=message)
def create_with_template(self, child_template, user_params=None,
timeout_mins=None, adopt_data=None):
"""Create the nested stack with the given template."""
name = self.physical_resource_name()
if timeout_mins is None:
timeout_mins = self.stack.timeout_mins
stack_user_project_id = self.stack.stack_user_project_id
kwargs = self._stack_kwargs(user_params, child_template)
adopt_data_str = None
if adopt_data is not None:
if 'environment' not in adopt_data:
adopt_data['environment'] = kwargs['params']
if 'template' not in adopt_data:
if isinstance(child_template, template.Template):
adopt_data['template'] = child_template.t
else:
adopt_data['template'] = child_template
adopt_data_str = json.dumps(adopt_data)
args = {rpc_api.PARAM_TIMEOUT: timeout_mins,
rpc_api.PARAM_DISABLE_ROLLBACK: True,
rpc_api.PARAM_ADOPT_STACK_DATA: adopt_data_str}
kwargs.update({
'stack_name': name,
'args': args,
'environment_files': None,
'owner_id': self.stack.id,
'user_creds_id': self.stack.user_creds_id,
'stack_user_project_id': stack_user_project_id,
'nested_depth': self._child_nested_depth(),
'parent_resource_name': self.name
})
with self.translate_remote_exceptions:
result = self.rpc_client()._create_stack(self.context, **kwargs)
self.resource_id_set(result['stack_id'])
def _stack_kwargs(self, user_params, child_template):
if user_params is None:
user_params = self.child_params()
if child_template is None:
child_template = self.child_template()
child_env = environment.get_child_environment(
self.stack.env,
user_params,
child_resource_name=self.name,
item_to_remove=self.resource_info)
parsed_template = self._child_parsed_template(child_template,
child_env)
return {
'template': parsed_template.t,
'params': child_env.user_env_as_dict(),
'files': parsed_template.files
}
def raise_local_exception(self, ex):
warnings.warn('raise_local_exception() is deprecated. Use the '
'translate_remote_exceptions context manager instead.',
DeprecationWarning)
return self.translate_remote_exceptions(ex)
@ExceptionFilter
def translate_remote_exceptions(self, ex):
if (isinstance(ex, exception.ActionInProgress) and
self.stack.action == self.stack.ROLLBACK):
# The update was interrupted and the rollback is already in
# progress, so just ignore the error and wait for the rollback to
# finish
return True
class_name = reflection.get_class_name(ex, fully_qualified=False)
if not class_name.endswith('_Remote'):
return False
full_message = six.text_type(ex)
if full_message.find('\n') > -1:
message, msg_trace = full_message.split('\n', 1)
else:
message = full_message
raise exception.ResourceFailure(message, self, action=self.action)
def check_create_complete(self, cookie=None):
return self._check_status_complete(self.CREATE)
def _check_status_complete(self, expected_action, cookie=None):
try:
data = stack_object.Stack.get_status(self.context,
self.resource_id)
except exception.NotFound:
if expected_action == self.DELETE:
return True
# It's possible the engine handling the create hasn't persisted
# the stack to the DB when we first start polling for state
return False
action, status, status_reason, updated_time = data
if action != expected_action:
return False
# Has the action really started?
#
# The rpc call to update does not guarantee that the stack will be
# placed into IN_PROGRESS by the time it returns (it runs stack.update
# in a thread) so you could also have a situation where we get into
# this method and the update hasn't even started.
#
# So we are using a mixture of state (action+status) and updated_at
# to see if the action has actually progressed.
# - very fast updates (like something with one RandomString) we will
# probably miss the state change, but we should catch the updated_at.
# - very slow updates we won't see the updated_at for quite a while,
# but should see the state change.
if cookie is not None:
prev_state = cookie['previous']['state']
prev_updated_at = cookie['previous']['updated_at']
if (prev_updated_at == updated_time and
prev_state == (action, status)):
return False
if status == self.IN_PROGRESS:
return False
elif status == self.COMPLETE:
ret = stack_lock.StackLock.get_engine_id(self.resource_id) is None
if ret:
# Reset nested, to indicate we changed status
self._nested = None
return ret
elif status == self.FAILED:
raise exception.ResourceFailure(status_reason, self,
action=action)
else:
raise exception.ResourceUnknownStatus(
resource_status=status,
status_reason=status_reason,
result=_('Stack unknown status'))
def check_adopt_complete(self, cookie=None):
return self._check_status_complete(self.ADOPT)
def update_with_template(self, child_template, user_params=None,
timeout_mins=None):
"""Update the nested stack with the new template."""
if self.id is None:
self._store()
nested_stack = self.nested()
if nested_stack is None:
# if the create failed for some reason and the nested
# stack was not created, we need to create an empty stack
# here so that the update will work.
def _check_for_completion():
while not self.check_create_complete():
yield
empty_temp = template_format.parse(
"heat_template_version: '2013-05-23'")
self.create_with_template(empty_temp, {})
checker = scheduler.TaskRunner(_check_for_completion)
checker(timeout=self.stack.timeout_secs())
nested_stack = self.nested()
if timeout_mins is None:
timeout_mins = self.stack.timeout_mins
kwargs = self._stack_kwargs(user_params, child_template)
cookie = {'previous': {
'updated_at': nested_stack.updated_time,
'state': nested_stack.state}}
kwargs.update({
'stack_identity': dict(nested_stack.identifier()),
'args': {rpc_api.PARAM_TIMEOUT: timeout_mins}
})
with self.translate_remote_exceptions:
self.rpc_client().update_stack(self.context, **kwargs)
return cookie
def check_update_complete(self, cookie=None):
return self._check_status_complete(self.UPDATE,
cookie=cookie)
def delete_nested(self):
"""Delete the nested stack."""
stack = self.nested()
if stack is None:
return
stack_identity = dict(stack.identifier())
try:
if self.abandon_in_progress:
self.rpc_client().abandon_stack(self.context, stack_identity)
else:
self.rpc_client().delete_stack(self.context, stack_identity,
cast=False)
except Exception as ex:
self.rpc_client().ignore_error_named(ex, 'EntityNotFound')
def handle_delete(self):
return self.delete_nested()
def check_delete_complete(self, cookie=None):
return self._check_status_complete(self.DELETE)
def handle_suspend(self):
stack = self.nested()
if stack is None:
raise exception.Error(_('Cannot suspend %s, stack not created')
% self.name)
stack_identity = identifier.HeatIdentifier(
self.context.tenant_id,
self.physical_resource_name(),
self.resource_id)
self.rpc_client().stack_suspend(self.context, dict(stack_identity))
def check_suspend_complete(self, cookie=None):
return self._check_status_complete(self.SUSPEND)
def handle_resume(self):
stack = self.nested()
if stack is None:
raise exception.Error(_('Cannot resume %s, stack not created')
% self.name)
stack_identity = identifier.HeatIdentifier(
self.context.tenant_id,
self.physical_resource_name(),
self.resource_id)
self.rpc_client().stack_resume(self.context, dict(stack_identity))
def check_resume_complete(self, cookie=None):
return self._check_status_complete(self.RESUME)
def handle_check(self):
stack = self.nested()
if stack is None:
raise exception.Error(_('Cannot check %s, stack not created')
% self.name)
stack_identity = identifier.HeatIdentifier(
self.context.tenant_id,
self.physical_resource_name(),
self.resource_id)
self.rpc_client().stack_check(self.context, dict(stack_identity))
def check_check_complete(self, cookie=None):
return self._check_status_complete(self.CHECK)
def prepare_abandon(self):
self.abandon_in_progress = True
nested_stack = self.nested()
if nested_stack:
return self.nested().prepare_abandon()
return {}
def get_output(self, op):
"""Return the specified Output value from the nested stack.
If the output key does not exist, raise an InvalidTemplateAttribute
exception.
"""
stack = self.nested()
if stack is None:
return None
if op not in stack.outputs:
raise exception.InvalidTemplateAttribute(resource=self.name,
key=op)
result = stack.output(op)
if result is None and stack.outputs[op].get('error_msg') is not None:
raise exception.InvalidTemplateAttribute(resource=self.name,
key=op)
return result
def _resolve_attribute(self, name):
return self.get_output(name)
|
Hybrid-Cloud/conveyor
|
conveyor/conveyorheat/engine/resources/stack_resource.py
|
Python
|
apache-2.0
| 21,855
|
# Copyright 2014 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
from keystoneauth1 import loading
import ceilometer.agent.manager
import ceilometer.api
import ceilometer.api.app
import ceilometer.cmd.polling
import ceilometer.collector
import ceilometer.compute.discovery
import ceilometer.compute.notifications
import ceilometer.compute.util
import ceilometer.compute.virt.inspector
import ceilometer.compute.virt.libvirt.inspector
import ceilometer.compute.virt.vmware.inspector
import ceilometer.compute.virt.xenapi.inspector
import ceilometer.coordination
import ceilometer.dispatcher
import ceilometer.dispatcher.file
import ceilometer.dispatcher.gnocchi
import ceilometer.energy.kwapi
import ceilometer.event.converter
import ceilometer.hardware.discovery
import ceilometer.image.glance
import ceilometer.ipmi.notifications.ironic
import ceilometer.ipmi.platform.intel_node_manager
import ceilometer.ipmi.pollsters
import ceilometer.keystone_client
import ceilometer.meter.notifications
import ceilometer.middleware
import ceilometer.network.notifications
import ceilometer.neutron_client
import ceilometer.notification
import ceilometer.nova_client
import ceilometer.objectstore.rgw
import ceilometer.objectstore.swift
import ceilometer.pipeline
import ceilometer.publisher.messaging
import ceilometer.publisher.utils
import ceilometer.sample
import ceilometer.service
import ceilometer.storage
import ceilometer.utils
def list_opts():
return [
('DEFAULT',
itertools.chain(ceilometer.agent.manager.OPTS,
ceilometer.api.app.OPTS,
ceilometer.cmd.polling.CLI_OPTS,
ceilometer.compute.notifications.OPTS,
ceilometer.compute.util.OPTS,
ceilometer.compute.virt.inspector.OPTS,
ceilometer.compute.virt.libvirt.inspector.OPTS,
ceilometer.dispatcher.OPTS,
ceilometer.image.glance.OPTS,
ceilometer.ipmi.notifications.ironic.OPTS,
ceilometer.middleware.OPTS,
ceilometer.network.notifications.OPTS,
ceilometer.nova_client.OPTS,
ceilometer.objectstore.swift.OPTS,
ceilometer.pipeline.OPTS,
ceilometer.sample.OPTS,
ceilometer.service.OPTS,
ceilometer.storage.OLD_OPTS,
ceilometer.storage.CLI_OPTS,
ceilometer.utils.OPTS,)),
('api',
itertools.chain(ceilometer.api.OPTS,
ceilometer.api.app.API_OPTS,
[ceilometer.service.API_OPT])),
# deprecated path, new one is 'polling'
('central', ceilometer.agent.manager.OPTS),
('collector',
itertools.chain(ceilometer.collector.OPTS,
[ceilometer.service.COLL_OPT])),
('compute', ceilometer.compute.discovery.OPTS),
('coordination', ceilometer.coordination.OPTS),
('database', ceilometer.storage.OPTS),
('dispatcher_file', ceilometer.dispatcher.file.OPTS),
('dispatcher_gnocchi', ceilometer.dispatcher.gnocchi.dispatcher_opts),
('event', ceilometer.event.converter.OPTS),
('exchange_control', ceilometer.exchange_control.EXCHANGE_OPTS),
('hardware', ceilometer.hardware.discovery.OPTS),
('ipmi',
itertools.chain(ceilometer.ipmi.platform.intel_node_manager.OPTS,
ceilometer.ipmi.pollsters.OPTS)),
('meter', ceilometer.meter.notifications.OPTS),
('notification',
itertools.chain(ceilometer.notification.OPTS,
[ceilometer.service.NOTI_OPT])),
('polling', ceilometer.agent.manager.OPTS),
('publisher', ceilometer.publisher.utils.OPTS),
('publisher_notifier', ceilometer.publisher.messaging.NOTIFIER_OPTS),
('rgw_admin_credentials', ceilometer.objectstore.rgw.CREDENTIAL_OPTS),
# NOTE(sileht): the configuration file contains only the options
# for the password plugin that handles keystone v2 and v3 API
# with discovery. But other options are possible.
# Also, the default loaded plugin is password-ceilometer-legacy for
        # backward compatibility
('service_credentials', (
ceilometer.keystone_client.CLI_OPTS +
loading.get_auth_common_conf_options() +
loading.get_auth_plugin_conf_options('password'))),
('service_types',
itertools.chain(ceilometer.energy.kwapi.SERVICE_OPTS,
ceilometer.image.glance.SERVICE_OPTS,
ceilometer.neutron_client.SERVICE_OPTS,
ceilometer.nova_client.SERVICE_OPTS,
ceilometer.objectstore.rgw.SERVICE_OPTS,
ceilometer.objectstore.swift.SERVICE_OPTS,)),
('storage', ceilometer.dispatcher.STORAGE_OPTS),
('vmware', ceilometer.compute.virt.vmware.inspector.OPTS),
('xenapi', ceilometer.compute.virt.xenapi.inspector.OPTS),
]
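# Illustrative sketch (not part of the original module): list_opts() is the hook that
# configuration-generation tooling iterates to discover every option group. A minimal
# manual use, assuming each iterable yields oslo.config Opt objects, would be:
#
#     for group, opts in list_opts():
#         print(group, sorted(opt.name for opt in opts))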
|
idegtiarov/ceilometer
|
ceilometer/opts.py
|
Python
|
apache-2.0
| 5,763
|
#!/usr/bin/env python
#Copyright 2013 Matthew Thode
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
"""
Write a script that creates a CDN-enabled container in Cloud Files.
Worth 1 Point
"""
import pyrax
from os.path import expanduser
if __name__ == '__main__':
#initialize pyrax and create variables
pyrax.set_credential_file(expanduser("~/.rackspace_cloud_credentials"))
cf = pyrax.cloudfiles
cont = cf.create_container("example")
cf.make_container_public("example", ttl=900)
|
prometheanfire/rackspace-challenges
|
challenge6.py
|
Python
|
apache-2.0
| 991
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple
import numpy
import scipy.sparse.linalg
import cirq
import openfermion
import pytest
from openfermioncirq import simulate_trotter
from openfermioncirq.trotter import (
LINEAR_SWAP_NETWORK,
LOW_RANK,
LowRankTrotterAlgorithm,
SPLIT_OPERATOR,
TrotterAlgorithm,
)
from openfermioncirq.trotter.trotter_algorithm import Hamiltonian
def fidelity(state1, state2):
return abs(numpy.dot(state1, numpy.conjugate(state2)))**2
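# Illustrative note (not part of the original test module): fidelity() above is the squared
# overlap |<state1|state2>|**2, so fidelity(v, v) == 1.0 for any normalized state vector v,
# and orthogonal states give 0.0.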
def produce_simulation_test_parameters(
hamiltonian: Hamiltonian,
time: float,
seed: Optional[int]=None
) -> Tuple[numpy.ndarray, numpy.ndarray]:
"""Produce objects for testing Hamiltonian simulation.
Produces a random initial state and evolves it under the given Hamiltonian
for the specified amount of time. Returns the initial state and final
state.
Args:
hamiltonian: The Hamiltonian to evolve under.
time: The time to evolve for
seed: An RNG seed.
"""
n_qubits = openfermion.count_qubits(hamiltonian)
# Construct a random initial state
initial_state = openfermion.haar_random_vector(2**n_qubits, seed)
# Simulate exact evolution
hamiltonian_sparse = openfermion.get_sparse_operator(hamiltonian)
exact_state = scipy.sparse.linalg.expm_multiply(
-1j * time * hamiltonian_sparse, initial_state)
# Make sure the time is not too small
assert fidelity(exact_state, initial_state) < .95
return initial_state, exact_state
# Produce test parameters
longer_time = 1.0
long_time = 0.1
short_time = 0.05
# 5-qubit random DiagonalCoulombHamiltonian
diag_coul_hamiltonian = openfermion.random_diagonal_coulomb_hamiltonian(
5, real=False, seed=65233)
diag_coul_initial_state, diag_coul_exact_state = (
produce_simulation_test_parameters(
diag_coul_hamiltonian, long_time, seed=49075)
)
# Hubbard model, reordered
hubbard_model = openfermion.fermi_hubbard(2, 2, 1.0, 4.0)
hubbard_model = openfermion.reorder(hubbard_model, openfermion.up_then_down)
hubbard_hamiltonian = openfermion.get_diagonal_coulomb_hamiltonian(
hubbard_model)
hubbard_initial_state, hubbard_exact_state = (
produce_simulation_test_parameters(
hubbard_hamiltonian, long_time, seed=8200)
)
# 4-qubit H2 2-2 with bond length 0.7414
bond_length = 0.7414
geometry = [('H', (0., 0., 0.)), ('H', (0., 0., bond_length))]
h2_hamiltonian = openfermion.load_molecular_hamiltonian(
geometry, 'sto-3g', 1, format(bond_length), 2, 2)
h2_initial_state, h2_exact_state = produce_simulation_test_parameters(
h2_hamiltonian, longer_time, seed=44303)
# 4-qubit LiH 2-2 with bond length 1.45
bond_length = 1.45
geometry = [('Li', (0., 0., 0.)), ('H', (0., 0., bond_length))]
lih_hamiltonian = openfermion.load_molecular_hamiltonian(
geometry, 'sto-3g', 1, format(bond_length), 2, 2)
lih_initial_state, lih_exact_state = produce_simulation_test_parameters(
lih_hamiltonian, longer_time, seed=54458)
@pytest.mark.parametrize(
'hamiltonian, time, initial_state, exact_state, order, n_steps, '
'algorithm, result_fidelity', [
(diag_coul_hamiltonian, long_time, diag_coul_initial_state,
diag_coul_exact_state, 0, 5, None, .99),
(diag_coul_hamiltonian, long_time, diag_coul_initial_state,
diag_coul_exact_state, 0, 12, None, .999),
(diag_coul_hamiltonian, long_time, diag_coul_initial_state,
diag_coul_exact_state, 1, 1, LINEAR_SWAP_NETWORK, .99),
(diag_coul_hamiltonian, long_time, diag_coul_initial_state,
diag_coul_exact_state, 2, 1, LINEAR_SWAP_NETWORK, .99999),
(diag_coul_hamiltonian, long_time, diag_coul_initial_state,
diag_coul_exact_state, 0, 3, SPLIT_OPERATOR, .99),
(diag_coul_hamiltonian, long_time, diag_coul_initial_state,
diag_coul_exact_state, 0, 6, SPLIT_OPERATOR, .999),
(diag_coul_hamiltonian, long_time, diag_coul_initial_state,
diag_coul_exact_state, 1, 1, SPLIT_OPERATOR, .99),
(diag_coul_hamiltonian, long_time, diag_coul_initial_state,
diag_coul_exact_state, 2, 1, SPLIT_OPERATOR, .99999),
(hubbard_hamiltonian, long_time, hubbard_initial_state,
hubbard_exact_state, 0, 3, SPLIT_OPERATOR, .999),
(hubbard_hamiltonian, long_time, hubbard_initial_state,
hubbard_exact_state, 0, 6, SPLIT_OPERATOR, .9999),
(h2_hamiltonian, longer_time, h2_initial_state,
h2_exact_state, 0, 1, LOW_RANK, .99),
(h2_hamiltonian, longer_time, h2_initial_state,
h2_exact_state, 0, 10, LOW_RANK, .9999),
(lih_hamiltonian, longer_time, lih_initial_state, lih_exact_state,
0, 1, LowRankTrotterAlgorithm(final_rank=2), .999),
(lih_hamiltonian, longer_time, lih_initial_state, lih_exact_state,
0, 10, LowRankTrotterAlgorithm(final_rank=2), .9999),
])
def test_simulate_trotter_simulate(
hamiltonian, time, initial_state, exact_state, order, n_steps,
algorithm, result_fidelity):
n_qubits = openfermion.count_qubits(hamiltonian)
qubits = cirq.LineQubit.range(n_qubits)
start_state = initial_state
circuit = cirq.Circuit(simulate_trotter(
qubits, hamiltonian, time, n_steps, order, algorithm))
final_state = circuit.final_wavefunction(start_state)
correct_state = exact_state
assert fidelity(final_state, correct_state) > result_fidelity
# Make sure the time wasn't too small
assert fidelity(final_state, start_state) < 0.95 * result_fidelity
@pytest.mark.parametrize(
'hamiltonian, time, initial_state, exact_state, order, n_steps, '
'algorithm, result_fidelity', [
(diag_coul_hamiltonian, long_time, diag_coul_initial_state,
diag_coul_exact_state, 0, 5, None, .99),
(diag_coul_hamiltonian, long_time, diag_coul_initial_state,
diag_coul_exact_state, 0, 12, None, .999),
(diag_coul_hamiltonian, long_time, diag_coul_initial_state,
diag_coul_exact_state, 1, 1, LINEAR_SWAP_NETWORK, .99),
(diag_coul_hamiltonian, long_time, diag_coul_initial_state,
diag_coul_exact_state, 2, 1, LINEAR_SWAP_NETWORK, .99999),
(diag_coul_hamiltonian, long_time, diag_coul_initial_state,
diag_coul_exact_state, 0, 3, SPLIT_OPERATOR, .99),
(diag_coul_hamiltonian, long_time, diag_coul_initial_state,
diag_coul_exact_state, 0, 6, SPLIT_OPERATOR, .999),
(diag_coul_hamiltonian, long_time, diag_coul_initial_state,
diag_coul_exact_state, 1, 1, SPLIT_OPERATOR, .99),
(diag_coul_hamiltonian, long_time, diag_coul_initial_state,
diag_coul_exact_state, 2, 1, SPLIT_OPERATOR, .99999),
(h2_hamiltonian, longer_time, h2_initial_state,
h2_exact_state, 0, 1, LOW_RANK, .99),
(h2_hamiltonian, longer_time, h2_initial_state,
h2_exact_state, 0, 10, LOW_RANK, .9999),
(lih_hamiltonian, longer_time, lih_initial_state, lih_exact_state,
0, 1, LowRankTrotterAlgorithm(final_rank=2), .999),
(lih_hamiltonian, longer_time, lih_initial_state, lih_exact_state,
0, 10, LowRankTrotterAlgorithm(final_rank=2), .9999),
])
def test_simulate_trotter_simulate_controlled(
hamiltonian, time, initial_state, exact_state, order, n_steps,
algorithm, result_fidelity):
n_qubits = openfermion.count_qubits(hamiltonian)
qubits = cirq.LineQubit.range(n_qubits)
control = cirq.LineQubit(-1)
zero = [1, 0]
one = [0, 1]
start_state = (numpy.kron(zero, initial_state)
+ numpy.kron(one, initial_state)) / numpy.sqrt(2)
circuit = cirq.Circuit(simulate_trotter(
qubits, hamiltonian, time, n_steps, order, algorithm, control))
final_state = circuit.final_wavefunction(start_state)
correct_state = (numpy.kron(zero, initial_state)
+ numpy.kron(one, exact_state)) / numpy.sqrt(2)
assert fidelity(final_state, correct_state) > result_fidelity
# Make sure the time wasn't too small
assert fidelity(final_state, start_state) < 0.95 * result_fidelity
def test_simulate_trotter_omit_final_swaps():
n_qubits = 5
qubits = cirq.LineQubit.range(n_qubits)
hamiltonian = openfermion.DiagonalCoulombHamiltonian(
one_body=numpy.ones((n_qubits, n_qubits)),
two_body=numpy.ones((n_qubits, n_qubits)))
time = 1.0
circuit_with_swaps = cirq.Circuit(
simulate_trotter(
qubits, hamiltonian, time, order=0,
algorithm=LINEAR_SWAP_NETWORK))
circuit_without_swaps = cirq.Circuit(
simulate_trotter(
qubits, hamiltonian, time, order=0,
algorithm=LINEAR_SWAP_NETWORK,
omit_final_swaps=True))
assert len(circuit_without_swaps) < len(circuit_with_swaps)
circuit_with_swaps = cirq.Circuit(
simulate_trotter(
qubits,
hamiltonian,
time,
order=1,
n_steps=3,
algorithm=SPLIT_OPERATOR),
strategy=cirq.InsertStrategy.NEW)
circuit_without_swaps = cirq.Circuit(
simulate_trotter(
qubits,
hamiltonian,
time,
order=1,
n_steps=3,
algorithm=SPLIT_OPERATOR,
omit_final_swaps=True),
strategy=cirq.InsertStrategy.NEW)
assert len(circuit_without_swaps) < len(circuit_with_swaps)
hamiltonian = lih_hamiltonian
qubits = cirq.LineQubit.range(4)
circuit_with_swaps = cirq.Circuit(
simulate_trotter(
qubits, hamiltonian, time, order=0,
algorithm=LOW_RANK))
circuit_without_swaps = cirq.Circuit(
simulate_trotter(
qubits, hamiltonian, time, order=0,
algorithm=LOW_RANK,
omit_final_swaps=True))
assert len(circuit_without_swaps) < len(circuit_with_swaps)
def test_simulate_trotter_bad_order_raises_error():
qubits = cirq.LineQubit.range(2)
hamiltonian = openfermion.random_diagonal_coulomb_hamiltonian(2, seed=0)
time = 1.0
with pytest.raises(ValueError):
_ = next(simulate_trotter(qubits, hamiltonian, time, order=-1))
def test_simulate_trotter_bad_hamiltonian_type_raises_error():
qubits = cirq.LineQubit.range(2)
hamiltonian = openfermion.FermionOperator()
time = 1.0
with pytest.raises(TypeError):
_ = next(simulate_trotter(qubits, hamiltonian, time,
algorithm=None))
with pytest.raises(TypeError):
_ = next(simulate_trotter(qubits, hamiltonian, time,
algorithm=LINEAR_SWAP_NETWORK))
def test_simulate_trotter_unsupported_trotter_step_raises_error():
qubits = cirq.LineQubit.range(2)
control = cirq.LineQubit(-1)
hamiltonian = openfermion.random_diagonal_coulomb_hamiltonian(2, seed=0)
time = 1.0
class EmptyTrotterAlgorithm(TrotterAlgorithm):
supported_types = {openfermion.DiagonalCoulombHamiltonian}
algorithm = EmptyTrotterAlgorithm()
with pytest.raises(ValueError):
_ = next(simulate_trotter(qubits, hamiltonian, time, order=0,
algorithm=algorithm))
with pytest.raises(ValueError):
_ = next(simulate_trotter(qubits, hamiltonian, time, order=1,
algorithm=algorithm))
with pytest.raises(ValueError):
_ = next(simulate_trotter(qubits, hamiltonian, time, order=0,
algorithm=algorithm, control_qubit=control))
with pytest.raises(ValueError):
_ = next(simulate_trotter(qubits, hamiltonian, time, order=1,
algorithm=algorithm, control_qubit=control))
@pytest.mark.parametrize('algorithm_type,hamiltonian', [
(LINEAR_SWAP_NETWORK, openfermion.random_diagonal_coulomb_hamiltonian(2)),
(LOW_RANK, openfermion.random_interaction_operator(2)),
(SPLIT_OPERATOR, openfermion.random_diagonal_coulomb_hamiltonian(2)),
])
def test_trotter_misspecified_control_raises_error(algorithm_type, hamiltonian):
qubits = cirq.LineQubit.range(2)
time = 2.
algorithms = [algorithm_type.controlled_asymmetric(hamiltonian),
algorithm_type.controlled_symmetric(hamiltonian)]
for algorithm in algorithms:
if algorithm is None:
continue
with pytest.raises(TypeError):
next(algorithm.trotter_step(qubits, time))
with pytest.raises(TypeError):
next(algorithm.trotter_step(qubits, time, control_qubit=2))
|
quantumlib/OpenFermion-Cirq
|
openfermioncirq/trotter/simulate_trotter_test.py
|
Python
|
apache-2.0
| 13,634
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tableacl.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='tableacl.proto',
package='tableacl',
syntax='proto3',
serialized_pb=_b('\n\x0etableacl.proto\x12\x08tableacl\"q\n\x0eTableGroupSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1f\n\x17table_names_or_prefixes\x18\x02 \x03(\t\x12\x0f\n\x07readers\x18\x03 \x03(\t\x12\x0f\n\x07writers\x18\x04 \x03(\t\x12\x0e\n\x06\x61\x64mins\x18\x05 \x03(\t\"8\n\x06\x43onfig\x12.\n\x0ctable_groups\x18\x01 \x03(\x0b\x32\x18.tableacl.TableGroupSpecb\x06proto3')
)
_TABLEGROUPSPEC = _descriptor.Descriptor(
name='TableGroupSpec',
full_name='tableacl.TableGroupSpec',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='tableacl.TableGroupSpec.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='table_names_or_prefixes', full_name='tableacl.TableGroupSpec.table_names_or_prefixes', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='readers', full_name='tableacl.TableGroupSpec.readers', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='writers', full_name='tableacl.TableGroupSpec.writers', index=3,
number=4, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='admins', full_name='tableacl.TableGroupSpec.admins', index=4,
number=5, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=28,
serialized_end=141,
)
_CONFIG = _descriptor.Descriptor(
name='Config',
full_name='tableacl.Config',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='table_groups', full_name='tableacl.Config.table_groups', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=143,
serialized_end=199,
)
_CONFIG.fields_by_name['table_groups'].message_type = _TABLEGROUPSPEC
DESCRIPTOR.message_types_by_name['TableGroupSpec'] = _TABLEGROUPSPEC
DESCRIPTOR.message_types_by_name['Config'] = _CONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
TableGroupSpec = _reflection.GeneratedProtocolMessageType('TableGroupSpec', (_message.Message,), dict(
DESCRIPTOR = _TABLEGROUPSPEC,
__module__ = 'tableacl_pb2'
# @@protoc_insertion_point(class_scope:tableacl.TableGroupSpec)
))
_sym_db.RegisterMessage(TableGroupSpec)
Config = _reflection.GeneratedProtocolMessageType('Config', (_message.Message,), dict(
DESCRIPTOR = _CONFIG,
__module__ = 'tableacl_pb2'
# @@protoc_insertion_point(class_scope:tableacl.Config)
))
_sym_db.RegisterMessage(Config)
# @@protoc_insertion_point(module_scope)
|
alainjobart/vitess
|
py/vtproto/tableacl_pb2.py
|
Python
|
apache-2.0
| 4,714
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Token provider interface."""
import abc
import base64
import datetime
import sys
import uuid
from oslo_config import cfg
from oslo_log import log
from oslo_utils import timeutils
import six
from keystone.common import cache
from keystone.common import dependency
from keystone.common import manager
from keystone import exception
from keystone.i18n import _, _LE
from keystone.models import token_model
from keystone import notifications
from keystone.token import persistence
from keystone.token import providers
from keystone.token import utils
CONF = cfg.CONF
LOG = log.getLogger(__name__)
MEMOIZE = cache.get_memoization_decorator(group='token')
# NOTE(morganfainberg): This is for compatibility in case someone was relying
# on the old location of the UnsupportedTokenVersionException for their code.
UnsupportedTokenVersionException = exception.UnsupportedTokenVersionException
# supported token versions
V2 = token_model.V2
V3 = token_model.V3
VERSIONS = token_model.VERSIONS
def base64_encode(s):
"""Encode a URL-safe string.
:type s: six.text_type
:rtype: six.text_type
"""
# urlsafe_b64encode() returns six.binary_type so need to convert to
# six.text_type, might as well do it before stripping.
return base64.urlsafe_b64encode(s).decode('utf-8').rstrip('=')
def random_urlsafe_str():
"""Generate a random URL-safe string.
:rtype: six.text_type
"""
# chop the padding (==) off the end of the encoding to save space
return base64.urlsafe_b64encode(uuid.uuid4().bytes)[:-2].decode('utf-8')
def random_urlsafe_str_to_bytes(s):
"""Convert a string from :func:`random_urlsafe_str()` to six.binary_type.
:type s: six.text_type
:rtype: six.binary_type
"""
# urlsafe_b64decode() requires str, unicode isn't accepted.
s = str(s)
# restore the padding (==) at the end of the string
return base64.urlsafe_b64decode(s + '==')
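# Illustrative sketch (not part of the original module): random_urlsafe_str() and
# random_urlsafe_str_to_bytes() are inverses -- a 16-byte uuid4 value encodes to a
# 22-character url-safe string once the '==' padding is chopped, e.g.
#
#     s = random_urlsafe_str()                        # 22 characters, no padding
#     assert len(random_urlsafe_str_to_bytes(s)) == 16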
def default_expire_time():
"""Determine when a fresh token should expire.
Expiration time varies based on configuration (see ``[token] expiration``).
:returns: a naive UTC datetime.datetime object
"""
expire_delta = datetime.timedelta(seconds=CONF.token.expiration)
return timeutils.utcnow() + expire_delta
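# Illustrative example (not part of the original module), assuming the common default of
# ``[token] expiration = 3600``:
#
#     default_expire_time() - timeutils.utcnow()      # ~= datetime.timedelta(seconds=3600)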
def audit_info(parent_audit_id):
"""Build the audit data for a token.
If ``parent_audit_id`` is None, the list will be one element in length
containing a newly generated audit_id.
If ``parent_audit_id`` is supplied, the list will be two elements in length
containing a newly generated audit_id and the ``parent_audit_id``. The
``parent_audit_id`` will always be element index 1 in the resulting
list.
:param parent_audit_id: the audit of the original token in the chain
:type parent_audit_id: str
:returns: Keystone token audit data
"""
audit_id = random_urlsafe_str()
if parent_audit_id is not None:
return [audit_id, parent_audit_id]
return [audit_id]
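# Illustrative sketch (not part of the original module): the audit trail shapes described
# in the docstring above, using hypothetical audit ids.
#
#     audit_info(None)            -> ['3T2dc1nGS6i6nWstGoUbvg']
#     audit_info('parent-audit')  -> ['4kB0eY8SQ1arpjRMSLJqyw', 'parent-audit']  # parent at index 1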
@dependency.provider('token_provider_api')
@dependency.requires('assignment_api', 'revoke_api')
class Manager(manager.Manager):
"""Default pivot point for the token provider backend.
See :mod:`keystone.common.manager.Manager` for more details on how this
dynamically calls the backend.
"""
driver_namespace = 'keystone.token.provider'
V2 = V2
V3 = V3
VERSIONS = VERSIONS
INVALIDATE_PROJECT_TOKEN_PERSISTENCE = 'invalidate_project_tokens'
INVALIDATE_USER_TOKEN_PERSISTENCE = 'invalidate_user_tokens'
_persistence_manager = None
def __init__(self):
super(Manager, self).__init__(CONF.token.provider)
self._register_callback_listeners()
def _register_callback_listeners(self):
# This is used by the @dependency.provider decorator to register the
# provider (token_provider_api) manager to listen for trust deletions.
callbacks = {
notifications.ACTIONS.deleted: [
['OS-TRUST:trust', self._trust_deleted_event_callback],
['user', self._delete_user_tokens_callback],
['domain', self._delete_domain_tokens_callback],
],
notifications.ACTIONS.disabled: [
['user', self._delete_user_tokens_callback],
['domain', self._delete_domain_tokens_callback],
['project', self._delete_project_tokens_callback],
],
notifications.ACTIONS.internal: [
[notifications.INVALIDATE_USER_TOKEN_PERSISTENCE,
self._delete_user_tokens_callback],
[notifications.INVALIDATE_USER_PROJECT_TOKEN_PERSISTENCE,
self._delete_user_project_tokens_callback],
[notifications.INVALIDATE_USER_OAUTH_CONSUMER_TOKENS,
self._delete_user_oauth_consumer_tokens_callback],
]
}
for event, cb_info in callbacks.items():
for resource_type, callback_fns in cb_info:
notifications.register_event_callback(event, resource_type,
callback_fns)
@property
def _needs_persistence(self):
return self.driver.needs_persistence()
@property
def _persistence(self):
# NOTE(morganfainberg): This should not be handled via __init__ to
        # avoid dependency injection oddities and circular dependencies (where
# the provider manager requires the token persistence manager, which
# requires the token provider manager).
if self._persistence_manager is None:
self._persistence_manager = persistence.PersistenceManager()
return self._persistence_manager
def _create_token(self, token_id, token_data):
try:
if isinstance(token_data['expires'], six.string_types):
token_data['expires'] = timeutils.normalize_time(
timeutils.parse_isotime(token_data['expires']))
self._persistence.create_token(token_id, token_data)
except Exception:
exc_info = sys.exc_info()
# an identical token may have been created already.
# if so, return the token_data as it is also identical
try:
self._persistence.get_token(token_id)
except exception.TokenNotFound:
six.reraise(*exc_info)
def validate_token(self, token_id, belongs_to=None):
unique_id = utils.generate_unique_id(token_id)
# NOTE(morganfainberg): Ensure we never use the long-form token_id
# (PKI) as part of the cache_key.
token = self._validate_token(unique_id)
self._token_belongs_to(token, belongs_to)
self._is_valid_token(token)
return token
def check_revocation_v2(self, token):
try:
token_data = token['access']
except KeyError:
raise exception.TokenNotFound(_('Failed to validate token'))
token_values = self.revoke_api.model.build_token_values_v2(
token_data, CONF.identity.default_domain_id)
self.revoke_api.check_token(token_values)
def validate_v2_token(self, token_id, belongs_to=None):
# NOTE(lbragstad): Only go to the persistence backend if the token
# provider requires it.
if self._needs_persistence:
# NOTE(morganfainberg): Ensure we never use the long-form token_id
# (PKI) as part of the cache_key.
unique_id = utils.generate_unique_id(token_id)
token_ref = self._persistence.get_token(unique_id)
token = self._validate_v2_token(token_ref)
else:
# NOTE(lbragstad): If the token doesn't require persistence, then
# it is a fernet token. The fernet token provider doesn't care if
# it's creating version 2.0 tokens or v3 tokens, so we use the same
# validate_non_persistent_token() method to validate both. Then we
# can leverage a separate method to make version 3 token data look
# like version 2.0 token data. The pattern we want to move towards
# is one where the token providers just handle data and the
# controller layers handle interpreting the token data in a format
# that makes sense for the request.
v3_token_ref = self.validate_non_persistent_token(token_id)
v2_token_data_helper = providers.common.V2TokenDataHelper()
token = v2_token_data_helper.v3_to_v2_token(v3_token_ref)
# these are common things that happen regardless of token provider
token['access']['token']['id'] = token_id
self._token_belongs_to(token, belongs_to)
self._is_valid_token(token)
return token
def check_revocation_v3(self, token):
try:
token_data = token['token']
except KeyError:
raise exception.TokenNotFound(_('Failed to validate token'))
token_values = self.revoke_api.model.build_token_values(token_data)
self.revoke_api.check_token(token_values)
def check_revocation(self, token):
version = self.get_token_version(token)
if version == V2:
return self.check_revocation_v2(token)
else:
return self.check_revocation_v3(token)
def validate_v3_token(self, token_id):
if not token_id:
raise exception.TokenNotFound(_('No token in the request'))
# NOTE(lbragstad): Only go to persistent storage if we have a token to
# fetch from the backend (the driver persists the token). Otherwise
# the information about the token must be in the token id.
if not self._needs_persistence:
token_ref = self.validate_non_persistent_token(token_id)
else:
unique_id = utils.generate_unique_id(token_id)
# NOTE(morganfainberg): Ensure we never use the long-form token_id
# (PKI) as part of the cache_key.
token_ref = self._persistence.get_token(unique_id)
token_ref = self._validate_v3_token(token_ref)
self._is_valid_token(token_ref)
return token_ref
@MEMOIZE
def _validate_token(self, token_id):
if not token_id:
raise exception.TokenNotFound(_('No token in the request'))
if not self._needs_persistence:
# NOTE(lbragstad): This will validate v2 and v3 non-persistent
# tokens.
return self.driver.validate_non_persistent_token(token_id)
token_ref = self._persistence.get_token(token_id)
version = self.get_token_version(token_ref)
if version == self.V3:
return self.driver.validate_v3_token(token_ref)
elif version == self.V2:
return self.driver.validate_v2_token(token_ref)
raise exception.UnsupportedTokenVersionException()
@MEMOIZE
def _validate_v2_token(self, token_id):
return self.driver.validate_v2_token(token_id)
@MEMOIZE
def _validate_v3_token(self, token_id):
return self.driver.validate_v3_token(token_id)
def _is_valid_token(self, token):
"""Verify the token is valid format and has not expired."""
current_time = timeutils.normalize_time(timeutils.utcnow())
try:
# Get the data we need from the correct location (V2 and V3 tokens
            # differ in structure; try V3 first, fall back to V2 second)
token_data = token.get('token', token.get('access'))
expires_at = token_data.get('expires_at',
token_data.get('expires'))
if not expires_at:
expires_at = token_data['token']['expires']
expiry = timeutils.normalize_time(
timeutils.parse_isotime(expires_at))
except Exception:
LOG.exception(_LE('Unexpected error or malformed token '
'determining token expiry: %s'), token)
raise exception.TokenNotFound(_('Failed to validate token'))
if current_time < expiry:
self.check_revocation(token)
# Token has not expired and has not been revoked.
return None
else:
raise exception.TokenNotFound(_('Failed to validate token'))
def _token_belongs_to(self, token, belongs_to):
"""Check if the token belongs to the right tenant.
This is only used on v2 tokens. The structural validity of the token
will have already been checked before this method is called.
"""
if belongs_to:
token_data = token['access']['token']
if ('tenant' not in token_data or
token_data['tenant']['id'] != belongs_to):
raise exception.Unauthorized()
def issue_v2_token(self, token_ref, roles_ref=None, catalog_ref=None):
token_id, token_data = self.driver.issue_v2_token(
token_ref, roles_ref, catalog_ref)
if self._needs_persistence:
data = dict(key=token_id,
id=token_id,
expires=token_data['access']['token']['expires'],
user=token_ref['user'],
tenant=token_ref['tenant'],
metadata=token_ref['metadata'],
token_data=token_data,
bind=token_ref.get('bind'),
trust_id=token_ref['metadata'].get('trust_id'),
token_version=self.V2)
self._create_token(token_id, data)
return token_id, token_data
def issue_v3_token(self, user_id, method_names, expires_at=None,
project_id=None, domain_id=None, auth_context=None,
trust=None, metadata_ref=None, include_catalog=True,
parent_audit_id=None):
token_id, token_data = self.driver.issue_v3_token(
user_id, method_names, expires_at, project_id, domain_id,
auth_context, trust, metadata_ref, include_catalog,
parent_audit_id)
if metadata_ref is None:
metadata_ref = {}
if 'project' in token_data['token']:
# project-scoped token, fill in the v2 token data
            # all we care about are the role IDs
# FIXME(gyee): is there really a need to store roles in metadata?
role_ids = [r['id'] for r in token_data['token']['roles']]
metadata_ref = {'roles': role_ids}
if trust:
metadata_ref.setdefault('trust_id', trust['id'])
metadata_ref.setdefault('trustee_user_id',
trust['trustee_user_id'])
data = dict(key=token_id,
id=token_id,
expires=token_data['token']['expires_at'],
user=token_data['token']['user'],
tenant=token_data['token'].get('project'),
metadata=metadata_ref,
token_data=token_data,
trust_id=trust['id'] if trust else None,
token_version=self.V3)
if self._needs_persistence:
self._create_token(token_id, data)
return token_id, token_data
def invalidate_individual_token_cache(self, token_id):
# NOTE(morganfainberg): invalidate takes the exact same arguments as
# the normal method, this means we need to pass "self" in (which gets
# stripped off).
# FIXME(morganfainberg): Does this cache actually need to be
# invalidated? We maintain a cached revocation list, which should be
# consulted before accepting a token as valid. For now we will
# do the explicit individual token invalidation.
self._validate_token.invalidate(self, token_id)
self._validate_v2_token.invalidate(self, token_id)
self._validate_v3_token.invalidate(self, token_id)
def revoke_token(self, token_id, revoke_chain=False):
revoke_by_expires = False
project_id = None
domain_id = None
token_ref = token_model.KeystoneToken(
token_id=token_id,
token_data=self.validate_token(token_id))
user_id = token_ref.user_id
expires_at = token_ref.expires
audit_id = token_ref.audit_id
audit_chain_id = token_ref.audit_chain_id
if token_ref.project_scoped:
project_id = token_ref.project_id
if token_ref.domain_scoped:
domain_id = token_ref.domain_id
if audit_id is None and not revoke_chain:
LOG.debug('Received token with no audit_id.')
revoke_by_expires = True
if audit_chain_id is None and revoke_chain:
LOG.debug('Received token with no audit_chain_id.')
revoke_by_expires = True
if revoke_by_expires:
self.revoke_api.revoke_by_expiration(user_id, expires_at,
project_id=project_id,
domain_id=domain_id)
elif revoke_chain:
self.revoke_api.revoke_by_audit_chain_id(audit_chain_id,
project_id=project_id,
domain_id=domain_id)
else:
self.revoke_api.revoke_by_audit_id(audit_id)
if CONF.token.revoke_by_id and self._needs_persistence:
self._persistence.delete_token(token_id=token_id)
def list_revoked_tokens(self):
return self._persistence.list_revoked_tokens()
def _trust_deleted_event_callback(self, service, resource_type, operation,
payload):
if CONF.token.revoke_by_id:
trust_id = payload['resource_info']
trust = self.trust_api.get_trust(trust_id, deleted=True)
self._persistence.delete_tokens(user_id=trust['trustor_user_id'],
trust_id=trust_id)
def _delete_user_tokens_callback(self, service, resource_type, operation,
payload):
if CONF.token.revoke_by_id:
user_id = payload['resource_info']
self._persistence.delete_tokens_for_user(user_id)
def _delete_domain_tokens_callback(self, service, resource_type,
operation, payload):
if CONF.token.revoke_by_id:
domain_id = payload['resource_info']
self._persistence.delete_tokens_for_domain(domain_id=domain_id)
def _delete_user_project_tokens_callback(self, service, resource_type,
operation, payload):
if CONF.token.revoke_by_id:
user_id = payload['resource_info']['user_id']
project_id = payload['resource_info']['project_id']
self._persistence.delete_tokens_for_user(user_id=user_id,
project_id=project_id)
def _delete_project_tokens_callback(self, service, resource_type,
operation, payload):
if CONF.token.revoke_by_id:
project_id = payload['resource_info']
self._persistence.delete_tokens_for_users(
self.assignment_api.list_user_ids_for_project(project_id),
project_id=project_id)
def _delete_user_oauth_consumer_tokens_callback(self, service,
resource_type, operation,
payload):
if CONF.token.revoke_by_id:
user_id = payload['resource_info']['user_id']
consumer_id = payload['resource_info']['consumer_id']
self._persistence.delete_tokens(user_id=user_id,
consumer_id=consumer_id)
@six.add_metaclass(abc.ABCMeta)
class Provider(object):
"""Interface description for a Token provider."""
@abc.abstractmethod
def needs_persistence(self):
"""Determine if the token should be persisted.
If the token provider requires that the token be persisted to a
        backend, this should return True; otherwise return False.
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def get_token_version(self, token_data):
"""Return the version of the given token data.
If the given token data is unrecognizable,
UnsupportedTokenVersionException is raised.
:param token_data: token_data
:type token_data: dict
:returns: token version string
:raises keystone.exception.UnsupportedTokenVersionException:
If the token version is not expected.
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def issue_v2_token(self, token_ref, roles_ref=None, catalog_ref=None):
"""Issue a V2 token.
:param token_ref: token data to generate token from
:type token_ref: dict
:param roles_ref: optional roles list
:type roles_ref: dict
:param catalog_ref: optional catalog information
:type catalog_ref: dict
:returns: (token_id, token_data)
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def issue_v3_token(self, user_id, method_names, expires_at=None,
project_id=None, domain_id=None, auth_context=None,
trust=None, metadata_ref=None, include_catalog=True,
parent_audit_id=None):
"""Issue a V3 Token.
:param user_id: identity of the user
:type user_id: string
:param method_names: names of authentication methods
:type method_names: list
:param expires_at: optional time the token will expire
:type expires_at: string
:param project_id: optional project identity
:type project_id: string
:param domain_id: optional domain identity
:type domain_id: string
:param auth_context: optional context from the authorization plugins
:type auth_context: dict
:param trust: optional trust reference
:type trust: dict
:param metadata_ref: optional metadata reference
:type metadata_ref: dict
:param include_catalog: optional, include the catalog in token data
:type include_catalog: boolean
:param parent_audit_id: optional, the audit id of the parent token
:type parent_audit_id: string
:returns: (token_id, token_data)
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def validate_v2_token(self, token_ref):
"""Validate the given V2 token and return the token data.
Must raise Unauthorized exception if unable to validate token.
:param token_ref: the token reference
:type token_ref: dict
:returns: token data
:raises keystone.exception.TokenNotFound: If the token doesn't exist.
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def validate_non_persistent_token(self, token_id):
"""Validate a given non-persistent token id and return the token_data.
:param token_id: the token id
:type token_id: string
:returns: token data
:raises keystone.exception.TokenNotFound: When the token is invalid
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def validate_v3_token(self, token_ref):
"""Validate the given V3 token and return the token_data.
:param token_ref: the token reference
:type token_ref: dict
:returns: token data
:raises keystone.exception.TokenNotFound: If the token doesn't exist.
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def _get_token_id(self, token_data):
"""Generate the token_id based upon the data in token_data.
:param token_data: token information
:type token_data: dict
:returns: token identifier
:rtype: six.text_type
"""
raise exception.NotImplemented() # pragma: no cover
|
dims/keystone
|
keystone/token/provider.py
|
Python
|
apache-2.0
| 25,056
|
import os
import shutil
import unittest
import zopkio.runtime as runtime
class Args:
def __init__(self):
self.output_dir = None
self.log_level = "INFO"
self.console_level = "INFO"
self.machine_list = None
self.config_overrides = None
self.user = None
self.password = None
self.test_list = None
self.nopassword = True
class TestZopkioMainRunner(unittest.TestCase):
"""
    Test zopkio at an integrated level, running zopkio test suites which
    themselves contain failures and successes, and compare the expected
    failure/success counts at the end of the zopkio-level integrated test run
"""
def _run_zopkio(self, args):
import sys, os.path
pwd = os.path.abspath('.')
try:
os.chdir(os.path.join(os.path.dirname(__file__),".."))
sys.args = args
print("Running 'zopkio %s %s'"%(args.testfile, args.nopassword))
from zopkio import __main__ as main
succeeded, failed = main.call_main(args)
except:
os.chdir( pwd )
raise
else:
return succeeded, failed
def test_zopkio_launch(self):
"""
Run server client test suites and
compare to expected outcome on test failures/successes
"""
runtime.reset_all()
args = Args()
args.testfile = "./examples/server_client/server_client.py"
succeeded, failed = self._run_zopkio(args)
self.assertTrue( succeeded >= 4)
self.assertTrue( failed >= 16)
if __name__ == '__main__':
unittest.main()
|
arpras/Zopkio
|
test/test_zopkio.py
|
Python
|
apache-2.0
| 1,464
|
# Shared knowledge: save to the cloud
def SaveMemory(question,reponse,silent,justPredicates):
sleep(0.5)
chatBot.savePredicates()
if justPredicates==0:
ServerResponse="0"
RetourServer=Parse("http://www.myai.cloud/shared_memory.php?action=update&question="+urllib2.quote(question)+"&reponse="+urllib2.quote(reponse.replace("'", " ")))
print "http://www.myai.cloud/shared_memory.php?action=update&question="+urllib2.quote(question)+"&reponse="+urllib2.quote(reponse.replace("'", " "))
		if silent != 1:
chatBot.getResponse("SAVEMEMORY")
# Shared knowledge: save to the local ProgramAB config file
def SaveMemoryPersonal(question,ReturnSubject,record):
if str(record)=="0":
valueQuestion=chatBot.getPredicate("default",question).decode( "utf8" )
if valueQuestion=="unknown":
chatBot.getResponse("SaveMemoryPersonal "+unicode(ReturnSubject,'utf-8')+" "+unicode(question,'utf-8'))
else:
chatBot.getResponse(unicode(ReturnSubject,'utf-8') + " " + unicode(question,'utf-8') + " LECTURENOM " + " " + unicode(valueQuestion,'utf-8'))
else:
chatBot.setPredicate("default",question,record)
chatBot.savePredicates()
def ClearMemory():
chatBot.setPredicate("default","topic","default")
chatBot.setPredicate("default","QUESTION_WhoOrWhat","")
chatBot.setPredicate("default","QUESTION_sujet","")
chatBot.setPredicate("default","QUESTION_action","")
def QueryMemory(question,retourNok,retourOk):
RetourServer=Parse("http://www.myai.cloud/shared_memory.php?action=select&question="+urllib2.quote(question))
if RetourServer!="" and RetourServer!="0":
chatBot.getResponse(retourOk + " " + RetourServer)
else:
chatBot.getResponse(retourNok)
|
MyRobotLab/pyrobotlab
|
home/moz4r/deprecated/Inmoov/InmoovScript_InmoovAI/INMOOV-AI_memory.py
|
Python
|
apache-2.0
| 1,664
|
import multiprocessing
import json
import time
import sys
import re
import logging
from collections import defaultdict
from jinja2 import Template
from scipy.interpolate import spline
from scipy.signal import butter, lfilter
import matplotlib.pyplot as plt
import numpy as np
"""
stats = {
'switches': defaultdict(dict)
}
"""
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
low_limit = 100
high_limit = 3000
def _read_pipe(stats):
count = 0
logger.info('Ready, reading from pipe')
while True:
with open('/dev/shm/poxpipe','r') as pipe:
data = pipe.read()
p = multiprocessing.Process(target=_read_data, args=(data,stats))
p.start()
count += 1
if count % 10 == 0:
pass
#print(stats)
#time.sleep(1)
def _read_data(data, stats):
texts = data.split('#')
for text in texts:
if len(text) > 0:
text = json.loads(text)
if text['type'] == 'switch_portstats':
dpid = text['data']['switch']
# mutate the dictionary
d = stats[0]
                # assign values to the dictionary
d['switches'][dpid]['port_stats'] = text['data']['stats']
# at this point, changes are not still synced
# to sync, reassing the dictionary
stats[0] = d
if text['type'] == 'switch_flowstats':
dpid = text['data']['switch']
d = stats[0]
d['switches'][dpid]['flow_stats'] = text['data']['stats']
stats[0] = d
if text['type'] == 'linkstats':
(dpid1, port1),(dpid2, port2) = text['data']['link']
up = 1 if text['data']['up'] is True else 0
d = stats[0]
d['links'][dpid1][port1] = up
d['links'][dpid2][port2] = up
# Hack: conn to controller is always up
d['links'][dpid1][65534] = 1
d['links'][dpid2][65534] = 1
stats[0] = d
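# Illustrative sketch (not part of the original module): the pipe carries '#'-separated
# JSON fragments; a hypothetical link-status message that _read_data() handles above has
# the shape
#
#     {"type": "linkstats",
#      "data": {"link": [[<dpid1>, <port1>], [<dpid2>, <port2>]], "up": true}}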
def default_True():
return 1
def default_zero():
return 0
def defaultdict_with_zero():
return defaultdict(default_zero)
def default_list():
return defaultdict(list)
def _process_stats(stats, stats_before, stats_processed):
for switch_dpid, switch in stats[0]['switches'].items():
d = stats_processed[0]
d['switches'][switch_dpid]['port_stats'] = list()
d_stats = stats[0]
# Process traffic data with port stats
if not switch.get('port_stats') is None:
for port_stat in switch['port_stats']:
port_no = port_stat['port_no']
rx_before, tx_before = 0,0
for port in stats_before[0]['switches'][switch_dpid].get('port_stats',list()):
if port['port_no'] == port_no:
rx_before = port['rx_packets']
tx_before = port['tx_packets']
# difference between now and before in no of packets
rx_diff = port_stat['rx_packets'] - rx_before
tx_diff = port_stat['tx_packets'] - tx_before
new_data = {'port_no': port_no, 'new_rx_packets': rx_diff, 'new_tx_packets': tx_diff,
'rx_packets': port_stat['rx_packets'], 'tx_packets': port_stat['tx_packets']}
d['switches'][switch_dpid]['port_stats'].append(new_data)
d['switches'][switch_dpid]['flow_stats'] = defaultdict(defaultdict_with_zero)
# Process traffic data with flow stats
if not switch.get('flow_stats') is None:
hosts_stats = defaultdict(defaultdict_with_zero)
# Aggregate all flow stats
for flow_stat in switch['flow_stats']:
addr_dst = flow_stat['match'].get('dl_dst')
addr_src = flow_stat['match'].get('dl_src')
ip_addr = flow_stat['match'].get('nw_dst')
tp_dst = flow_stat['match'].get('tp_dst')
# L2 stats
if not addr_dst is None:
addr_dst = _address_to_dec(addr_dst, separator=':')
hosts_stats[addr_dst]['packets_in'] += flow_stat['packet_count']
hosts_stats[addr_dst]['bytes_in'] += flow_stat['byte_count']
# L2 stats
if not addr_src is None:
addr_src = _address_to_dec(addr_src, separator=':')
hosts_stats[addr_src]['packets_out'] += flow_stat['packet_count']
hosts_stats[addr_src]['bytes_out'] += flow_stat['byte_count']
# L3 stats
if not ip_addr is None and not tp_dst is None:
addr = _ip_addres_to_dec(ip_addr)
hosts_stats[addr]['packets'] += flow_stat['packet_count']
hosts_stats[addr]['bytes'] += flow_stat['byte_count']
for addr, addr_stats in hosts_stats.items():
if not switch.get('flow_stats_aggr') is None and not switch['flow_stats_aggr'].get(addr) is None:
# in_diff = addr_stats['packets_in'] - switch['flow_stats_aggr'].get(addr)['packets_in']
# d['switches'][switch_dpid]['flow_stats'][addr]['new_packets_in'] = in_diff
# d['switches'][switch_dpid]['flow_stats'][addr]['packets_in'] = addr_stats['packets_in']
# out_diff = addr_stats['packets_out'] - switch['flow_stats_aggr'].get(addr)['packets_out']
# d['switches'][switch_dpid]['flow_stats'][addr]['new_packets_out'] = out_diff
# d['switches'][switch_dpid]['flow_stats'][addr]['packets_out'] = addr_stats['packets_out']
diff = addr_stats['packets'] - switch['flow_stats_aggr'].get(addr)['packets']
d['switches'][switch_dpid]['flow_stats'][addr]['new_packets'] = diff
                    d['switches'][switch_dpid]['flow_stats'][addr]['packets'] = addr_stats['packets']
d_stats['switches'][switch_dpid]['flow_stats_aggr'] = hosts_stats
stats[0] = d_stats
stats_processed[0] = d
stats_before[0] = stats[0]
# def default_to_regular(d):
# if isinstance(d, defaultdict):
# print('converting')
# d = {k: default_to_regular(v) for k, v in d.items()}
# return d
def port_status(switch, port, stats):
up = stats[0]['links'].get(switch,{}).get(port,None)
if up is None:
return '?'
if up:
return 'up'
return 'down'
def _address_to_dec(dpid, separator='-'):
non_zero = ''.join([n for n in str(dpid).split(separator) if not n == '00'])
return int('0x{}'.format(str(non_zero)), 16)
def _ip_addres_to_dec(addr):
return addr.split('/')[0].split('.')[-1]
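# Illustrative examples (not part of the original module) for the two address helpers above:
#
#     _address_to_dec('00-00-00-00-00-12')  ->  18    # drops the '00' groups, parses hex
#     _ip_addres_to_dec('10.0.0.7/24')      ->  '7'   # last octet, returned as a string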
def _soft_plot(x,y):
if x.shape[0] < 5:
return x, y
soft_factor = 10 * x.shape[0]
xnew = np.linspace(x.min(), x.max(), soft_factor)
try:
smooth = spline(x,y,xnew)
b, a = butter(1, 0.01, 'low', analog=False)
filtered = lfilter(b, a, smooth)
# compute the error made by filtering the data
# u,v = smooth.sum(), filtered.sum()
# print((u-v)/u)
except ValueError:
return x,y
return xnew, filtered
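# Illustrative note (not part of the original module): _soft_plot() above resamples the
# series onto a 10x denser grid with a cubic spline, then smooths it with a first-order
# Butterworth low-pass filter (normalized cutoff 0.01, applied via lfilter); series with
# fewer than 5 points are returned unchanged.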
def _print_graphs(stats_history):
stats_data = stats_history[:]
stats_data = stats_data[0]
switch_list = [dpid for dpid, data in stats_data['switches'].items()]
switch_list = sorted(switch_list)
port_rx_imgs = [None] * len(switch_list)
port_tx_imgs = [None] * len(switch_list)
flows_imgs = [None] * len(switch_list)
flows_in_imgs = [None] * len(switch_list)
flows_out_imgs = [None] * len(switch_list)
for switch_i, switch_dpid in enumerate(switch_list):
if not stats_data['switches'][switch_dpid].get('port_stats') is None:
port_list = [p for p, data in stats_data['switches'][switch_dpid]['port_stats'].items()]
plt.figure()
rx = np.array(list())
x_rx = np.array(list())
for port_no in port_list:
rx = stats_data['switches'][switch_dpid]['port_stats'][port_no]['new_rx_packets']
rx = np.array(rx)
x_rx = np.arange(len(rx))
status = stats_data['switches'][switch_dpid]['port_stats'][port_no]['port_status']
label = str(port_no) + ': ' + status
x_rx, rx = _soft_plot(x_rx, rx)
plt.plot(x_rx, rx, label=label)
plt.legend(loc='upper left')
img_path = 'img/{}_port_rx.png'.format(switch_dpid)
port_rx_imgs[switch_i] = img_path
plt.savefig('../web/img/{}_port_rx.png'.format(switch_dpid))
plt.close()
plt.figure()
tx = np.array(list())
x_tx = np.array(list())
for port_no in port_list:
tx = stats_data['switches'][switch_dpid]['port_stats'][port_no]['new_tx_packets']
tx = np.array(tx)
x_tx = np.arange(len(tx))
status = stats_data['switches'][switch_dpid]['port_stats'][port_no]['port_status']
label = str(port_no) + ': ' + status
x_tx, tx = _soft_plot(x_tx, tx)
plt.plot(x_tx, tx, label=label)
plt.legend(loc='upper left')
img_path = 'img/{}_port_tx.png'.format(switch_dpid)
port_tx_imgs[switch_i] = img_path
plt.savefig('../web/img/{}_port_tx.png'.format(switch_dpid))
plt.close()
if not stats_data['switches'][switch_dpid].get('flow_stats') is None:
# print(stats_data['switches'][switch_dpid].get('flow_stats'))
hosts_list = [host_no for host_no, data in stats_data['switches'][switch_dpid]['flow_stats'].items()]
fig = plt.figure()
# the only flow that is installed from the beginning
controller_host_no = 19079169
x_length = len(stats_data['switches'][switch_dpid]['flow_stats'][controller_host_no]['new_packets'])
alarm = False
for host_no in hosts_list:
in_ = stats_data['switches'][switch_dpid]['flow_stats'][host_no]['new_packets']
in_ = np.array(in_)
if in_.shape[0] < x_length:
length_append = x_length - in_.shape[0]
in_ = np.concatenate((np.zeros(length_append), in_), axis=0)
x_in_ = np.arange(in_.shape[0])
x_in_, in_ = _soft_plot(x_in_, in_)
if in_[-1] < low_limit:
if not host_no == controller_host_no:
                        if in_[-1] > 10:  # do not count hosts with only negligible traffic
alarm = True
# elif in_[-1] > high_limit:
# alarm = True
label = '{}:80'.format(str(host_no))
plt.plot(x_in_, in_, label=str(host_no))
if alarm:
fig.patch.set_facecolor('red')
plt.plot(x_in_, np.array([low_limit]*len(in_)))
# plt.plot(x_in_, np.array([high_limit]*len(in_)))
plt.legend(loc='upper left')
img_path = 'img/{}_flows.png'.format(switch_dpid)
flows_imgs[switch_i] = img_path
plt.savefig('../web/img/{}_flows.png'.format(switch_dpid), facecolor=fig.get_facecolor())
plt.close()
# x_length = len(stats_data['switches'][switch_dpid]['flow_stats'][controller_host_no]['new_packets_out'])
# for host_no in hosts_list:
# out_ = stats_data['switches'][switch_dpid]['flow_stats'][host_no]['new_packets_out']
# out_ = np.array(out_)
# if len(in_) < x_length:
# out_ = np.concatenate((np.zeros(x_length), out_), axis=0)
# x_out_ = np.arange(out_.shape[0])
# x_out_, out_ = _soft_plot(x_out_, out_)
# plt.plot(x_out_, out_, label=str(host_no))
# plt.legend(loc='upper left')
# img_path = 'img/{}_flows_out.png'.format(switch_dpid)
# flows_out_imgs[switch_i] = img_path
# plt.savefig('../web/img/{}_flows_out.png'.format(switch_dpid))
# plt.close()
with open('../web/visualize.html') as template_file:
template = Template(template_file.read())
with open('../web/index.html', 'w') as index:
print('Writing')
index.write(template.render(switches=switch_list,
port_rx_imgs=port_rx_imgs,
port_tx_imgs=port_tx_imgs,
flows_imgs=flows_imgs,
flows_out_imgs=flows_out_imgs))
def _print_stats(stats, stats_before, stats_processed, stats_history):
count = 0
while True:
time.sleep(5)
count += 1
print(count)
if count == 1:
continue
if count == 2:
stats_before[0] = stats[0]
continue
_process_stats(stats, stats_before, stats_processed)
# message = []
# for switch_dpid, values in stats_processed[0]['switches'].items():
# micro_msg = '{0} ({1})\n'.format(switch_dpid,_address_to_dec(switch_dpid))
# micro_msg += ' -> ports: '
# for port_stat in values['port_stats']:
# micro_msg += ' {0}({5})=> rx:{1}[{3}], tx:{2}[{4}];'.format(port_stat['port_no'],
# port_stat['new_rx_packets'],port_stat['new_tx_packets'],
# port_stat['rx_packets'], port_stat['tx_packets'],
# port_status(_address_to_dec(switch_dpid), int(port_stat['port_no']), stats))
# micro_msg += '\n -> flows: '
# for host_no, host_stats in values['flow_stats'].items():
# micro_msg += ' {0}=> in:{1}[{3}], out:{2}[{4}];'.format(host_no,
# host_stats['new_packets_in'], host_stats['new_packets_out'],
# host_stats['packets_in'], host_stats['packets_out'])
# message.append(micro_msg)
# message = sorted(message, key=lambda msg: re.search('((.*))',msg).group(1))
# message = '\n'.join(message)
# with open('/dev/shm/monitor-stats.log','w') as out:
# out.write(message)
d = stats_history[0]
if count == 3:
for switch_dpid, values in stats_processed[0]['switches'].items():
d['switches'][switch_dpid]['port_stats'] = defaultdict(default_list)
d['switches'][switch_dpid]['flow_stats'] = defaultdict(default_list)
stats_history[0] = d
continue
for switch_dpid, values in stats_processed[0]['switches'].items():
for port_stat in values['port_stats']:
port_no = port_stat['port_no']
try:
d['switches'][switch_dpid]['port_stats'][port_no]['new_rx_packets'].append(port_stat['new_rx_packets'])
d['switches'][switch_dpid]['port_stats'][port_no]['new_tx_packets'].append(port_stat['new_tx_packets'])
d['switches'][switch_dpid]['port_stats'][port_no]['port_no'] = port_no
port_status_ = port_status(_address_to_dec(switch_dpid), int(port_stat['port_no']), stats)
d['switches'][switch_dpid]['port_stats'][port_no]['port_status'] = port_status_
except Exception as e:
print('Error:', e)
continue
for host_no, host_stats in values['flow_stats'].items():
try:
# d['switches'][switch_dpid]['flow_stats'][host_no]['new_packets_in'].append(host_stats['new_packets_in'])
# d['switches'][switch_dpid]['flow_stats'][host_no]['new_packets_out'].append(host_stats['new_packets_out'])
d['switches'][switch_dpid]['flow_stats'][host_no]['new_packets'].append(host_stats['new_packets'])
d['switches'][switch_dpid]['flow_stats'][host_no]['host_no'] = host_no
except Exception as e:
print('Error:',e)
continue
stats_history[0] = d
_print_graphs(stats_history)
if __name__ == '__main__':
logger.info('Starting subprocesses')
manager = multiprocessing.Manager()
# create a list proxy and append a mutable object (dict)
stats = manager.list()
stats.append({'switches':defaultdict(dict), 'links': defaultdict(dict)})
stats_before = manager.list()
stats_before.append({'switches':defaultdict(dict)})
stats_processed = manager.list()
stats_processed.append({'switches':defaultdict(dict)})
stats_history = manager.list()
stats_history.append({'switches':defaultdict(dict)})
printer = multiprocessing.Process(target=_print_stats,
args=(stats,stats_before, stats_processed, stats_history))
printer.start()
try:
_read_pipe(stats)
except KeyboardInterrupt:
printer.terminate()
printer.join()
sys.exit(1)
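

# --- Hedged illustrative sketch (not part of the original monitor code) -----
# The processing above relies on multiprocessing.Manager() list proxies:
# mutating a nested dict in place is not propagated through the proxy, which
# is why the loops re-assign whole elements (e.g. ``stats_history[0] = d``)
# after every update. The helper below is a minimal, hypothetical
# demonstration of that behaviour; the name ``_manager_proxy_demo`` does not
# exist in this project.
def _manager_proxy_demo():
    import multiprocessing

    manager = multiprocessing.Manager()
    shared = manager.list()
    shared.append({'count': 0})

    d = shared[0]       # indexing returns a copy of the stored dict
    d['count'] += 1     # mutating the copy alone is not seen by the proxy
    shared[0] = d       # re-assign so the manager stores the updated dict
    return shared[0]['count']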
|
nachtkatze/sdn-diagnosis
|
monitor/monitor.py
|
Python
|
apache-2.0
| 17,272
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
import json
from datetime import datetime
from django.contrib.auth.models import User
from django.test import TestCase
from django.test.client import Client
from django.core.urlresolvers import reverse
from apps.core.models import Layer, LayerMeta, UserFavoriteLayer
class AbstractLayerTestCase(TestCase):
def setUp(self):
self.client = Client()
self.logged_in_user = 'bob'
self.other_user = 'steve'
self.invalid_user = 'mike'
self.usernames = [self.logged_in_user, self.other_user]
self.save_users()
self.setup_models()
self.client.login(username=self.logged_in_user,
password=self.logged_in_user)
def save_user(self, username):
email = '%s@%s.com' % (username, username)
return User.objects.create_user(username=username,
email=email,
password=username)
def save_users(self):
self.user_models = {}
for username in self.usernames:
self.user_models[username] = self.save_user(username)
def make_layer(self, layer_name, is_public=False):
layer = {
'name': layer_name,
'is_public': is_public,
'capture_start': '2015-08-15',
'capture_end': '2015-08-15',
'images': [
{
'file_name': 'foo.png',
's3_uuid': 'a8098c1a-f86e-11da-bd1a-00112444be1e',
'file_extension': 'png',
},
{
'file_name': 'bar.png',
's3_uuid': 'a8098c1a-f86e-11da-bd1a-00112444be1e',
'file_extension': 'png',
},
],
'tags': [
layer_name
],
'area': 0,
}
return layer
def save_layer(self, layer, user):
url = reverse('create_layer', kwargs={'username': user.username})
response = self.client.post(url,
json.dumps(layer),
content_type='application/json')
Layer.objects.all().update(status_completed=datetime.now())
return response
def setup_models(self):
"""
Create a public and private layer for each user.
"""
for username in self.usernames:
self.client.login(username=username, password=username)
user = self.user_models[username]
layer_name = username + ' Public Layer'
layer = self.make_layer(layer_name, is_public=True)
self.save_layer(layer, user)
layer_name = username + ' Private Layer'
layer = self.make_layer(layer_name, is_public=False)
self.save_layer(layer, user)
def make_many_layers(self):
"""
Create 30 public layers (15 for each user).
"""
for username in self.usernames:
self.client.login(username=username, password=username)
user = self.user_models[username]
for i in range(0, 15):
layer_name = username + ' Public Layer ' + str(i)
layer = self.make_layer(layer_name, is_public=True)
self.save_layer(layer, user)
Layer.objects.all().update(status_completed=datetime.now())
class LayerTestCase(AbstractLayerTestCase):
# Create
def test_create_layer(self):
user = self.user_models[self.logged_in_user]
        layer = self.make_layer('Test Layer', is_public=True)
response = self.save_layer(layer, user)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data['images']), 2)
self.assertEqual(len(response.data['tags']), 1)
self.assertEqual(Layer.objects.filter(name='Test Layer').count(), 1)
def test_create_layer_no_permission(self):
user = self.user_models[self.other_user]
        layer = self.make_layer('Test Layer', is_public=True)
response = self.save_layer(layer, user)
self.assertEqual(response.status_code, 401)
def test_create_layer_date_errors(self):
# start is after end
layer = {
'name': 'n',
'capture_start': '2015-08-15',
'capture_end': '2015-08-14',
}
user = self.user_models[self.logged_in_user]
response = self.save_layer(layer, user)
self.assertEqual(response.status_code, 403)
# no capture dates
layer = {
'name': 'n'
}
response = self.save_layer(layer, user)
self.assertEqual(response.status_code, 200)
# capture start but no end
layer = {
'name': 'n',
'capture_start': '2015-08-15',
}
response = self.save_layer(layer, user)
self.assertEqual(response.status_code, 403)
        # capture end but no start
        layer = {
            'name': 'n',
            'capture_end': '2015-08-15',
        }
response = self.save_layer(layer, user)
self.assertEqual(response.status_code, 403)
# Modify
def test_modify_layer(self):
orig_name = 'Test Modify Layer'
user = self.user_models[self.logged_in_user]
        layer = self.make_layer(orig_name, is_public=True)
response = self.save_layer(layer, user)
new_name = 'Test Modify Layer 2'
layer = json.loads(response.content)
layer['name'] = new_name
url = reverse('layer_detail', kwargs={
'username': user.username,
'layer_id': layer['id']
})
response = self.client.put(url,
json.dumps(layer),
content_type='application/json')
self.assertEqual(response.status_code, 200)
self.assertEqual(Layer.objects.filter(name=orig_name).count(), 0)
self.assertEqual(Layer.objects.filter(name=new_name).count(), 1)
# List
def test_list_all_layers(self):
url = reverse('catalog')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
# 2 layers from logged_in_user, and 1 public layer from other_user
self.assertEqual(response.data['pages'], 1)
self.assertEqual(response.data['current_page'], 1)
self.assertEqual(len(response.data['layers']), 3)
def test_list_layers_from_logged_in_user(self):
url = reverse('user_layers', kwargs={'username': self.logged_in_user})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data['layers']), 2)
def test_list_layers_from_other_user(self):
url = reverse('user_layers', kwargs={'username': self.other_user})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data['layers']), 1)
def test_list_layers_from_invalid_user(self):
url = reverse('user_layers', kwargs={'username': self.invalid_user})
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
# Filtering and Sorting
def test_list_layers_with_filter_by_name_from_logged_in_user(self):
url = reverse('user_layers', kwargs={'username': self.logged_in_user})
url += '?name=%s Public Layer' % (self.logged_in_user,)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
layers = response.data['layers']
self.assertEqual(len(layers), 1)
self.assertEqual(layers[0]['name'],
'%s Public Layer' % (self.logged_in_user,))
def test_list_layers_with_filter_by_tag_from_logged_in_user(self):
url = reverse('user_layers', kwargs={'username': self.logged_in_user})
url += '?tag=%s+Public+Layer' % (self.logged_in_user,)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
layers = response.data['layers']
self.assertEqual(len(layers), 1)
self.assertEqual(layers[0]['name'],
'%s Public Layer' % (self.logged_in_user,))
url = reverse('user_layers', kwargs={'username': self.logged_in_user})
url += '?tag=invalid+tag'
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data['layers']), 0)
def test_list_layers_with_sorting_from_logged_in_user(self):
url = reverse('user_layers', kwargs={'username': self.logged_in_user})
url += '?o=name'
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
layers = response.data['layers']
self.assertEqual(len(layers), 2)
self.assertEqual(layers[0]['name'],
'%s Private Layer' % (self.logged_in_user,))
self.assertEqual(layers[1]['name'],
'%s Public Layer' % (self.logged_in_user,))
# Retrieve
def test_retrieve_layer_from_logged_in_user(self):
user = self.user_models[self.logged_in_user]
layer = Layer.objects.filter(user=user, is_public=True)[0]
url = reverse('layer_detail', kwargs={
'username': user.username,
'layer_id': layer.id,
})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data['name'],
'%s Public Layer' % (self.logged_in_user,))
def test_retrieve_invalid_layer_from_logged_in_user(self):
url = reverse('layer_detail', kwargs={
'username': self.logged_in_user,
'layer_id': 100,
})
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_retrieve_public_layer_from_other_user(self):
user = self.user_models[self.other_user]
layer = Layer.objects.filter(user=user, is_public=True)[0]
url = reverse('layer_detail', kwargs={
'username': user.username,
'layer_id': layer.id,
})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data['name'],
'%s Public Layer' % (self.other_user,))
def test_retrieve_private_layer_from_other_user(self):
user = self.user_models[self.other_user]
layer = Layer.objects.filter(user=user, is_public=False)[0]
url = reverse('layer_detail', kwargs={
'username': user.username,
'layer_id': layer.id,
})
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
# Retrieve LayerMeta
def test_retrieve_meta_from_logged_in_user(self):
user = self.user_models[self.logged_in_user]
layer = Layer.objects.filter(user=user, is_public=True)[0]
url = reverse('layer_meta', kwargs={
'username': user.username,
'layer_id': layer.id,
})
response = self.client.get(url)
# TODO: Test that meta yields "CREATED"
LayerMeta.objects.create(layer=layer, state='IN PROGRESS')
LayerMeta.objects.create(layer=layer, state='DONE')
url = reverse('layer_meta', kwargs={
'username': user.username,
'layer_id': layer.id,
})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data['state'], 'DONE')
# Destroy
def test_destroy_layer_from_logged_in_user(self):
user = self.user_models[self.logged_in_user]
layer = Layer.objects.filter(user=user, is_public=True)[0]
url = reverse('layer_detail', kwargs={
'username': user.username,
'layer_id': layer.id,
})
response = self.client.delete(url)
self.assertEqual(response.status_code, 200)
def test_destroy_invalid_layer_from_logged_in_user(self):
url = reverse('layer_detail', kwargs={
'username': self.logged_in_user,
'layer_id': 100,
})
response = self.client.delete(url)
self.assertEqual(response.status_code, 404)
def test_destroy_layer_from_other_user(self):
user = self.user_models[self.other_user]
layer = Layer.objects.filter(user=user, is_public=True)[0]
url = reverse('layer_detail', kwargs={
'username': user.username,
'layer_id': layer.id,
})
response = self.client.delete(url)
self.assertEqual(response.status_code, 401)
layer = Layer.objects.filter(user=user, is_public=False)[0]
url = reverse('layer_detail', kwargs={
'username': user.username,
'layer_id': layer.id,
})
response = self.client.delete(url)
self.assertEqual(response.status_code, 404)
class FavoriteTestCase(AbstractLayerTestCase):
def setup_models(self):
super(FavoriteTestCase, self).setup_models()
self.setup_favorites()
def save_favorite(self, layer, user):
return UserFavoriteLayer.objects.create(layer=layer, user=user)
def setup_favorites(self):
# logged_in_user favorites other_user's public layer
layer = Layer.objects.filter(user__username=self.other_user,
is_public=True)[0]
self.save_favorite(layer, self.user_models[self.logged_in_user])
# Create
def test_create_favorite_from_other_user_public_layer(self):
user = self.user_models[self.other_user]
layer = Layer.objects.filter(user=user, is_public=True)[0]
url = reverse('create_or_destroy_favorite', kwargs={
'layer_id': layer.id,
})
response = self.client.post(url)
self.assertEqual(response.status_code, 200)
favorites = UserFavoriteLayer.objects.filter(
user__username=self.logged_in_user)
self.assertEqual(len(favorites), 1)
self.assertEqual(favorites[0].layer, layer)
def test_create_favorite_from_other_user_private_layer(self):
user = self.user_models[self.other_user]
layer = Layer.objects.filter(user=user, is_public=False)[0]
url = reverse('create_or_destroy_favorite', kwargs={
'layer_id': layer.id,
})
response = self.client.post(url)
self.assertEqual(response.status_code, 404)
# List
def test_list_favorites_from_logged_in_user(self):
url = reverse('favorites')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data['layers']), 1)
# Destroy
def test_destroy_favorite_from_logged_in_user(self):
user = self.user_models[self.other_user]
layer = Layer.objects.filter(user=user, is_public=False)[0]
url = reverse('create_or_destroy_favorite', kwargs={
'layer_id': layer.id,
})
response = self.client.delete(url)
self.assertEqual(response.status_code, 404)
def test_destroy_invalid_favorite_from_logged_in_user(self):
url = reverse('create_or_destroy_favorite', kwargs={
'layer_id': 100,
})
response = self.client.delete(url)
self.assertEqual(response.status_code, 404)
def test_destroy_favorite_from_other_user(self):
user = self.user_models[self.other_user]
layer = Layer.objects.filter(user=user, is_public=True)[0]
url = reverse('create_or_destroy_favorite', kwargs={
'layer_id': layer.id,
})
response = self.client.delete(url)
self.assertEqual(response.status_code, 200)
class PaginationTestCase(AbstractLayerTestCase):
def setup_models(self):
Layer.objects.all().delete()
super(PaginationTestCase, self).make_many_layers()
def test_pagination(self):
def pagination_assertion(response, pages, current, layers):
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 5)
self.assertEqual(int(response.data['pages']), pages)
self.assertEqual(int(response.data['current_page']), current)
self.assertEqual(len(response.data['layers']), layers)
url = reverse('catalog')
response = self.client.get(url)
        # 30 layers from 2 users (15 each)
pagination_assertion(response, 3, 1, 10)
# Page 2.
response = self.client.get(url + '?page=2')
pagination_assertion(response, 3, 2, 10)
# Page 3.
response = self.client.get(url + '?page=3')
pagination_assertion(response, 3, 3, 10)
# Numbers greater than the last page should return the last page.
response = self.client.get(url + '?page=4')
pagination_assertion(response, 3, 3, 10)
# Non numbers should return the first page.
response = self.client.get(url + '?page=foo')
pagination_assertion(response, 3, 1, 10)
# page_size=0 should return all layers on a single page
response = self.client.get(url + '?page=foo&page_size=0')
pagination_assertion(response, 1, 1, 30)
# page_size=5 should return 5 layers per page
response = self.client.get(url + '?page_size=5')
pagination_assertion(response, 6, 1, 5)
class OrderingAndFilteringTestCase(AbstractLayerTestCase):
layer_data = [
{
'name': 'Alpha',
'tag': 'tag1',
'area': 3,
'capture_start': '2015-08-15',
'capture_end': '2015-08-15',
'srid': 'mercator'
},
{
'name': 'Beta',
'tag': 'tag2',
'area': 4,
'capture_start': '2015-08-19',
'capture_end': '2015-08-19',
'srid': 'mercator'
},
{
'name': 'Gamma',
'tag': 'tag3',
'area': 5,
'capture_start': '2015-08-08',
'capture_end': '2015-08-08',
'srid': '4326'
},
{
'name': 'Delta',
'tag': 'tag4',
'area': 6,
'capture_start': '2015-08-02',
'capture_end': '2015-08-02',
'srid': 'utm'
},
{
'name': 'Epsilon',
'tag': 'tag4',
'area': 1,
'capture_start': '2015-08-22',
'capture_end': '2015-08-22',
'srid': 'epsg'
},
{
'name': 'Zeta',
'tag': 'tag5',
'area': 2,
'capture_start': '2015-08-21',
'capture_end': '2015-08-21',
'srid': '4326'
}
]
def setup_models(self):
Layer.objects.all().delete()
username = self.usernames[0]
self.client.login(username=username, password=username)
user = self.user_models[username]
for data in self.layer_data:
layer_name = data['name']
tag = data['tag']
layer = self.make_layer(layer_name, is_public=True)
layer['tags'] = [tag]
layer['area'] = data['area']
            layer['capture_start'] = data['capture_start']
layer['capture_end'] = data['capture_end']
layer['srid'] = data['srid']
# Organization name is the reverse of the name.
layer['organization'] = data['name'][::-1]
self.save_layer(layer, user)
def confirm_order(self, layers, order):
for i in range(0, len(layers)):
layer_index = order[i] - 1
self.assertEqual(layers[i]['name'],
self.layer_data[layer_index]['name'])
def test_area_ordering(self):
url = reverse('catalog')
response = self.client.get(url + '?o=area')
self.assertEqual(int(response.data['pages']), 1)
self.assertEqual(int(response.data['current_page']), 1)
layers = response.data['layers']
self.assertEqual(len(layers), 6)
self.confirm_order(layers, [5, 6, 1, 2, 3, 4])
def test_start_ordering(self):
url = reverse('catalog')
response = self.client.get(url + '?o=capture_start')
layers = response.data['layers']
self.assertEqual(len(layers), 6)
self.confirm_order(layers, [4, 3, 1, 2, 6, 5])
def test_end_ordering(self):
url = reverse('catalog')
response = self.client.get(url + '?o=capture_end')
layers = response.data['layers']
self.assertEqual(len(layers), 6)
self.confirm_order(layers, [4, 3, 1, 2, 6, 5])
def test_srid_ordering(self):
url = reverse('catalog')
response = self.client.get(url + '?o=srid')
layers = response.data['layers']
self.assertEqual(len(layers), 6)
order = ['4326', '4326', 'epsg', 'mercator', 'mercator', 'utm']
for i in range(0, 6):
self.assertEqual(layers[i]['srid'], order[i])
def test_filter_tag1(self):
url = reverse('catalog')
response = self.client.get(url + '?name_search=ta')
layers = response.data['layers']
self.assertEqual(len(layers), 6)
def test_filter_tag2(self):
url = reverse('catalog')
response = self.client.get(url + '?name_search=tag4')
layers = response.data['layers']
self.assertEqual(len(layers), 2)
def test_filter_tag3(self):
url = reverse('catalog')
response = self.client.get(url + '?name_search=et')
layers = response.data['layers']
self.assertEqual(len(layers), 2)
def test_filter_tag4(self):
url = reverse('catalog')
response = self.client.get(url + '?name_search=gamma')
layers = response.data['layers']
self.assertEqual(len(layers), 1)
def test_filter_tag5(self):
url = reverse('catalog')
response = self.client.get(url + '?name_search=ammag')
layers = response.data['layers']
self.assertEqual(len(layers), 1)
class PaginationSortingAndFilteringTestCase(AbstractLayerTestCase):
def setup_models(self):
Layer.objects.all().delete()
username = self.usernames[0]
self.client.login(username=username, password=username)
user = self.user_models[username]
# Add one layer that can be filtered out.
layer_name = 'TK'
layer = self.make_layer(layer_name, is_public=True)
layer['tags'] = ['TKTK']
layer['area'] = 10
layer['capture_start'] = '2015-01-01'
layer['capture_end'] = '2015-01-01'
layer['organization'] = 'TK'
self.save_layer(layer, user)
# Add 30 layers.
super(PaginationSortingAndFilteringTestCase, self).make_many_layers()
def test_next_prev_pages(self):
url = reverse('catalog')
response = self.client.get(url)
self.assertEqual(response.data['prev_url'], None)
self.assertEqual(response.data['next_url'], '/catalog.json?page=2')
self.assertEqual(int(response.data['pages']), 4)
# Filter down to 30 results.
response = self.client.get(url + '?name_search=pub&o=area')
self.assertEqual(response.data['prev_url'], None)
self.assertEqual(response.data['next_url'],
'/catalog.json?name_search=pub&page=2&o=area')
self.assertEqual(int(response.data['pages']), 3)
# Go to page 2
response = self.client.get(url + '?name_search=pub&page=2&o=area')
self.assertEqual(response.data['prev_url'],
'/catalog.json?name_search=pub&page=1&o=area')
self.assertEqual(response.data['next_url'],
'/catalog.json?name_search=pub&page=3&o=area')
# Go to page 3
response = self.client.get(url + '?name_search=pub&page=3&o=area')
self.assertEqual(response.data['prev_url'],
'/catalog.json?name_search=pub&page=2&o=area')
self.assertEqual(response.data['next_url'], None)
|
kdeloach/raster-foundry
|
src/rf/apps/home/tests.py
|
Python
|
apache-2.0
| 24,353
|
# ====================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ====================================================================
import sys, lucene, unittest
from PyLuceneTestCase import PyLuceneTestCase
from org.apache.lucene.analysis.core import WhitespaceAnalyzer
from org.apache.lucene.document import Document
from org.apache.lucene.index import DirectoryReader, IndexWriterConfig
from org.apache.pylucene.index import PythonIndexDeletionPolicy
class MyDeletionPolicy(PythonIndexDeletionPolicy):
onInitCalled = False
onCommitCalled = False
def onInit(self, commits):
self.onInitCalled = True
def onCommit(self, commits):
self.onCommitCalled = True
class IndexDeletionPolicyTestCase(PyLuceneTestCase):
def getConfig(self, analyzer):
self.policy = MyDeletionPolicy()
config = IndexWriterConfig(analyzer)
config.setIndexDeletionPolicy(self.policy)
return config
def testIndexDeletionPolicy(self):
writer = self.getWriter()
# no commits exist in the index yet
self.assertTrue(self.policy.onInitCalled)
# we haven't called commit yet
self.assertFalse(self.policy.onCommitCalled)
doc = Document()
writer.addDocument(doc)
writer.commit()
# now we called commit
self.assertTrue(self.policy.onCommitCalled)
# external IR sees 1 commit:
self.assertEquals(1, DirectoryReader.listCommits(self.directory).size())
# commit again:
writer.addDocument(doc)
writer.commit()
# external IR sees 2 commits:
self.assertEquals(2, DirectoryReader.listCommits(self.directory).size())
writer.close()
# open same index, make sure both commits survived:
writer = self.getWriter()
self.assertTrue(self.policy.onInitCalled)
self.assertFalse(self.policy.onCommitCalled)
self.assertEquals(2, DirectoryReader.listCommits(self.directory).size())
writer.close()
# 3 from closing writer again
self.assertEquals(3, DirectoryReader.listCommits(self.directory).size())
if __name__ == "__main__":
lucene.initVM()
if '-loop' in sys.argv:
sys.argv.remove('-loop')
while True:
try:
unittest.main()
except:
pass
else:
unittest.main()
|
svn2github/pylucene
|
test2/test_IndexDeletionPolicy.py
|
Python
|
apache-2.0
| 2,964
|
from __future__ import print_function
import os
import sys
import networkx as nx
import pandas as pd
import re
# -----------------------------------------------------------------------------
def msplit(string, delims):
s = string
for d in delims:
rep = d + '\n'
s = rep.join(x for x in s.split(d))
return s
def segment_long_labels(string, maxlen=7, delims=[]):
if (not delims) and (len(string) > maxlen):
# return '\n'.join(string[i:i+maxlen]
# for i in range(0, len(string), maxlen))
return "\n".join(re.findall("(?s).{,"+str(maxlen)+"}", string))[:-1]
elif len(string) > maxlen:
return msplit(string, delims)
else:
return string
# -----------------------------------------------------------------------------
def draw_sys_layout(G, comp_df,
out_dir="",
out_file="system_layout",
graph_label="System Layout",
orientation="TB",
connector_type="spline",
clustering=False):
"""Draws the component configuration for a given infrastructure system."""
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Set up output file names & location
if not out_dir.strip():
output_path = os.getcwd()
else:
output_path = out_dir
fname = out_file.split(os.extsep)[0] # strip away file ext and add our own
sys_config_dot = os.path.join(output_path, fname + '.dot')
if orientation.upper() not in ['TB', 'LR', 'RL', 'BT']:
orientation = 'TB'
if connector_type.lower() not in ['spline', 'ortho', 'line',
'polyline', 'curved']:
connector_type = 'spline'
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Draw graph using pygraphviz, and define general node & edge attributes
A = nx.nx_agraph.to_agraph(G)
A.graph_attr.update(
resolution=200,
directed=True,
labelloc="t",
label='< <BR/>'+graph_label+'<BR/> >',
rankdir=orientation,
ranksep="1.0 equally",
splines=connector_type,
center="true",
forcelabels="true",
pad=0.2,
nodesep=0.4,
fontname="Helvetica-Bold",
fontcolor="#444444",
fontsize=26,
smoothing="graph_dist",
concentrate="true",
)
A.node_attr.update(
shape="circle",
style="rounded,filled",
fixedsize="true",
width=1.8,
height=1.8,
xlp="0, 0",
color="royalblue3", # gray14
fillcolor="white",
fontcolor="royalblue3", # gray14
penwidth=1.5,
fontname="Helvetica-Bold",
fontsize=18,
)
A.edge_attr.update(
arrowhead="normal",
arrowsize="1.0",
style="bold",
color="royalblue2", # gray12
penwidth=1.2,
)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Customise nodes based on node type or defined clusters
for node in comp_df.index.values.tolist():
label_mod = segment_long_labels(node, delims=['_', ' '])
A.get_node(node).attr['label'] = label_mod
        if str(comp_df.loc[node]['node_type']).lower() == 'supply':
A.get_node(node).attr['label'] = \
segment_long_labels(node, maxlen=12, delims=['_', ' '])
A.get_node(node).attr.update(
label=A.get_node(node).attr['label'],
shape="rect",
style="rounded,filled",
fixedsize="true",
color="limegreen",
fillcolor="white",
fontcolor="limegreen",
penwidth=2.0,
height=1.2,
width=2.2,
)
        if str(comp_df.loc[node]['node_type']).lower() == 'sink':
A.get_node(node).attr.update(
shape="doublecircle",
rank="sink",
penwidth=2.0,
color="orangered", # royalblue3
fillcolor="white",
fontcolor="orangered", # royalblue3
)
        if str(comp_df.loc[node]['node_type']).lower() == 'dependency':
A.get_node(node).attr.update(
shape="circle",
rank="dependency",
penwidth=3.5,
color="orchid",
fillcolor="white",
fontcolor="orchid"
)
    if clustering:
for cluster in pd.unique(comp_df['node_cluster'].values):
grp = comp_df[comp_df['node_cluster'] == cluster].\
index.values.tolist()
cluster = '_'.join(cluster.split())
if cluster.lower() not in ['none', '']:
cluster_name = 'cluster_'+cluster
rank = 'same'
else:
cluster_name = ''
rank = ''
A.add_subgraph(
nbunch=grp,
name=cluster_name,
style='invis',
label='',
clusterrank='local',
rank=rank,
)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
A.write(sys_config_dot)
A.draw(os.path.join(output_path, fname + '.png'),
format='png', prog='dot')
A.draw(os.path.join(output_path, fname + '.svg'),
format='svg', prog='dot', args='-Gsize=11,8\! -Gdpi=300')
# -----------------------------------------------------------------------------
def main():
from sifraclasses import Scenario, PowerStation, PotableWaterTreatmentPlant
SETUPFILE = sys.argv[1]
discard = {}
config = {}
exec (open(SETUPFILE).read(), discard, config)
print("Setting up objects...")
FacilityObj = eval(config["SYSTEM_CLASS"])
sc = Scenario(SETUPFILE)
fc = FacilityObj(SETUPFILE)
# Define input files, output location, scenario inputs
# SYS_CONFIG_FILE = os.path.join(scn.input_path, sysobj.sys_config_file_name)
print("Initiating drawing network model schematic...")
fc.network.network_setup(fc)
print("Drawing complete.\n")
if __name__ == "__main__":
main()
'''
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
*** Required Development: Absolute Node Positioning ***
i.e. the ability to specify exact location of each node on the canvas.
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
The implementation could use this advice from graphviz developer:
http://www.graphviz.org/content/set-positions-node#comment-1771
Most of the Graphviz layout algorithms ignore position information. Indeed,
setting initial positions doesn't fit well with what the algorithms are trying
to do. The general idea is to specify more abstract constraints and then let
the algorithm do its best. That said, neato and fdp do allow you to provide
initial position information. Simply set the pos attribute in your input graph.
For example,
graph G { abc [pos="200,300"] }
(If you run fdp or neato, use the -s flag to make sure the coordinates are
interpreted as point values. Also, if you provide positions,
you may find neato -Kmode=KK better.) For more information, see
http://www.graphviz.org/content/attrs#dpos
http://www.graphviz.org/content/attrs#kpoint
If you know all of the node positions, you can use:
neato -n or neato -n2 (without -s) to do edge routing followed by rendering.
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
'''
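

# --- Hedged sketch: pinning node positions with pygraphviz ------------------
# A minimal, hypothetical illustration of the graphviz advice quoted above; it
# is not part of sifra, and the node names, coordinates and the helper name
# ``_fixed_position_demo`` are made up. Positions supplied via ``pos`` (with a
# trailing "!") are honoured by neato/fdp, and ``neato -n2`` keeps the given
# coordinates and only performs edge routing and rendering.
def _fixed_position_demo(out_path='fixed_layout.png'):
    import pygraphviz as pgv

    A = pgv.AGraph(directed=True)
    A.add_node('abc', pos='200,300!')   # x,y in points; "!" pins the node
    A.add_node('xyz', pos='400,300!')
    A.add_edge('abc', 'xyz')
    # '-n2' tells neato to use the supplied positions as-is.
    A.draw(out_path, format='png', prog='neato', args='-n2')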
|
gasuperdev/sifra
|
sifra/systemlayout.py
|
Python
|
apache-2.0
| 7,677
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model constructor for Tensorflow state-level models."""
from typing import Dict, List
import numpy as np
import tensorflow as tf
from covid_epidemiology.src import constants
from covid_epidemiology.src.models import generic_seir_model_constructor
from covid_epidemiology.src.models import losses
from covid_epidemiology.src.models.shared import model_utils
class StateModelConstructor(generic_seir_model_constructor.ModelConstructor):
"""Constructs a state Tensorflow model, to be used in tf_seir."""
def __init__(self, model_spec, random_seed=0):
super(StateModelConstructor, self).__init__(model_spec, random_seed)
self.num_states = 17
def extract_prediction(self, all_states):
"""Extract the death and confirmed predictions."""
confirmed_all = list()
death_all = list()
for curr_state in all_states:
# pylint: disable=unused-variable
(exposed_t, infected_d_t, infected_ud_t, recovered_d_t, recovered_ud_t,
hospitalized_t, hospitalized_cumulative_t, hospitalized_increase_t,
icu_t, ventilator_t, death_t, population_t, reinfectable_d_t,
reinfectable_ud_t, reinfectable_vaccine_t, vaccine_immuned_t,
infected_ud_increase_t) = tf.unstack(curr_state)
# Include ICU and Ventilator since they are separate compartments.
confirmed_t = (
infected_d_t + recovered_d_t + hospitalized_t + icu_t + ventilator_t +
death_t + reinfectable_d_t)
confirmed_all.append(confirmed_t)
death_all.append(death_t)
return {"confirmed": confirmed_all, "death": death_all}
def compute_coef(self,
ground_truth_timeseries,
ground_truth_state,
num_train_steps,
num_known_steps,
power=2.0):
"""Compute train/valid coefficients for loss computation.
Args:
ground_truth_timeseries: ground truth compartments
ground_truth_state: ground truth state level compartments
num_train_steps: number of timesteps for training
num_known_steps: number of known timesteps
power: 2 for MSE and 1 for MAE
Returns:
      train_coefs: training coefficients for each compartment
      valid_coefs: valid coefficients for each compartment
"""
(_, gt_list, gt_indicator, _, _) = ground_truth_timeseries
# Recovered
recovered_train, recovered_valid = model_utils.compartment_base(
gt_list["recovered"], gt_indicator["recovered"], num_train_steps,
num_known_steps)
# Death
death_train, death_valid = model_utils.compartment_base(
gt_list["death"], gt_indicator["death"], num_train_steps,
num_known_steps)
# Confirmed
confirmed_train, confirmed_valid = model_utils.compartment_base(
gt_list["confirmed"], gt_indicator["confirmed"], num_train_steps,
num_known_steps)
# Hospitalized
hospitalized_train, hospitalized_valid = model_utils.compartment_base(
gt_list["hospitalized"], gt_indicator["hospitalized"], num_train_steps,
num_known_steps)
# Hospitalized cumulative
hospitalized_cumulative_train, hospitalized_cumulative_valid = model_utils.compartment_base(
gt_list["hospitalized_cumulative"],
gt_indicator["hospitalized_cumulative"], num_train_steps,
num_known_steps)
# ICU
icu_train, icu_valid = model_utils.compartment_base(gt_list["icu"],
gt_indicator["icu"],
num_train_steps,
num_known_steps)
# Ventilator
ventilator_train, ventilator_valid = model_utils.compartment_base(
gt_list["ventilator"], gt_indicator["ventilator"], num_train_steps,
num_known_steps)
train_coefs = [
0, (death_train / recovered_train)**power, 1,
(death_train / confirmed_train)**power,
(death_train / hospitalized_train)**power,
(death_train / hospitalized_cumulative_train)**power,
(death_train / icu_train)**power,
(death_train / ventilator_train)**power
]
valid_coefs = [
0, (death_valid / recovered_valid)**power, 1,
(death_valid / confirmed_valid)**power,
(death_valid / hospitalized_valid)**power,
(death_valid / hospitalized_cumulative_valid)**power,
(death_valid / icu_valid)**power,
(death_valid / ventilator_valid)**power
]
train_coefs = np.nan_to_num(train_coefs).tolist()
valid_coefs = np.nan_to_num(valid_coefs).tolist()
return train_coefs, valid_coefs
def seir_dynamics(self, current_state, seir_variables):
"""Model dynamics."""
(first_dose_vaccine_ratio_per_day, second_dose_vaccine_ratio_per_day,
average_contact_id, average_contact_iud, reinfectable_rate, alpha,
diagnosis_rate, recovery_rate_id, recovery_rate_iud, recovery_rate_h,
recovery_rate_i, recovery_rate_v, hospitalization_rate, icu_rate,
ventilator_rate, death_rate_id, death_rate_h, death_rate_i,
death_rate_v) = seir_variables
# pylint: disable=unused-variable
(exposed_t, infected_d_t, infected_ud_t, recovered_d_t, recovered_ud_t,
hospitalized_t, hospitalized_cumulative_t, hospitalized_increase_t, icu_t,
ventilator_t, death_t, population_t, reinfectable_d_t, reinfectable_ud_t,
reinfectable_vaccine_t, vaccine_immuned_t,
infected_ud_increase_t) = tf.unstack(current_state)
# Setting the susceptible so that the population adds up to a constant.
normalized_susceptible_t = 1.0 - (
exposed_t + infected_d_t + infected_ud_t + recovered_d_t +
recovered_ud_t + hospitalized_t + icu_t + ventilator_t + death_t +
vaccine_immuned_t) / population_t
normalized_susceptible_t = tf.nn.relu(normalized_susceptible_t)
# Differential change on vaccine immuned.
d_vaccine_immuned_dt = (
first_dose_vaccine_ratio_per_day * population_t +
second_dose_vaccine_ratio_per_day * population_t -
reinfectable_vaccine_t - vaccine_immuned_t)
# Differential change on reinfectable after vaccination.
d_reinfectable_vaccine_dt = vaccine_immuned_t * 1.0 / constants.VACCINE_IMMUNITY_DURATION
# Differential change on exposed
d_exposed_dt = (average_contact_id * infected_d_t +
average_contact_iud * infected_ud_t
) * normalized_susceptible_t - alpha * exposed_t
# Differential change on infected, documented and undocumented
d_infected_d_dt = (
diagnosis_rate * infected_ud_t - recovery_rate_id * infected_d_t -
death_rate_id * infected_d_t - hospitalization_rate * infected_d_t)
d_infected_ud_dt = (
alpha * exposed_t - diagnosis_rate * infected_ud_t -
recovery_rate_iud * infected_ud_t)
d_infected_ud_increase_dt = alpha * exposed_t - infected_ud_increase_t
# Differential change on recovered, documented and undocumented
d_recovered_d_dt = (
recovery_rate_id * infected_d_t + recovery_rate_h * hospitalized_t -
reinfectable_rate * recovered_d_t)
d_recovered_ud_dt = (
recovery_rate_iud * infected_ud_t - reinfectable_rate * recovered_ud_t)
# Differential change on hospitalized
d_hospitalized_d_dt = (
hospitalization_rate * infected_d_t -
(death_rate_h + recovery_rate_h + icu_rate) * hospitalized_t +
recovery_rate_i * icu_t)
d_hospitalized_cumulative_d_dt = (hospitalization_rate * infected_d_t)
d_hospitalized_increase_d_dt = (
hospitalization_rate * infected_d_t - hospitalized_increase_t)
# Differential change on icu
d_icu_d_dt = (
icu_rate * hospitalized_t -
(death_rate_i + recovery_rate_i + ventilator_rate) * icu_t +
recovery_rate_v * ventilator_t)
# Differential change on ventilator
d_ventilator_d_dt = (
ventilator_rate * icu_t -
(death_rate_v + recovery_rate_v) * ventilator_t)
# Differential change on death, documented
d_death_d_dt = (
death_rate_id * infected_d_t + death_rate_h * hospitalized_t +
death_rate_i * icu_t + death_rate_v * ventilator_t)
# Differential change on recovered, who may get the disease again.
d_reinfectable_d_dt = reinfectable_rate * recovered_d_t
d_reinfectable_ud_dt = reinfectable_rate * recovered_ud_t
all_state_derivatives = [
d_exposed_dt, d_infected_d_dt, d_infected_ud_dt, d_recovered_d_dt,
d_recovered_ud_dt, d_hospitalized_d_dt, d_hospitalized_cumulative_d_dt,
d_hospitalized_increase_d_dt, d_icu_d_dt, d_ventilator_d_dt,
d_death_d_dt, -d_death_d_dt, d_reinfectable_d_dt, d_reinfectable_ud_dt,
d_reinfectable_vaccine_dt, d_vaccine_immuned_dt,
d_infected_ud_increase_dt
]
return tf.stack(all_state_derivatives)
def compute_losses(self,
hparams,
train_coefs,
valid_coefs,
propagated_states,
ground_truth_timeseries,
r_eff,
train_start_index,
train_end_index,
valid_start_index,
valid_end_index,
num_forecast_steps,
quantiles=None):
train_loss_coefs = hparams["train_loss_coefs"]
valid_loss_coefs = hparams["valid_loss_coefs"]
time_scale_weight = hparams["time_scale_weight"]
width_coef_train = hparams["width_coef_train"]
width_coef_valid = hparams["width_coef_valid"]
quantile_cum_viol_coef = hparams["quantile_cum_viol_coef"]
increment_loss_weight = hparams["increment_loss_weight"]
train_crps_weight = hparams["train_crps_weight"]
valid_crps_weight = hparams["valid_crps_weight"]
(_, gt_list, gt_indicator, _, _) = ground_truth_timeseries
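    # State indices below follow the stacking order used in seir_dynamics and
    # unpack_states: 0=exposed, 1=infected_d, 2=infected_ud, 3=recovered_d,
    # 4=recovered_ud, 5=hospitalized, 6=hospitalized_cumulative,
    # 7=hospitalized_increase, 8=icu, 9=ventilator, 10=death, 11=population,
    # 12=reinfectable_d, 13=reinfectable_ud, 14=reinfectable_vaccine,
    # 15=vaccine_immuned, 16=infected_ud_increase.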
unstacked_propagated_states = tf.unstack(propagated_states, axis=1)
pred_infected = unstacked_propagated_states[1]
pred_recovered = unstacked_propagated_states[3]
pred_hospitalized = unstacked_propagated_states[5]
pred_hospitalized_cumulative = unstacked_propagated_states[6]
pred_icu = unstacked_propagated_states[8]
pred_ventilator = unstacked_propagated_states[9]
pred_death = unstacked_propagated_states[10]
pred_reinfected = unstacked_propagated_states[12]
pred_confirmed = (
pred_infected + pred_recovered + pred_death + pred_hospitalized +
pred_icu + pred_ventilator + pred_reinfected)
train_start_index = tf.identity(train_start_index)
train_end_index = tf.identity(train_end_index)
valid_start_index = tf.identity(valid_start_index)
valid_end_index = tf.identity(valid_end_index)
if quantiles is not None:
quantiles = tf.constant(quantiles, dtype=tf.float32)
    # Use the quantile loss if quantile values are given.
def loss(pred_states,
gt_list,
gt_indicator,
train_start_index,
train_end_index,
valid_start_index,
valid_end_index,
time_scale_weight=0,
is_training=True):
if quantiles is not None:
if is_training:
train_loss = losses.weighted_interval_loss(
quantile_pred_states=pred_states,
tau_list=quantiles,
gt_list=gt_list,
gt_indicator=gt_indicator,
begin_timestep=train_start_index,
end_timestep=train_end_index,
time_scale_weight=time_scale_weight,
width_coef=width_coef_train)
valid_loss = losses.weighted_interval_loss(
quantile_pred_states=pred_states,
tau_list=quantiles,
gt_list=gt_list,
gt_indicator=gt_indicator,
begin_timestep=valid_start_index,
end_timestep=valid_end_index,
time_scale_weight=time_scale_weight,
width_coef=width_coef_train)
else:
train_loss = losses.weighted_interval_loss(
quantile_pred_states=pred_states,
tau_list=quantiles,
gt_list=gt_list,
gt_indicator=gt_indicator,
begin_timestep=train_start_index,
end_timestep=train_end_index,
time_scale_weight=time_scale_weight,
width_coef=width_coef_valid)
valid_loss = losses.weighted_interval_loss(
quantile_pred_states=pred_states,
tau_list=quantiles,
gt_list=gt_list,
gt_indicator=gt_indicator,
begin_timestep=valid_start_index,
end_timestep=valid_end_index,
time_scale_weight=time_scale_weight,
width_coef=width_coef_valid)
train_loss += train_crps_weight * losses.crps_loss(
quantile_pred_states=pred_states,
tau_list=quantiles,
gt_list=gt_list,
gt_indicator=gt_indicator,
begin_timestep=train_start_index,
end_timestep=train_end_index,
time_scale_weight=time_scale_weight)
valid_loss += valid_crps_weight * losses.crps_loss(
quantile_pred_states=pred_states,
tau_list=quantiles,
gt_list=gt_list,
gt_indicator=gt_indicator,
begin_timestep=valid_start_index,
end_timestep=valid_end_index,
time_scale_weight=time_scale_weight)
else:
train_loss = losses.state_estimation_loss(
pred_states=pred_states,
gt_list=gt_list,
gt_indicator=gt_indicator,
begin_timestep=train_start_index,
end_timestep=train_end_index,
time_scale_weight=time_scale_weight,
increment_loss_weight=increment_loss_weight,
num_forecast_steps=num_forecast_steps)
valid_loss = losses.state_estimation_loss(
pred_states=pred_states,
gt_list=gt_list,
gt_indicator=gt_indicator,
begin_timestep=valid_start_index,
end_timestep=valid_end_index,
time_scale_weight=time_scale_weight,
increment_loss_weight=increment_loss_weight,
num_forecast_steps=num_forecast_steps)
return train_loss, valid_loss
infected_doc_train_loss, infected_doc_valid_loss = loss(
pred_infected,
gt_list["infected"],
gt_indicator["infected"],
train_start_index,
train_end_index,
valid_start_index,
valid_end_index,
time_scale_weight=time_scale_weight)
recovered_doc_train_loss, recovered_doc_valid_loss = loss(
pred_recovered + pred_reinfected,
gt_list["recovered"],
gt_indicator["recovered"],
train_start_index,
train_end_index,
valid_start_index,
valid_end_index,
time_scale_weight=time_scale_weight)
death_train_loss, death_valid_loss = loss(
pred_death,
gt_list["death"],
gt_indicator["death"],
train_start_index,
train_end_index,
valid_start_index,
valid_end_index,
time_scale_weight=time_scale_weight)
hospitalized_train_loss, hospitalized_valid_loss = loss(
pred_hospitalized + pred_icu + pred_ventilator,
gt_list["hospitalized"],
gt_indicator["hospitalized"],
train_start_index,
train_end_index,
valid_start_index,
valid_end_index,
time_scale_weight=time_scale_weight)
hospitalized_cumulative_train_loss, hospitalized_cumulative_valid_loss = loss(
pred_hospitalized_cumulative,
gt_list["hospitalized_cumulative"],
gt_indicator["hospitalized_cumulative"],
train_start_index,
train_end_index,
valid_start_index,
valid_end_index,
time_scale_weight=time_scale_weight)
icu_train_loss, icu_valid_loss = loss(
pred_icu + pred_ventilator,
gt_list["icu"],
gt_indicator["icu"],
train_start_index,
train_end_index,
valid_start_index,
valid_end_index,
time_scale_weight=time_scale_weight)
ventilator_train_loss, ventilator_valid_loss = loss(
pred_ventilator,
gt_list["ventilator"],
gt_indicator["ventilator"],
train_start_index,
train_end_index,
valid_start_index,
valid_end_index,
time_scale_weight=time_scale_weight)
confirmed_train_loss, confirmed_valid_loss = loss(
pred_confirmed,
gt_list["confirmed"],
gt_indicator["confirmed"],
train_start_index,
train_end_index,
valid_start_index,
valid_end_index,
time_scale_weight=time_scale_weight)
train_loss_overall = (
train_coefs[0] * train_loss_coefs[0] * infected_doc_train_loss +
train_coefs[1] * train_loss_coefs[1] * recovered_doc_train_loss +
train_coefs[2] * train_loss_coefs[2] * death_train_loss +
train_coefs[3] * train_loss_coefs[3] * confirmed_train_loss +
train_coefs[4] * train_loss_coefs[4] * hospitalized_train_loss +
train_coefs[5] *
(train_loss_coefs[5] * hospitalized_cumulative_train_loss) +
train_coefs[6] * train_loss_coefs[6] * icu_train_loss +
train_coefs[7] * train_loss_coefs[7] * ventilator_train_loss)
valid_loss_overall = (
valid_coefs[0] * valid_loss_coefs[0] * infected_doc_valid_loss +
valid_coefs[1] * valid_loss_coefs[1] * recovered_doc_valid_loss +
valid_coefs[2] * valid_loss_coefs[2] * death_valid_loss +
valid_coefs[3] * valid_loss_coefs[3] * confirmed_valid_loss +
valid_coefs[4] * valid_loss_coefs[4] * hospitalized_valid_loss +
valid_coefs[5] *
(valid_loss_coefs[5] * hospitalized_cumulative_valid_loss) +
valid_coefs[6] * valid_loss_coefs[6] * icu_valid_loss +
valid_coefs[7] * valid_loss_coefs[7] * ventilator_valid_loss)
    # Loss for r_eff: penalize values above the configured cutoff.
if quantiles is None:
if r_eff is not None:
train_loss_overall += (
hparams["r_eff_penalty_coef"] * tf.math.reduce_mean(
tf.math.softplus(r_eff - hparams["r_eff_penalty_cutoff"])))
        # Calculate acceleration penalties.
train_loss_overall += (
hparams["acceleration_death_coef"] *
self.acceleration_loss(pred_death, 3))
train_loss_overall += (
hparams["acceleration_confirm_coef"] *
self.acceleration_loss(pred_confirmed, 3))
train_loss_overall += (
hparams["acceleration_hospital_coef"] *
self.acceleration_loss(pred_hospitalized, 3))
else:
# Quantile cumulative violation penalty
forecasting_horizon = valid_end_index - valid_start_index
train_violation_confirmed = losses.quantile_viol_loss(
forecasting_horizon, train_end_index, forecasting_horizon,
gt_indicator["confirmed"], gt_list["confirmed"], pred_confirmed)
train_violation_death = losses.quantile_viol_loss(
forecasting_horizon, train_end_index, forecasting_horizon,
gt_indicator["death"], gt_list["death"], pred_death)
train_loss_overall += quantile_cum_viol_coef * tf.reduce_mean(
train_violation_confirmed)
train_loss_overall += quantile_cum_viol_coef * tf.reduce_mean(
train_violation_death)
valid_violation_confirmed = losses.quantile_viol_loss(
valid_start_index, valid_end_index, forecasting_horizon,
gt_indicator["confirmed"], gt_list["confirmed"], pred_confirmed)
valid_violation_death = losses.quantile_viol_loss(
valid_start_index, valid_end_index, forecasting_horizon,
gt_indicator["death"], gt_list["death"], pred_death)
valid_loss_overall += quantile_cum_viol_coef * tf.reduce_mean(
valid_violation_confirmed)
valid_loss_overall += quantile_cum_viol_coef * tf.reduce_mean(
valid_violation_death)
return train_loss_overall, valid_loss_overall
def unpack_states(self,
chosen_location_list,
ground_truth_timeseries,
propagated_states,
propagated_variables,
num_forecast_steps,
quantile_regression=False):
# Assign in the desired dictionary form.
susceptible_f_all_locations = {}
exposed_f_all_locations = {}
infected_d_f_all_locations = {}
infected_ud_f_all_locations = {}
recovered_d_f_all_locations = {}
recovered_ud_f_all_locations = {}
death_d_f_all_locations = {}
death_horizon_ahead_d_f_all_locations = {}
confirmed_f_all_locations = {}
confirmed_horizon_ahead_d_f_all_locations = {}
hospitalized_f_all_locations = {}
hospitalized_increase_f_all_locations = {}
hospitalized_cumulative_f_all_locations = {}
icu_f_all_locations = {}
ventilator_f_all_locations = {}
reinfectable_d_f_all_locations = {}
reinfectable_ud_f_all_locations = {}
population_f_all_locations = {}
reinfectable_vaccine_f_all_locations = {}
vaccine_immuned_t_f_all_locations = {}
infected_ud_increase_f_all_locations = {}
for location_index, location in enumerate(chosen_location_list):
exposed_f_all_locations[
location] = propagated_states[:, 0, location_index].numpy()
infected_d_f_all_locations[
location] = propagated_states[:, 1, location_index].numpy()
infected_ud_f_all_locations[
location] = propagated_states[:, 2, location_index].numpy()
recovered_d_f_all_locations[location] = (
propagated_states[:, 3, location_index].numpy())
recovered_ud_f_all_locations[location] = (
propagated_states[:, 4, location_index].numpy())
hospitalized_f_all_locations[location] = (
propagated_states[:, 5, location_index].numpy() +
propagated_states[:, 8, location_index].numpy() +
propagated_states[:, 9, location_index].numpy())
hospitalized_increase_f_all_locations[
location] = propagated_states[:, 7, location_index].numpy()
hospitalized_cumulative_f_all_locations[
location] = propagated_states[:, 6, location_index].numpy()
icu_f_all_locations[location] = (
propagated_states[:, 8, location_index].numpy() +
propagated_states[:, 9, location_index].numpy())
ventilator_f_all_locations[
location] = propagated_states[:, 9, location_index].numpy()
death_d_f_all_locations[
location] = propagated_states[:, 10, location_index].numpy()
death_horizon_ahead_d_f_all_locations[location] = (
propagated_states[num_forecast_steps - 1:, 10,
location_index].numpy() -
propagated_states[:-num_forecast_steps + 1, 10,
location_index].numpy())
population_f_all_locations[
location] = propagated_states[:, 11, location_index].numpy()
reinfectable_d_f_all_locations[
location] = propagated_states[:, 12, location_index].numpy()
reinfectable_ud_f_all_locations[
location] = propagated_states[:, 13, location_index].numpy()
reinfectable_vaccine_f_all_locations[
location] = propagated_states[:, 14, location_index].numpy()
vaccine_immuned_t_f_all_locations[
location] = propagated_states[:, 15, location_index].numpy()
infected_ud_increase_f_all_locations[
location] = propagated_states[:, 16, location_index].numpy()
confirmed_f_all_locations[location] = (
infected_d_f_all_locations[location] +
recovered_d_f_all_locations[location] +
death_d_f_all_locations[location] +
hospitalized_f_all_locations[location])
confirmed_horizon_ahead_d_f_all_locations[location] = (
confirmed_f_all_locations[location][num_forecast_steps - 1:, :] -
confirmed_f_all_locations[location][:-num_forecast_steps + 1, :])
susceptible_f_all_locations[location] = np.maximum(
0, (population_f_all_locations[location] -
confirmed_f_all_locations[location] -
exposed_f_all_locations[location] -
recovered_ud_f_all_locations[location] -
infected_ud_f_all_locations[location] -
vaccine_immuned_t_f_all_locations[location]))
recovered_d_f_all_locations[location] = (
recovered_d_f_all_locations[location] +
reinfectable_d_f_all_locations[location])
recovered_ud_f_all_locations[location] = (
recovered_ud_f_all_locations[location] +
reinfectable_ud_f_all_locations[location])
confirmed_f_all_locations[location] = (
confirmed_f_all_locations[location] +
reinfectable_d_f_all_locations[location])
      # The lower bounds of the cumulative quantiles are the last observed
      # values; this applies to all constructors.
if quantile_regression:
(_, gt_list, _, _, _) = ground_truth_timeseries
death_d_f_all_locations = self.lowerbound_postprocessing(
death_d_f_all_locations, gt_list["death"][:, location_index],
location, num_forecast_steps)
confirmed_f_all_locations = self.lowerbound_postprocessing(
confirmed_f_all_locations, gt_list["confirmed"][:, location_index],
location, num_forecast_steps)
recovered_d_f_all_locations = self.lowerbound_postprocessing(
recovered_d_f_all_locations, gt_list["recovered"][:,
location_index],
location, num_forecast_steps)
recovered_ud_f_all_locations = self.lowerbound_postprocessing(
recovered_ud_f_all_locations, None, location, num_forecast_steps)
reinfectable_d_f_all_locations = self.lowerbound_postprocessing(
reinfectable_d_f_all_locations, None, location, num_forecast_steps)
reinfectable_ud_f_all_locations = self.lowerbound_postprocessing(
reinfectable_ud_f_all_locations, None, location, num_forecast_steps)
rates = self.extract_rates(propagated_variables, chosen_location_list)
return (susceptible_f_all_locations, exposed_f_all_locations,
infected_d_f_all_locations, infected_ud_f_all_locations,
recovered_d_f_all_locations, recovered_ud_f_all_locations,
death_d_f_all_locations, death_horizon_ahead_d_f_all_locations,
confirmed_f_all_locations,
confirmed_horizon_ahead_d_f_all_locations,
hospitalized_f_all_locations, hospitalized_increase_f_all_locations,
hospitalized_cumulative_f_all_locations, icu_f_all_locations,
ventilator_f_all_locations, infected_ud_increase_f_all_locations,
rates)
def pack_compartments(self, states, ground_truth_timeseries,
num_forecast_steps):
"""Packs predictions into compartments with associated ground truth."""
(susceptible_f_all_locations, exposed_f_all_locations,
infected_d_f_all_locations, infected_ud_f_all_locations,
recovered_d_f_all_locations, recovered_ud_f_all_locations,
death_d_f_all_locations, death_horizon_ahead_d_f_all_locations,
confirmed_f_all_locations, confirmed_horizon_ahead_d_f_all_locations,
hospitalized_f_all_locations, hospitalized_increase_f_all_locations,
hospitalized_cumulative_f_all_locations, icu_f_all_locations,
ventilator_f_all_locations, infected_ud_increase_f_all_locations,
rates) = states
(_, _, _, _, orig_gt) = ground_truth_timeseries
# pack all results in a list of compartment dataclasses.
susceptible_compartment = generic_seir_model_constructor.Compartment(
name=constants.SUSCEPTIBLE,
predictions=susceptible_f_all_locations,
num_forecast_steps=num_forecast_steps)
exposed_compartment = generic_seir_model_constructor.Compartment(
name=constants.EXPOSED,
predictions=exposed_f_all_locations,
num_forecast_steps=num_forecast_steps)
infected_d_compartment = generic_seir_model_constructor.Compartment(
name=constants.INFECTED_DOC,
predictions=infected_d_f_all_locations,
num_forecast_steps=num_forecast_steps,
ground_truth=orig_gt["infected"])
infected_ud_compartment = generic_seir_model_constructor.Compartment(
name=constants.INFECTED_UNDOC,
predictions=infected_ud_f_all_locations,
num_forecast_steps=num_forecast_steps)
infected_ud_increase_compartment = generic_seir_model_constructor.Compartment(
name=constants.INFECTED_UNDOC_INCREASE,
predictions=infected_ud_increase_f_all_locations,
num_forecast_steps=num_forecast_steps)
recovered_d_compartment = generic_seir_model_constructor.Compartment(
name=constants.RECOVERED_DOC,
predictions=recovered_d_f_all_locations,
num_forecast_steps=num_forecast_steps,
ground_truth=orig_gt["recovered"])
recovered_ud_compartment = generic_seir_model_constructor.Compartment(
name=constants.RECOVERED_UNDOC,
predictions=recovered_ud_f_all_locations,
num_forecast_steps=num_forecast_steps)
death_d_compartment = generic_seir_model_constructor.Compartment(
name=constants.DEATH,
predictions=death_d_f_all_locations,
num_forecast_steps=num_forecast_steps,
ground_truth=orig_gt["death"])
confirmed_compartment = generic_seir_model_constructor.Compartment(
name=constants.CONFIRMED,
predictions=confirmed_f_all_locations,
num_forecast_steps=num_forecast_steps,
ground_truth=orig_gt["confirmed"])
hospitalized_compartment = generic_seir_model_constructor.Compartment(
name=constants.HOSPITALIZED,
predictions=hospitalized_f_all_locations,
num_forecast_steps=num_forecast_steps,
ground_truth=orig_gt["hospitalized"])
hospitalized_increase_compartment = (
generic_seir_model_constructor.Compartment(
name=constants.HOSPITALIZED_INCREASE,
predictions=hospitalized_increase_f_all_locations,
num_forecast_steps=num_forecast_steps))
hospitalized_cumulative_compartment = (
generic_seir_model_constructor.Compartment(
name=constants.HOSPITALIZED_CUMULATIVE,
predictions=hospitalized_cumulative_f_all_locations,
num_forecast_steps=num_forecast_steps))
icu_compartment = generic_seir_model_constructor.Compartment(
name=constants.ICU,
predictions=icu_f_all_locations,
num_forecast_steps=num_forecast_steps,
ground_truth=orig_gt["icu"])
ventilator_compartment = generic_seir_model_constructor.Compartment(
name=constants.VENTILATOR,
predictions=ventilator_f_all_locations,
num_forecast_steps=num_forecast_steps,
ground_truth=orig_gt["ventilator"])
def create_horizon_ahead_gt(gt):
"""Creates incremental (1-day) ground truth values."""
horizon_ahead_gt = {}
for location in gt:
horizon_ahead_gt[location] = (
gt[location][num_forecast_steps - 1:] -
gt[location][:-num_forecast_steps + 1])
return horizon_ahead_gt
death_horizon_ahead_d_compartment = (
generic_seir_model_constructor.Compartment(
name=constants.HORIZON_AHEAD_DEATH,
predictions=death_horizon_ahead_d_f_all_locations,
num_forecast_steps=1,
ground_truth=create_horizon_ahead_gt(orig_gt["death"])))
confirmed_horizon_ahead_d_compartment = (
generic_seir_model_constructor.Compartment(
name=constants.HORIZON_AHEAD_CONFIRMED,
predictions=confirmed_horizon_ahead_d_f_all_locations,
num_forecast_steps=1,
ground_truth=create_horizon_ahead_gt(orig_gt["confirmed"])))
rates_compartments = []
for name, predictions in rates.items():
rates_compartments.append(
generic_seir_model_constructor.Compartment(
name=name,
predictions=predictions,
num_forecast_steps=num_forecast_steps,
use_quantiles=False))
compartments = [
susceptible_compartment, exposed_compartment, infected_d_compartment,
infected_ud_compartment, recovered_d_compartment,
recovered_ud_compartment, death_d_compartment,
death_horizon_ahead_d_compartment, confirmed_compartment,
confirmed_horizon_ahead_d_compartment, hospitalized_compartment,
hospitalized_increase_compartment, hospitalized_cumulative_compartment,
icu_compartment, ventilator_compartment,
infected_ud_increase_compartment
]
compartments += rates_compartments
return compartments
def apply_quantile_transform(self,
hparams,
propagated_states,
quantile_kernel,
quantile_biases,
ground_truth_timeseries,
num_train_steps,
num_forecast_steps,
num_quantiles=23,
epsilon=1e-8,
is_training=True,
initial_quantile_step=0):
"""Transform predictions into vector representing different quantiles.
Args:
hparams: Hyperparameters.
propagated_states: single value predictions, its dimensions represent
timestep * states * location.
quantile_kernel: Quantile mapping kernel.
quantile_biases: Biases for quantiles.
ground_truth_timeseries: Ground truth time series.
num_train_steps: number of train steps
num_forecast_steps: number of forecasting steps
num_quantiles: Number of quantiles
epsilon: A small number to avoid 0 division issues.
is_training: Whether the phase is training or inference.
initial_quantile_step: start index for quantile training
Returns:
Vector value predictions of size
timestep * states * location * num_quantiles
"""
(_, gt_list, gt_indicator, _, _) = ground_truth_timeseries
unstacked_propagated_states = tf.unstack(propagated_states, axis=1)
pred_infected = unstacked_propagated_states[1]
pred_recovered = unstacked_propagated_states[3]
pred_hospitalized = unstacked_propagated_states[5]
pred_icu = unstacked_propagated_states[8]
pred_ventilator = unstacked_propagated_states[9]
pred_death = unstacked_propagated_states[10]
pred_reinfected = unstacked_propagated_states[12]
pred_confirmed = (
pred_infected + pred_recovered + pred_death + pred_hospitalized +
pred_icu + pred_ventilator + pred_reinfected)
quantile_encoding_window = hparams["quantile_encoding_window"]
smooth_coef = hparams["quantile_smooth_coef"]
partial_mean_interval = hparams["partial_mean_interval"]
quantile_mapping_kernel = tf.math.softplus(
tf.expand_dims(quantile_kernel, 2))
quantile_biases = tf.math.softplus(tf.expand_dims(quantile_biases, 1))
propagated_states_quantiles = []
state_quantiles_multiplier_prev = tf.ones_like(
tf.expand_dims(propagated_states[0, :, :], 2))
def gt_ratio_feature(gt_values,
predicted):
"""Creates the GT ratio feature."""
# This uses the imputed values when the values are not valid.
ratio_pred = (1 - (predicted[:num_train_steps, :] /
(epsilon + gt_values[:num_train_steps])))
# Add 0 at the beginning
ratio_pred = tf.concat([
0 * ratio_pred[:(quantile_encoding_window + num_forecast_steps), :],
ratio_pred
],
axis=0)
ratio_pred = tf.expand_dims(ratio_pred, 1)
ratio_pred = tf.tile(ratio_pred, [1, self.num_states, 1])
return ratio_pred
def indicator_feature(gt_indicator):
"""Creates the indicator feature."""
indicator = 1. - gt_indicator
# Add 0 at the beginning
indicator = tf.concat([
0 * indicator[:(quantile_encoding_window + num_forecast_steps), :],
indicator
],
axis=0)
indicator = tf.expand_dims(indicator, 1)
indicator = tf.tile(indicator, [1, self.num_states, 1])
return indicator
# Propagated states features
temp_propagated_states = tf.concat([
0 * propagated_states[:quantile_encoding_window, :, :],
propagated_states
],
axis=0)
# GT ratio features
death_gt_ratio_feature = gt_ratio_feature(gt_list["death"], pred_death)
confirmed_gt_ratio_feature = gt_ratio_feature(gt_list["confirmed"],
pred_confirmed)
hospitalized_gt_ratio_feature = gt_ratio_feature(gt_list["hospitalized"],
pred_hospitalized)
# Indicator features
death_indicator_feature = indicator_feature(gt_indicator["death"])
confirmed_indicator_feature = indicator_feature(gt_indicator["confirmed"])
hospitalized_indicator_feature = indicator_feature(
gt_indicator["hospitalized"])
for ti in range(initial_quantile_step,
num_train_steps + num_forecast_steps):
if ti < num_train_steps:
state_quantiles_multiplier = tf.ones_like(
tf.expand_dims(propagated_states[0, :, :], 2))
state_quantiles_multiplier = tf.tile(state_quantiles_multiplier,
[1, 1, num_quantiles])
else:
# Construct the input features to be used for quantile estimation.
encoding_input = []
        # Features coming from the trend of the estimated states.
encoding_input.append(1 - (
temp_propagated_states[ti:(ti + quantile_encoding_window), :, :] /
(epsilon +
temp_propagated_states[ti + quantile_encoding_window, :, :])))
# Features coming from the ground truth ratio of death.
encoding_input.append(
death_gt_ratio_feature[ti:(ti + quantile_encoding_window), :, :])
# Features coming from the ground truth ratio of confirmed.
encoding_input.append(
confirmed_gt_ratio_feature[ti:(ti +
quantile_encoding_window), :, :])
# Features coming from the ground truth ratio of hospitalized.
encoding_input.append(
hospitalized_gt_ratio_feature[ti:(ti +
quantile_encoding_window), :, :])
# Features coming from death indicator.
encoding_input.append(
death_indicator_feature[ti:(ti + quantile_encoding_window), :, :])
# Features coming from confirmed indicator.
encoding_input.append(
confirmed_indicator_feature[ti:(ti +
quantile_encoding_window), :, :])
# Features coming from hospitalized indicator.
encoding_input.append(
hospitalized_indicator_feature[ti:(ti +
quantile_encoding_window), :, :])
encoding_input_t = tf.expand_dims(tf.concat(encoding_input, axis=0), 3)
# Limit the range of features.
encoding_input_t = model_utils.apply_relu_bounds(
encoding_input_t,
lower_bound=0.0,
upper_bound=2.0,
replace_nan=True)
# Estimate the multipliers of quantiles
state_quantiles_multiplier = quantile_biases + tf.math.reduce_mean(
tf.multiply(encoding_input_t, quantile_mapping_kernel), 0)
        # Accumulate (cumulative sum) to guarantee monotonicity across quantiles.
state_quantiles_multiplier = tf.math.cumsum(
state_quantiles_multiplier, axis=-1)
if partial_mean_interval == 0:
# Normalize to match the median to point forecasts
state_quantiles_multiplier /= (
epsilon + tf.expand_dims(
state_quantiles_multiplier[:, :,
(num_quantiles - 1) // 2], -1))
else:
          # Normalize by the average of the central quantiles to approximate the point forecast (mean).
median_idx = (num_quantiles - 1) // 2
normalize_start = median_idx - partial_mean_interval
normalize_end = median_idx + partial_mean_interval
normalizer = tf.reduce_mean(
0.5 *
(state_quantiles_multiplier[:, :, normalize_start:normalize_end] +
state_quantiles_multiplier[:, :, normalize_start +
1:normalize_end + 1]),
axis=2,
keepdims=True)
state_quantiles_multiplier /= (epsilon + normalizer)
state_quantiles_multiplier = (
smooth_coef * state_quantiles_multiplier_prev +
(1 - smooth_coef) * state_quantiles_multiplier)
state_quantiles_multiplier_prev = state_quantiles_multiplier
      # Apply the estimated quantile multipliers to the point predictions for this timestep.
propagated_states_quantiles_timestep = tf.multiply(
tf.expand_dims(propagated_states[ti, :, :], 2),
state_quantiles_multiplier)
propagated_states_quantiles.append(propagated_states_quantiles_timestep)
return tf.stack(propagated_states_quantiles)
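  # --- Editor's note (illustrative, not part of the original model code) ---
  # With the default num_quantiles=23, the median index used for normalization
  # above is (23 - 1) // 2 = 11; when partial_mean_interval > 0 the normalizer
  # is instead the trapezoidal average of the quantile band
  # [11 - interval, 11 + interval], approximating the point forecast as a mean
  # rather than a median.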
def extract_rate_list(self):
"""Return list of rates that correspond to 'propagated_variables' tensor.
Args: None.
Returns:
List of rate names.
"""
return constants.ICU_AND_VENTILATOR_RATE_LIST
def calculate_r_eff(self,
rates = None,
propagated_variables = None,
epsilon = 1e-8):
"""Calculate Basic Reproduction Number R_eff over time and locations.
Args:
rates: rate name->tensor maps.
propagated_variables: single tensor of variables indexed by
(time)x(variables)x(locations) (used in the training).
epsilon: epsilon for avoiding numerical error.
Returns:
R_eff tensor.
"""
if rates is not None and propagated_variables is not None:
raise ValueError("Only rates or seir_variables can be used.")
elif rates is None and propagated_variables is None:
raise ValueError("Have to specify one argument.")
elif rates is not None:
beta_d, beta_ud = rates["average_contact_id_rate"], rates[
"average_contact_iud_rate"]
rho_id, rho_iud = rates["recovery_id_rate"], rates["recovery_iud_rate"]
gamma, h = rates["diagnosis_rate"], rates["hospitalization_rate"]
kappa_id = rates["death_id_rate"]
# equation is computed from the Next Generation Matrix Method.
# If you are changing any of the parameters below, please make sure to
# update the Next Generation Matrix derivation and parameters too.
# LINT.IfChange
r_eff = (beta_d * gamma + beta_ud *
(rho_id + kappa_id + h)) / ((gamma + rho_iud) *
(rho_id + kappa_id + h) + epsilon)
return r_eff
else:
propagated_variables_list = tf.unstack(propagated_variables, axis=1)
average_contact_id = propagated_variables_list[2]
average_contact_iud = propagated_variables_list[3]
diagnosis_rate = propagated_variables_list[6]
recovery_rate_id = propagated_variables_list[7]
recovery_rate_iud = propagated_variables_list[8]
hospitalization_rate = propagated_variables_list[12]
death_rate_id = propagated_variables_list[15]
beta_d = average_contact_id
beta_ud = average_contact_iud
rho_id = recovery_rate_id
rho_iud = recovery_rate_iud
gamma = diagnosis_rate
h = hospitalization_rate
kappa_id = death_rate_id
r_eff = (beta_d * gamma + beta_ud *
(rho_id + kappa_id + h)) / ((gamma + rho_iud) *
(rho_id + kappa_id + h) + epsilon)
return r_eff
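# --- Editor's illustrative sketch (not part of the original module) ---
# Plugging made-up rate values into the closed-form R_eff expression above
# (derived via the Next Generation Matrix method) to show the arithmetic; the
# numbers are arbitrary and do not come from any fitted model.
if __name__ == "__main__":
  _epsilon = 1e-8
  _beta_d, _beta_ud = 0.2, 0.3             # contact rates (diagnosed / undiagnosed)
  _rho_id, _rho_iud = 0.1, 0.1             # recovery rates
  _gamma, _h, _kappa_id = 0.2, 0.05, 0.01  # diagnosis, hospitalization, death rates
  _r_eff = (_beta_d * _gamma + _beta_ud * (_rho_id + _kappa_id + _h)) / (
      (_gamma + _rho_iud) * (_rho_id + _kappa_id + _h) + _epsilon)
  print("Illustrative R_eff:", _r_eff)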
|
google-research/google-research
|
covid_epidemiology/src/models/generic_seir_state_model_constructor.py
|
Python
|
apache-2.0
| 44,854
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ImageNet 64x64."""
# pylint: disable=invalid-name,line-too-long
import ml_collections
def D(**kwargs):
return ml_collections.ConfigDict(initial_dictionary=kwargs)
def get_config():
return D(
seed=0,
dataset=D(
name='ImageNet',
args=D(
image_size=64,
class_conditional=True,
randflip=True,
),
),
sampler='ddim',
model=D(
# architecture
name='unet_iddpm',
args=D(
ch=192,
emb_ch=768, # default is ch * 4
ch_mult=[1, 2, 3, 4],
num_res_blocks=3,
attn_resolutions=[8, 16, 32],
num_heads=None,
head_dim=64,
dropout=0.1,
logsnr_input_type='inv_cos',
resblock_resample=True,
),
mean_type='both', # eps, x, both, v
logvar_type='fixed_large',
mean_loss_weight_type='snr_trunc', # constant, snr, snr_trunc
# logsnr schedule
train_num_steps=0, # train in continuous time
eval_sampling_num_steps=1024,
train_logsnr_schedule=D(name='cosine',
logsnr_min=-20., logsnr_max=20.),
eval_logsnr_schedule=D(name='cosine',
logsnr_min=-20., logsnr_max=20.),
eval_clip_denoised=True,
),
train=D(
# optimizer
batch_size=2048,
optimizer='adam',
learning_rate=3e-4,
learning_rate_warmup_steps=1000,
weight_decay=0.0,
ema_decay=0.9999,
grad_clip=1.0,
substeps=10,
enable_update_skip=False,
# logging
log_loss_every_steps=100,
checkpoint_every_secs=900, # 15 minutes
retain_checkpoint_every_steps=20000, # old checkpoints won't get deleted
eval_every_steps=10000,
),
)
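# --- Editor's illustrative sketch (not part of the original config) ---
# get_config() returns an ml_collections.ConfigDict, so individual fields can
# be overridden after loading; the override values below are arbitrary examples.
if __name__ == '__main__':
  config = get_config()
  config.train.batch_size = 256  # e.g. shrink the batch for a local smoke test
  config.model.eval_sampling_num_steps = 64
  print(config.model.name, config.train.batch_size)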
|
google-research/google-research
|
diffusion_distillation/diffusion_distillation/config/imagenet64_base.py
|
Python
|
apache-2.0
| 2,576
|
import os
import json
import time
import urllib2
import StringIO
import unittest
from jsonschema import validate
class TestBatchRecognition(unittest.TestCase):
def setUp(self):
time.sleep(1)
def test_batch_recognition(self):
response = self.get_response_for_wav()
self.assertResponseHasCorrectSchema(response)
def get_response_for_wav(self):
url = "http://127.0.0.1:8000/recognize?lang=en-towninfo&lm=new_lm"
wav = self.load_wav()
headers = {"Content-Type": "audio/x-wav; rate=16000;"}
request = urllib2.Request(url, wav, headers)
return urllib2.urlopen(request).read()
def load_wav(self):
basedir = os.path.dirname(os.path.realpath(__file__))
return open("%s/../resources/test.wav" % basedir, "rb").read()
def assertResponseHasCorrectSchema(self, response):
schema = {
"type": "object",
"properties": {
"result": {
"type": "array",
"items": {
"type": "object",
"properties": {
"alternative": {
"type": "array",
"items": {
"type": "object",
"properties": {
"confidence": {"type": "number"},
"transcript": {"type": "string"},
},
"required": ["confidence", "transcript"],
"additionalProperties": False,
},
"minItems": 1,
},
"final": {"type": "boolean"},
},
"required": ["alternative", "final"],
"additionalProperties": False,
},
"minItems": 1,
},
"result_index": {"type": "number"},
"chunk_id": {"type": "string"},
"request_id": {"type": "string"},
},
"required": ["result", "result_index", "request_id"],
"additionalProperties": False,
}
validationResult = validate(json.loads(response), schema)
self.assertIsNone(validationResult, msg="Response has invalid schema")
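# --- Editor's illustrative sketch (not part of the original test) ---
# jsonschema.validate() returns None on success and raises ValidationError
# otherwise, which is why the test above only asserts that the result is None.
# The tiny schema below is invented for illustration.
if __name__ == "__main__":
    from jsonschema import ValidationError
    try:
        validate({"result_index": "oops"},
                 {"type": "object",
                  "properties": {"result_index": {"type": "number"}}})
    except ValidationError as error:
        print("schema mismatch: %s" % error.message)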
|
UFAL-DSG/cloud-asr
|
tests/test_batch_recognition.py
|
Python
|
apache-2.0
| 2,520
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Jordan-Wigner transform on fermionic operators."""
import itertools
import numpy
from openfermion.ops import (DiagonalCoulombHamiltonian, FermionOperator,
InteractionOperator, QubitOperator)
from openfermion.utils import count_qubits
def jordan_wigner(operator):
""" Apply the Jordan-Wigner transform to a FermionOperator,
InteractionOperator, or DiagonalCoulombHamiltonian to convert
to a QubitOperator.
Operators are mapped as follows:
a_j^\dagger -> Z_0 .. Z_{j-1} (X_j - iY_j) / 2
a_j -> Z_0 .. Z_{j-1} (X_j + iY_j) / 2
Returns:
transformed_operator: An instance of the QubitOperator class.
Warning:
The runtime of this method is exponential in the maximum locality
of the original FermionOperator.
Raises:
TypeError: Operator must be a FermionOperator,
DiagonalCoulombHamiltonian, or InteractionOperator.
"""
if isinstance(operator, InteractionOperator):
return jordan_wigner_interaction_op(operator)
if isinstance(operator, DiagonalCoulombHamiltonian):
return jordan_wigner_diagonal_coulomb_hamiltonian(operator)
if not isinstance(operator, FermionOperator):
raise TypeError("Operator must be a FermionOperator, "
"DiagonalCoulombHamiltonian, or "
"InteractionOperator.")
transformed_operator = QubitOperator()
for term in operator.terms:
# Initialize identity matrix.
transformed_term = QubitOperator((), operator.terms[term])
# Loop through operators, transform and multiply.
for ladder_operator in term:
z_factors = tuple((index, 'Z') for
index in range(ladder_operator[0]))
pauli_x_component = QubitOperator(
z_factors + ((ladder_operator[0], 'X'),), 0.5)
if ladder_operator[1]:
pauli_y_component = QubitOperator(
z_factors + ((ladder_operator[0], 'Y'),), -0.5j)
else:
pauli_y_component = QubitOperator(
z_factors + ((ladder_operator[0], 'Y'),), 0.5j)
transformed_term *= pauli_x_component + pauli_y_component
transformed_operator += transformed_term
return transformed_operator
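# --- Editor's note (illustrative, not part of the original module) ---
# Worked example of the mapping in the docstring above: the single creation
# operator a^\dagger_2 becomes Z_0 Z_1 (X_2 - i Y_2) / 2, i.e. the sum of the
# Pauli strings 0.5 * Z0 Z1 X2 and -0.5j * Z0 Z1 Y2, which is exactly what the
# loop over ladder operators assembles term by term.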
def jordan_wigner_diagonal_coulomb_hamiltonian(operator):
n_qubits = count_qubits(operator)
qubit_operator = QubitOperator((), operator.constant)
# Transform diagonal one-body terms
for p in range(n_qubits):
coefficient = operator.one_body[p, p] + operator.two_body[p, p]
qubit_operator += QubitOperator(((p, 'Z'),), -.5 * coefficient)
qubit_operator += QubitOperator((), .5 * coefficient)
# Transform other one-body terms and two-body terms
for p, q in itertools.combinations(range(n_qubits), 2):
# One-body
real_part = numpy.real(operator.one_body[p, q])
imag_part = numpy.imag(operator.one_body[p, q])
parity_string = [(i, 'Z') for i in range(p + 1, q)]
qubit_operator += QubitOperator(
[(p, 'X')] + parity_string + [(q, 'X')], .5 * real_part)
qubit_operator += QubitOperator(
[(p, 'Y')] + parity_string + [(q, 'Y')], .5 * real_part)
qubit_operator += QubitOperator(
[(p, 'Y')] + parity_string + [(q, 'X')], .5 * imag_part)
qubit_operator += QubitOperator(
[(p, 'X')] + parity_string + [(q, 'Y')], -.5 * imag_part)
# Two-body
coefficient = operator.two_body[p, q]
qubit_operator += QubitOperator(((p, 'Z'), (q, 'Z')), .5 * coefficient)
qubit_operator += QubitOperator((p, 'Z'), -.5 * coefficient)
qubit_operator += QubitOperator((q, 'Z'), -.5 * coefficient)
qubit_operator += QubitOperator((), .5 * coefficient)
return qubit_operator
def jordan_wigner_interaction_op(iop, n_qubits=None):
"""Output InteractionOperator as QubitOperator class under JW transform.
One could accomplish this very easily by first mapping to fermions and
then mapping to qubits. We skip the middle step for the sake of speed.
This only works for real InteractionOperators (no complex numbers).
Returns:
qubit_operator: An instance of the QubitOperator class.
"""
if n_qubits is None:
n_qubits = count_qubits(iop)
if n_qubits < count_qubits(iop):
raise ValueError('Invalid number of qubits specified.')
# Initialize qubit operator as constant.
qubit_operator = QubitOperator((), iop.constant)
# Transform diagonal one-body terms
for p in range(n_qubits):
coefficient = iop[(p, 1), (p, 0)]
qubit_operator += jordan_wigner_one_body(p, p, coefficient)
# Transform other one-body terms and "diagonal" two-body terms
for p, q in itertools.combinations(range(n_qubits), 2):
# One-body
coefficient = .5 * (iop[(p, 1), (q, 0)] + iop[(q, 1), (p, 0)])
qubit_operator += jordan_wigner_one_body(p, q, coefficient)
# Two-body
coefficient = (iop[(p, 1), (q, 1), (p, 0), (q, 0)] -
iop[(p, 1), (q, 1), (q, 0), (p, 0)] -
iop[(q, 1), (p, 1), (p, 0), (q, 0)] +
iop[(q, 1), (p, 1), (q, 0), (p, 0)])
qubit_operator += jordan_wigner_two_body(p, q, p, q, coefficient)
# Transform the rest of the two-body terms
for (p, q), (r, s) in itertools.combinations(
itertools.combinations(range(n_qubits), 2),
2):
coefficient = (iop[(p, 1), (q, 1), (r, 0), (s, 0)] -
iop[(p, 1), (q, 1), (s, 0), (r, 0)] -
iop[(q, 1), (p, 1), (r, 0), (s, 0)] +
iop[(q, 1), (p, 1), (s, 0), (r, 0)])
qubit_operator += jordan_wigner_two_body(p, q, r, s, coefficient)
return qubit_operator
def jordan_wigner_one_body(p, q, coefficient=1.):
"""Map the term a^\dagger_p a_q + a^\dagger_q a_p to QubitOperator.
Note that the diagonal terms are divided by a factor of 2
because they are equal to their own Hermitian conjugate.
"""
# Handle off-diagonal terms.
qubit_operator = QubitOperator()
if p != q:
a, b = sorted([p, q])
parity_string = tuple((z, 'Z') for z in range(a + 1, b))
for operator in ['X', 'Y']:
operators = ((a, operator),) + parity_string + ((b, operator),)
qubit_operator += QubitOperator(operators, .5 * coefficient)
# Handle diagonal terms.
else:
qubit_operator += QubitOperator((), .5 * coefficient)
qubit_operator += QubitOperator(((p, 'Z'),), -.5 * coefficient)
return qubit_operator
def jordan_wigner_two_body(p, q, r, s, coefficient=1.):
"""Map the term a^\dagger_p a^\dagger_q a_r a_s + h.c. to QubitOperator.
Note that the diagonal terms are divided by a factor of two
because they are equal to their own Hermitian conjugate.
"""
# Initialize qubit operator.
qubit_operator = QubitOperator()
# Return zero terms.
if (p == q) or (r == s):
return qubit_operator
# Handle case of four unique indices.
elif len(set([p, q, r, s])) == 4:
# Loop through different operators which act on each tensor factor.
for operator_p, operator_q, operator_r in itertools.product(
['X', 'Y'], repeat=3):
if [operator_p, operator_q, operator_r].count('X') % 2:
operator_s = 'X'
else:
operator_s = 'Y'
# Sort operators.
[(a, operator_a), (b, operator_b),
(c, operator_c), (d, operator_d)] = sorted(
[(p, operator_p), (q, operator_q),
(r, operator_r), (s, operator_s)],
key=lambda pair: pair[0])
            # Compute operator strings.
operators = ((a, operator_a),)
operators += tuple((z, 'Z') for z in range(a + 1, b))
operators += ((b, operator_b),)
operators += ((c, operator_c),)
operators += tuple((z, 'Z') for z in range(c + 1, d))
operators += ((d, operator_d),)
# Get coefficients.
coeff = .125 * coefficient
parity_condition = bool(operator_p != operator_q or
operator_p == operator_r)
if (p > q) ^ (r > s):
if not parity_condition:
coeff *= -1.
elif parity_condition:
coeff *= -1.
# Add term.
qubit_operator += QubitOperator(operators, coeff)
# Handle case of three unique indices.
elif len(set([p, q, r, s])) == 3:
# Identify equal tensor factors.
if p == r:
a, b = sorted([q, s])
c = p
elif p == s:
a, b = sorted([q, r])
c = p
elif q == r:
a, b = sorted([p, s])
c = q
elif q == s:
a, b = sorted([p, r])
c = q
# Get operators.
parity_string = tuple((z, 'Z') for z in range(a + 1, b))
pauli_z = QubitOperator(((c, 'Z'),))
for operator in ['X', 'Y']:
operators = ((a, operator),) + parity_string + ((b, operator),)
# Get coefficient.
if (p == s) or (q == r):
coeff = .25 * coefficient
else:
coeff = -.25 * coefficient
# Add term.
hopping_term = QubitOperator(operators, coeff)
qubit_operator -= pauli_z * hopping_term
qubit_operator += hopping_term
# Handle case of two unique indices.
elif len(set([p, q, r, s])) == 2:
# Get coefficient.
if p == s:
coeff = -.25 * coefficient
else:
coeff = .25 * coefficient
# Add terms.
qubit_operator -= QubitOperator((), coeff)
qubit_operator += QubitOperator(((p, 'Z'),), coeff)
qubit_operator += QubitOperator(((q, 'Z'),), coeff)
qubit_operator -= QubitOperator(((min(q, p), 'Z'), (max(q, p), 'Z')),
coeff)
return qubit_operator
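# --- Editor's illustrative sketch (not part of the original module) ---
# Assumes OpenFermion is importable; transforms the number operator
# a^\dagger_1 a_1, whose Jordan-Wigner image is (I - Z_1) / 2.
if __name__ == '__main__':
    _number_operator = FermionOperator('1^ 1')
    print(jordan_wigner(_number_operator))  # 0.5 * identity - 0.5 * Z1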
|
jarrodmcc/OpenFermion
|
src/openfermion/transforms/_jordan_wigner.py
|
Python
|
apache-2.0
| 10,817
|
#!/usr/bin/env python
import os
NFQ_NUMBER=42
ZMQ_ADDR=os.getenv('EQ_ETHER_ZMQ_ADDR')
import pyearthquake
from pyearthquake.middlebox.nfqhook import NFQHook
LOG = pyearthquake.LOG.getChild(__name__)
if __name__ == '__main__':
LOG.info("Please run `iptables -A OUTPUT -p tcp -m owner --uid-owner $(id -u nfqhooked) -j NFQUEUE --queue-num %d` before running this hook.", NFQ_NUMBER)
hook = NFQHook(nfq_number=NFQ_NUMBER, zmq_addr=ZMQ_ADDR)
hook.start()
|
fuku-ys/earthquake
|
example/not-so-much-useful/tcp-ex-lo.nfqhook/materials/sample_nfqhook.py
|
Python
|
apache-2.0
| 466
|
from awxkit.utils import random_title, update_payload, filter_by_class, PseudoNamespace
from awxkit.api.resources import resources
from awxkit.api.pages import Organization
from awxkit.api.mixins import HasCreate, DSAdapter
from . import page
from . import base
class OAuth2Application(HasCreate, base.Base):
dependencies = [Organization]
def payload(self, **kwargs):
payload = PseudoNamespace(name=kwargs.get('name') or 'OAuth2Application - {}'.format(random_title()),
description=kwargs.get('description') or random_title(10),
client_type=kwargs.get('client_type', 'public'),
authorization_grant_type=kwargs.get('authorization_grant_type', 'password'))
if kwargs.get('organization'):
payload.organization = kwargs['organization'].id
optional_fields = ('redirect_uris', 'skip_authorization')
update_payload(payload, optional_fields, kwargs)
return payload
def create_payload(self, organization=Organization, **kwargs):
self.create_and_update_dependencies(*filter_by_class((organization, Organization)))
organization = self.ds.organization if organization else None
payload = self.payload(organization=organization, **kwargs)
payload.ds = DSAdapter(self.__class__.__name__, self._dependency_store)
return payload
def create(self, organization=Organization, **kwargs):
payload = self.create_payload(organization=organization, **kwargs)
return self.update_identity(OAuth2Applications(self.connection).post(payload))
page.register_page((resources.application,
(resources.applications, 'post')), OAuth2Application)
class OAuth2Applications(page.PageList, OAuth2Application):
pass
page.register_page(resources.applications, OAuth2Applications)
class OAuth2AccessToken(HasCreate, base.Base):
optional_dependencies = [OAuth2Application]
def payload(self, **kwargs):
payload = PseudoNamespace(description=kwargs.get('description') or random_title(10),
scope=kwargs.get('scope', 'write'))
if kwargs.get('oauth_2_application'):
payload.application = kwargs['oauth_2_application'].id
optional_fields = ('expires',)
update_payload(payload, optional_fields, kwargs)
return payload
def create_payload(self, oauth_2_application=None, **kwargs):
self.create_and_update_dependencies(*filter_by_class((oauth_2_application, OAuth2Application)))
oauth_2_application = self.ds.oauth_2_application if oauth_2_application else None
payload = self.payload(oauth_2_application=oauth_2_application, **kwargs)
payload.ds = DSAdapter(self.__class__.__name__, self._dependency_store)
return payload
def create(self, oauth_2_application=None, **kwargs):
payload = self.create_payload(oauth_2_application=oauth_2_application, **kwargs)
return self.update_identity(OAuth2AccessTokens(self.connection).post(payload))
page.register_page((resources.token,
(resources.tokens, 'post')), OAuth2AccessToken)
class OAuth2AccessTokens(page.PageList, OAuth2AccessToken):
pass
page.register_page(resources.tokens, OAuth2AccessTokens)
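# --- Editor's note (illustrative, not part of the original module) ---
# OAuth2Application.payload() simply assembles a PseudoNamespace: with no
# kwargs it contains a generated name and description plus
# client_type='public' and authorization_grant_type='password', and
# create_payload() additionally resolves the Organization dependency and
# attaches a DSAdapter before the POST performed in create(). (These field
# values are defaults read from the code above, not output captured from a
# live AWX instance.)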
|
GoogleCloudPlatform/sap-deployment-automation
|
third_party/github.com/ansible/awx/awxkit/awxkit/api/pages/applications.py
|
Python
|
apache-2.0
| 3,349
|
# -*- encoding: utf-8 -*-
#
# Copyright © 2013 Rackspace Hosting
#
# Author: Thomas Maddox <thomas.maddox@rackspace.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Plugins module for Ceilometer load testing.
"""
import graphite
import log
import statsD
mapping = {'log': log.LogDriver,
'graphite': graphite.GraphiteDriver,
'statsd': statsD.StatsDDriver}
def initialize_plugins(test_name, plugin_conf):
plugin_list = []
for name, plugin in mapping.iteritems():
if name in plugin_conf:
plugin_list.append(plugin(test_name, **plugin_conf[name]))
return plugin_list
def invoke(method, plugins, *args, **kwargs):
for plugin in plugins:
getattr(plugin, method)(*args, **kwargs)
|
rackerlabs/ceilometer-load-tests
|
src/plugins/__init__.py
|
Python
|
apache-2.0
| 1,251
|
from .response import Response
from ..reallyobject import ReallyObject
class ReadResponse(Response):
def __init__(self, raw):
super(ReadResponse, self).__init__(raw)
@property
def next_token(self):
if self._raw.get('body') and self._raw.get('body').get('tokens'):
return self._raw['body']['tokens']['nextToken']
else:
return None
@property
def prev_token(self):
if self._raw.get('body') and self._raw.get('body').get('tokens'):
return self._raw['body']['tokens']['prevToken']
else:
return None
@property
def items(self):
if self._raw.get('body'):
return map(ReallyObject, self._raw['body']['items'])
else:
return []
@property
def count(self):
if self._raw.get('body'):
return self._raw['body']['totalResults']
else:
            return 0
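# --- Editor's illustrative sketch (not part of the original module) ---
# Assuming Response simply stores the raw dict on self._raw, a ReadResponse
# built from the payload below (invented for illustration) would expose:
#
#     raw = {'body': {'items': [{'_id': 'users/1'}],
#                     'totalResults': 1,
#                     'tokens': {'nextToken': 'abc', 'prevToken': None}}}
#     response = ReadResponse(raw)
#     response.count       # -> 1
#     response.next_token  # -> 'abc'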
|
reallylabs/pyreally
|
pyreally/responses/read.py
|
Python
|
apache-2.0
| 935
|
# Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DELF feature extractor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from delf import delf_config_pb2
from delf import extractor
class ExtractorTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
('Max-1Min-1', -1, -1, [4, 2, 3], 1.0),
('Max2Min-1', 2, -1, [2, 1, 3], 0.5),
('Max8Min-1', 8, -1, [4, 2, 3], 1.0),
('Max-1Min1', -1, 1, [4, 2, 3], 1.0),
('Max-1Min8', -1, 8, [8, 4, 3], 2.0),
('Max16Min8', 16, 8, [8, 4, 3], 2.0),
('Max2Min2', 2, 2, [2, 1, 3], 0.5),
)
def testResizeImageWorks(self, max_image_size, min_image_size, expected_shape,
expected_scale_factor):
# Construct image of size 4x2x3.
image = np.array([[[0, 0, 0], [1, 1, 1]], [[2, 2, 2], [3, 3, 3]],
[[4, 4, 4], [5, 5, 5]], [[6, 6, 6], [7, 7, 7]]],
dtype='uint8')
# Set up config.
config = delf_config_pb2.DelfConfig(
max_image_size=max_image_size, min_image_size=min_image_size)
resized_image, scale_factor = extractor.ResizeImage(image, config)
self.assertAllEqual(resized_image.shape, expected_shape)
self.assertAllClose(scale_factor, expected_scale_factor)
if __name__ == '__main__':
tf.test.main()
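# --- Editor's note (illustrative, not part of the original test) ---
# Worked example for the 'Max-1Min8' case above: the parameterized expectations
# imply that the size bounds apply to the larger image side. The input is 4x2,
# its larger side is 4, and min_image_size=8 forces an upscale of 8 / 4 = 2.0,
# giving the expected shape [8, 4, 3] and scale factor 2.0.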
|
alexgorban/models
|
research/delf/delf/python/examples/extractor_test.py
|
Python
|
apache-2.0
| 2,114
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import MySQLdb
import os
# Open the database connection
db = MySQLdb.connect("localhost","root","12345678","test")
cursor = db.cursor()
count = 0
os.chdir('information')
path1 = os.getcwd()
s1 = os.listdir(path1)
s1 = [i for i in s1 if not i.startswith('.')]  # drop hidden entries without mutating the list while iterating
for i in s1:
path2 = path1+'/'+i
s2 = os.listdir(path2)
    s2 = [j for j in s2 if not j.startswith('.')]
for j in s2:
path3 = path2+'/'+j
s3 = os.listdir(path3)
        s3 = [k for k in s3 if not k.startswith('.')]
for k in s3:
path4 = path3+'/'+k
s4=os.listdir(path4)
for l in s4:
count += 1
info = []
name = path4+'/'+l
#print name
file_object = open(name)
lineList = file_object.readlines()
for lineNum in range(10):
if lineNum != 2:
if lineNum in range(6, 10):
lineList[lineNum] = lineList[lineNum][15:]
if lineNum in range(3, 6):
if lineList[lineNum]=='暂无\n':
lineList[lineNum] = '-1\n'
else:
lineList[lineNum] = lineList[lineNum][:len(lineList[lineNum])-3]
info.append(lineList[lineNum][:len(lineList[lineNum])-1])
introduction = ' '
for lineNum in range(11, len(lineList)):
introduction+=lineList[lineNum][:len(lineList[lineNum])-1]
introduction=introduction.replace('\r', '')
introduction=introduction.replace(r"\\'", '\"')
sql = """INSERT INTO main_book(id, bookID, bookName, pictureUrl, pricePerMonth, priceHalfYear, pricePerYear,
cBussiness, standNumber, mailNumber, bookType, bookKind1, bookKind2, bookKind3, introduction) VALUES
(NULL, %(bookID)d, '%(bookName)s', '%(pictureUrl)s', %(pricePerMonth)f, %(priceHalfYear)f, %(pricePerYear)f,
'%(cBussiness)s', '%(standNumber)s', '%(mailNumber)s', '%(bookType)s', '%(bookKind1)s', '%(bookKind2)s',
'%(bookKind3)s', '%(introduction)s')"""%{'bookID': count, 'bookName': info[0], 'pictureUrl': info[1],
'pricePerMonth': float(info[2]), 'priceHalfYear': float(info[3]), 'pricePerYear': float(info[4]), 'cBussiness':
info[5], 'standNumber': info[6], 'mailNumber': info[7], 'bookType': info[8], 'bookKind1': i, 'bookKind2':
j, 'bookKind3': k, 'introduction': introduction}
try:
cursor.execute(sql)
db.commit()
except:
print sql
db.rollback()
db.close()
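# --- Editor's illustrative sketch (not part of the original script) ---
# Building the INSERT statement with Python %-formatting (as above) breaks on
# quotes in the data and is open to SQL injection; MySQLdb can escape values
# itself when they are passed separately. A minimal parameterized equivalent
# (column list shortened for illustration):
#
#     sql = ("INSERT INTO main_book (bookID, bookName, introduction) "
#            "VALUES (%s, %s, %s)")
#     cursor.execute(sql, (count, info[0], introduction))
#     db.commit()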
|
JiaruZhang/Five
|
addBookData.py
|
Python
|
apache-2.0
| 2,903
|
#IBM_PROLOG_BEGIN_TAG
#
# Copyright 2015,2018 IBM International Business Machines Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#IBM_PROLOG_END_TAG
"""
pyecmd - a Pythonic wrapper around the eCMD Python API
Naming:
=======
* Functions and classes drop the ecmd prefix,
e.g. ecmdDataBuffer becomes pyecmd.DataBuffer and
ecmdQueryDllInfo() becomes pyecmd.queryDllInfo()
* ecmdChipTarget becomes pyecmd.Target for brevity
* Extension functions and constants are exposed through pyecmd without
renaming, e.g. pyecmd.ECMD_SELECTED_TARGETS_LOOP or Target.croQuerySpy()
Function Results:
=================
* All bad eCMD return codes are internally converted to exceptions
(EcmdError or a subclass of it)
* eCMD functions that have exactly one output parameter return just that.
* eCMD functions that have more than one output parameter return a tuple
containing all output parameters in their original order
Data Buffers:
=============
* ecmdDataBuffer is replaced by the awesome bitstring module
(http://pythonhosted.org/bitstring/), which is much more Pythonic.
All functions returning data return a bitstring.BitArray.
* Input data can be supplied as a bitstring (Bits, BitArray, ...),
or a string that can be fed into a bitstring constructor like "0x1234".
If the method in question takes a fixed amount of data (like putScom,
putCfamRegister, ...), you can also simply supply an integer value.
Targets:
========
* All eCMD functions whose first parameter is a target become methods of Target.
* Initializing a target is easy, field states (UNUSED, VALID, WILDCARD)
are handled by the Target class internally.
* Target looping is encapsulated in pyecmd.loopTargets() which
will simply return a list of targets.
Init and Shutdown:
==================
* Initialization and shutdown is handled through a context handler class
called pyecmd.Ecmd - do your work inside a with statement and you can
be sure eCMD is properly shut down even in the face of an exception.
@author: Joachim Fenkes <fenkes@de.ibm.com>
Here's a simple and very basic program using pyecmd:
from __future__ import print_function # not needed on python3
import pyecmd, sys
with pyecmd.Ecmd(args=sys.argv):
for target in pyecmd.loopTargets("pu", pyecmd.ECMD_SELECTED_TARGETS_LOOP):
print("Working on target %s" % target)
# Output TP probe settings
root_ctrl1 = target.getScom(0x50011)
print("Probe0 select: %d -- Probe1 select: %d" % (root_ctrl1[0:4].uint, root_ctrl1[4:8].uint))
# drop fence 0
root_ctrl0 = target.getScom(0x50010)
root_ctrl0[0] = False
target.putScom(0x50010, root_ctrl0)
# reset FSI2PIB engine
target.putCfamRegister(0x1006, 0)
"""
import ecmd
from .base import *
from .base import _rcwrap, _bufwrap, _from_ecmdDataBuffer, _to_ecmdDataBuffer
from .generated import *
from .generated import _Target
from .constants import *
from .ecmdbitstring import *
class EcmdWrapperError(Exception):
pass
def _map_attr_with_state(obj, name):
# Global method to avoid implicit recursive __getattr__ call
if name in Target._attrs_with_state:
state = super(ecmd.ecmdChipTarget, obj).__getattribute__(name + "State")
if state == ecmd.ECMD_TARGET_FIELD_WILDCARD:
return "*"
elif state == ecmd.ECMD_TARGET_FIELD_UNUSED:
return None
return super(ecmd.ecmdChipTarget, obj).__getattribute__(name)
class Target(_Target):
"""
Extends the base ecmdChipTarget by several convenience functions:
* Default constructor initializes all states to UNUSED instead of INVALID
* Constructor optionally takes another target and acts as a copy constructor in that case
* Constructor takes arbitrary keyword arguments specifying target attributes
* Setting a target attribute implicitly sets its state attribute:
* Use None for UNUSED
* Use "*" for WILDCARD
* Use any other value for VALID
"""
_attrs_with_state = ("cage", "node", "slot", "chipType", "pos", "chipUnitType", "core", "chipUnitNum", "thread", "unitId")
def __setattr__(self, name, value):
if name in Target._attrs_with_state:
if value == "*":
super(Target, self).__setattr__(name + "State", ecmd.ECMD_TARGET_FIELD_WILDCARD)
elif value == None:
super(Target, self).__setattr__(name + "State", ecmd.ECMD_TARGET_FIELD_UNUSED)
else:
super(Target, self).__setattr__(name, value)
super(Target, self).__setattr__(name + "State", ecmd.ECMD_TARGET_FIELD_VALID)
else:
super(Target, self).__setattr__(name, value)
def __getattribute__(self, name):
if name in Target._attrs_with_state:
return _map_attr_with_state(self, name)
return super(Target, self).__getattribute__(name)
def __init__(self, template=None, **kwargs):
super(Target, self).__init__()
if template:
for attr in Target._attrs_with_state:
self.__setattr__(attr, _map_attr_with_state(template, attr))
else:
for attr in Target._attrs_with_state:
self.__setattr__(attr, None)
for (attr, value) in kwargs.items():
setattr(self, attr, value)
def __repr__(self):
return ecmd.ecmdWriteTarget(self, ecmd.ECMD_DISPLAY_TARGET_DEFAULT).replace("\t", " ").strip()
def related_targets(self, target_type, mode=ecmd.ECMD_DYNAMIC_LOOP,
filter_chip_units=None):
"""
List all targets of the given "chip[.unit]" type that are "related" to this target.
Examples:
For a "pu" target, "pu.c" will yield all cores.
For a "pu.eq" target, "pu.c" will yield all cores attached to that cache.
For a "pu.c" target, "pu.ex" will yield the core's virtual EX target.
For a "pu.perv" target for chiplet 0x2E, "pu.c" will yield core 14,
but "pu.eq" will yield nothing because the perv target is the core's perv target.
@param filter_chip_units If specified, a list of chipUnitNums to only include in the result.
@type target_type str
@type mode int
@type filter_chip_units list(int)
@rtype list(Target)
"""
result = ecmd.ecmdChipTargetList()
_rcwrap(ecmd.ecmdRelatedTargets(self, target_type, result, mode))
return [self.__class__(target) for target in result if filter_chip_units is None or target.chipUnitNum in filter_chip_units]
# alias to match eCMD API spelling
relatedTargets = related_targets
def fapi2GetAttr(self, i_id):
rc, data = ecmd.fapi2GetAttr(self, i_id)
if rc == 0x0206005A: # FAPI_UNSUPPORTED_ATTRIBUTE
raise KeyError(i_id)
_rcwrap(rc)
return data
def fapi2SetAttr(self, i_id, i_data):
_rcwrap(ecmd.fapi2SetAttr(self, i_id, i_data))
class Ecmd(object):
def __init__(self, dll="", version="ver14", args=None, **kwargs):
self.dll = dll
self.version = version
self.args = args
self.extensions = kwargs
def __enter__(self):
_rcwrap(ecmd.ecmdLoadDll(self.dll, self.version))
setGlobalVar(ecmd.ECMD_GLOBALVAR_QUIETERRORMODE, 1)
if self.args:
_rcwrap(ecmd.ecmdCommandArgs(self.args))
for (name, version) in self.extensions.items():
getattr(ecmd, name+"InitExtension")(version)
return self
def __exit__(self, exc_type, exc_value, traceback):
ecmd.ecmdUnloadDll()
def loopTargets(target, looptype, mode=ecmd.ECMD_DYNAMIC_LOOP, warn_if_no_targets=False):
"""
target can be either a Target or a string containing a chip[.unit] specification
@rtype: list(Target)
"""
try:
if "." in target:
chip_type, unit_type = target.split(".", 2)
unit_num = "*"
else:
chip_type = target
unit_type = None
unit_num = None
target = Target(chipType=chip_type, chipUnitType=unit_type,
cage="*", node="*", slot="*", pos="*", chipUnitNum=unit_num)
except TypeError:
pass # target seems to be a Target, which is what we need
state = ecmd.ecmdLooperData()
my_target = target.__class__(target)
result = []
_rcwrap(ecmd.ecmdLooperInit(my_target, looptype, state, mode))
while ecmd.ecmdLooperNext(my_target, state, mode):
result.append(target.__class__(my_target))
if warn_if_no_targets and not result:
print("WARNING: Your selection of targets did not appear to yield any results.")
print(" Nothing will happen. You might need to specify a -c parameter, like -call.")
return result
def loadDataBuffer(filename, save_format = ecmd.ECMD_SAVE_FORMAT_BINARY):
"""
Load a file saved by ecmdDataBuffer and return an EcmdBitArray containing the data
@rtype: EcmdBitArray
"""
buf = ecmd.ecmdDataBuffer()
_bufwrap(buf.readFile(filename, save_format))
return _from_ecmdDataBuffer(buf)
def saveDataBuffer(data, filename, save_format = ecmd.ECMD_SAVE_FORMAT_BINARY):
"""
Save the contents of a bitstring to a file in ecmdDataBuffer format
"""
buf = _to_ecmdDataBuffer(data)
_bufwrap(buf.writeFile(filename, save_format))
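# --- Editor's illustrative sketch (not part of the original module) ---
# Field states are handled implicitly by Target, as described in its class
# docstring; the attribute values below are invented for illustration.
#
#     target = Target(chipType="pu", pos=0, core="*")  # VALID, VALID, WILDCARD
#     target.thread = None                             # thread becomes UNUSED again
#     print(target)                                    # formatted via ecmdWriteTarget()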
|
mklight/eCMD
|
ecmd-core/pyecmd/__init__.py
|
Python
|
apache-2.0
| 9,894
|
#!/usr/bin/env python
from setuptools import setup
setup(name='django_crowd_authz',
version='1.0.1',
description='Crowd Authentication for Django',
author='Daniel Craigmile',
author_email='danielc@pobox.com',
url='https://github.com/x110dc/django_crowd_authz',
py_modules=['auth.backends'],
install_requires=['requests == 1.2.3'],
)
|
x110dc/django_crowd_authz
|
setup.py
|
Python
|
apache-2.0
| 383
|
# Tim Cornwell <realtimcornwell@gmail.com>
#
# Definition of structures needed by the function interface. These are mostly
# subclasses of astropy classes.
#
import os
import numpy
import astropy.units as units
from astropy.coordinates import SkyCoord, ICRS, EarthLocation
from astropy.table import Table, Column, vstack
from astropy.wcs import WCS
from astropy.io.fits import HDUList, PrimaryHDU, BinTableHDU, table_to_hdu
import astropy.io.fits as fits
import h5py
from crocodile.simulate import *
from arl.data_models import *
from arl.parameters import crocodile_path
from util.read_oskar_vis import OskarVis
import logging
log = logging.getLogger(__name__)
def filter_configuration(fc: Configuration, params={}):
""" Filter a configuration e.g. remove certain antennas
:param fc:
:type Configuration:
:param params: Dictionary containing parameters
:returns: Configuration
"""
log.error("filter_configuration: No filter implemented yet")
return fc
def create_configuration_from_array(antxyz: numpy.array, name: str = None, location: EarthLocation = None,
mount: str = 'alt-az', names: str = '%d', meta: dict = None, params={}):
""" Define from parts
:param name:
:param antxyz: locations of antennas numpy.array[...,3]
:type numpy.array:
:param location: Location of array centre (reference for antenna locations)
:type EarthLocation:
:param mount: Mount type e.g. 'altaz'
:type str:
:param names: Generator for names e.g. 'SKA1_MID%d'
:type generator:
:type meta:
:type dict:
:returns: Configuration
"""
fc = Configuration()
    assert antxyz.shape[1] == 3, "Antenna array has wrong shape"
fc.data = Table(data=[names, antxyz, mount], names=["names", "xyz", "mount"], meta=meta)
fc.location = location
return fc
def create_configuration_from_file(antfile: str, name: str = None, location: EarthLocation = None, mount: str = 'altaz',
names: str = "%d", frame: str = 'local',
meta: dict = None,
params={}):
""" Define from a file
:param antfile: Antenna file name
:type str:
:param name: Name of array e.g. 'LOWBD2'
:type str:
:param location:
:type EarthLocation:
:param mount: mount type: 'altaz', 'xy'
:type str:
:param frame: 'local' | 'global'
:type str:
:param meta: Any meta info
:type dict:
:returns: Configuration
"""
fc = Configuration()
fc.name = name
fc.location = location
antxyz = numpy.genfromtxt(antfile, delimiter=",")
assert antxyz.shape[1] == 3, ("Antenna array has wrong shape %s" % antxyz.shape)
nants = antxyz.shape[0]
if frame == 'local':
latitude = location.geodetic[1].to(units.rad).value
antxyz = xyz_at_latitude(antxyz, latitude)
xyz = Column(antxyz, name="xyz")
anames = [names % ant for ant in range(nants)]
mounts = Column(numpy.repeat(mount, nants), name="mount")
fc.data = Table(data=[anames, xyz, mounts], names=["names", "xyz", "mount"], meta=meta)
fc.frame = frame
return fc
def create_LOFAR_configuration(antfile: str, meta: dict = None,
params={}):
""" Define from the LOFAR configuration file
:param antfile:
:type str:
:param name:
:type str:
:param meta:
:type dict:
:param params: Dictionary containing parameters
:returns: Configuration
"""
fc = Configuration()
antxyz = numpy.genfromtxt(antfile, skip_header=2, usecols=[1, 2, 3], delimiter=",")
nants = antxyz.shape[0]
assert antxyz.shape[1] == 3, "Antenna array has wrong shape %s" % antxyz.shape
anames = numpy.genfromtxt(antfile, dtype='str', skip_header=2, usecols=[0], delimiter=",")
mounts = Column(numpy.repeat('XY', nants), name="mount")
fc.data = Table(data=[anames, antxyz, mounts], names=["names", "xyz", "mount"], meta=meta)
fc.location = EarthLocation(x=[3826923.9] * units.m, y=[460915.1] * units.m, z=[5064643.2] * units.m)
return fc
def create_named_configuration(name: str = 'LOWBD2', params={}):
""" Standard configurations e.g. LOWBD2, MIDBD2
:param name: name of Configuration LOWBD2, LOWBD1, LOFAR, VLAA
:type str:
:returns: Configuration
"""
if name == 'LOWBD2':
location = EarthLocation(lon="116.4999", lat="-26.7000", height=300.0)
fc = create_configuration_from_file(antfile=crocodile_path("data/configurations/LOWBD2.csv"),
location=location, mount='xy',
names='LOWBD2_%d', name=name)
elif name == 'LOWBD1':
location = EarthLocation(lon="116.4999", lat="-26.7000", height=300.0)
fc = create_configuration_from_file(antfile=crocodile_path("data/configurations/LOWBD1.csv"),
location=location, mount='xy',
names='LOWBD1_%d', name=name)
elif name == 'LOFAR':
fc = create_LOFAR_configuration(antfile=crocodile_path("data/configurations/LOFAR.csv"))
elif name == 'VLAA':
location = EarthLocation(lon="-107.6184", lat="34.0784", height=2124.0)
fc = create_configuration_from_file(antfile=crocodile_path("data/configurations/VLA_A_hor_xyz.csv"),
location=location,
mount='altaz', names='VLA_%d',name=name)
elif name == 'VLAA_north': # Pseudo-VLAA at north pole
location = EarthLocation(lon="-107.6184", lat="90.000", height=2124.0)
fc = create_configuration_from_file(antfile=crocodile_path("data/configurations/VLA_A_hor_xyz.csv"),
location=location,
mount='altaz', names='VLA_%d', name=name)
elif name == 'LOWBD2_north': # Pseudo-SKA-LOW at north pole
location = EarthLocation(lon="116.4999", lat="90.000", height=300.0)
fc = create_configuration_from_file(antfile=crocodile_path("data/configurations/LOWBD2.csv"),
location=location, mount='xy',
names='LOWBD2_%d', name=name)
else:
fc = Configuration()
raise UserWarning("No such Configuration %s" % name)
return fc
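# --- Editor's illustrative sketch (not part of the original module) ---
# Typical use of the named configurations; assumes the bundled CSV antenna
# files are present under data/configurations/.
#
#     config = create_named_configuration('LOWBD2')
#     print(config.name, len(config.data))  # array name and number of antennas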
def import_visibility_from_ms(msfile: str, params={}) -> Visibility:
""" Import a visibility set from a measurement set
:param msfile: Name of measurement set
:type str:
:returns: Visibility
"""
log.error('test_support.import_visibility_from_ms: not yet implemented')
return Visibility()
def export_visibility_to_ms(vis: Visibility, msfile: str = None, params={}) -> Visibility:
""" Export a visibility set to a measurement set
:param vis: Name of visibility set
:param Visibility:
:param msfile: Name of output measurement set
:type str:
:returns: Visibility
"""
log.error('test_support.visibility_from_ms: not yet implemented')
def import_visibility_from_oskar(oskar_file: str, params={}) -> Visibility:
""" Import a visibility set from an OSKAR visibility file
:param oskar_file: Name of OSKAR visibility file
:type str:
:returns: Visibility
"""
# Extract data from Oskar file
oskar_vis = OskarVis(oskar_file)
ra,dec = oskar_vis.phase_centre()
a1,a2 = oskar_vis.stations(flatten=True)
# Make configuration
location = EarthLocation(lon = oskar_vis.telescope_lon,
lat = oskar_vis.telescope_lat,
height = oskar_vis.telescope_alt)
antxyz = numpy.transpose([oskar_vis.station_x,
oskar_vis.station_y,
oskar_vis.station_z])
name = oskar_vis.telescope_path
if name == '':
name = 'oskar-import'
config = Configuration(
name = name,
location = location,
xyz = antxyz
)
# Assume exactly one frequency and polarisation - that is the only
# supported case right now.
amps = oskar_vis.amplitudes(flatten=True)
amps = amps.reshape(list(amps.shape) + [1,1])
# Construct visibilities
return Visibility(
frequency = [oskar_vis.frequency(i) for i in range(oskar_vis.num_channels)],
phasecentre = SkyCoord(frame=ICRS, ra=ra, dec=dec, unit=units.deg),
configuration = config,
uvw = numpy.transpose(oskar_vis.uvw(flatten=True)),
time = oskar_vis.times(flatten=True),
antenna1 = a1,
antenna2 = a2,
vis = amps,
weight = numpy.ones(amps.shape))
def configuration_to_hdu(configuration : Configuration) -> BinTableHDU:
# Convert to HDU
hdu = table_to_hdu(configuration.data)
# Save rest of data into header fields (offensively ad-hoc, obviously)
hdu.header['NAME'] = configuration.name
    hdu.header['LOC_LAT'] = configuration.location.lat.value
    hdu.header['LOC_LON'] = configuration.location.lon.value
hdu.header['LOC_HGT'] = configuration.location.height.value
return hdu
def visibility_to_hdu(vis: Visibility) -> BinTableHDU:
# Convert to HDU
hdu = table_to_hdu(vis.data)
# Save rest of data into header fields (offensively ad-hoc, obviously)
pc = vis.phasecentre
hdu.header['PC_RA'] = pc.ra.to(units.deg).value
hdu.header['PC_DEC'] = pc.dec.to(units.deg).value
hdu.header['FREQ'] = ','.join(map(str, vis.frequency))
hdu.header['CONFIG'] = vis.configuration.name
return hdu
def export_visibility_to_fits(vis: Visibility, fits_file: str):
hdu = HDUList([
PrimaryHDU(),
configuration_to_hdu(vis.configuration),
visibility_to_hdu(vis)
])
with open(fits_file, "w") as f:
hdu.writeto(f, checksum=True)
def export_configuration_to_hdf5(cfg: Configuration, f: h5py.File, path: str = '/'):
grp = f.create_group(path)
grp.attrs['type'] = 'Configuration'
grp.attrs['name'] = cfg.name
grp.attrs['location'] = [cfg.location.lat.value,
cfg.location.lon.value,
cfg.location.height.value ]
for col in cfg.data.columns:
c = cfg.data[col]
# Unicode wide strings are not supported, convert to ASCII
if c.dtype.kind == 'U':
c = c.astype("S")
grp.create_dataset(col, data=c)
def export_visibility_to_hdf5(vis: Visibility, f: h5py.File, path: str = '/', maxshape={}):
grp = f.create_group(path)
grp.attrs['type'] = 'Visibility'
grp.attrs['phasecentre'] = [vis.phasecentre.ra.to(units.deg).value,
vis.phasecentre.dec.to(units.deg).value]
if vis.configuration is not None:
grp.attrs['configuration'] = vis.configuration.name
freq = numpy.array(vis.frequency)
grp.create_dataset('frequency', data=freq, maxshape=maxshape.get('frequency'))
for col in vis.data.columns:
if col == 'weight' and numpy.all(vis.data[col] == 1.0):
continue
grp.create_dataset(col, data=vis.data[col], maxshape=maxshape.get(col))
def import_configuration_from_hdf5(f: h5py.File, path: str = '/'):
"""Import telescope configuration from a HDF5 file.
:param f: Open HDF5 file to import data from
:param path: Group name to load data from
:returns: Configuration object
"""
# Access group, make sure it is the right type
grp = f[path]
assert grp.attrs['type'] == 'Configuration'
# Read table columns
table = Table()
for col in ['names', 'xyz', 'mount']:
table[col] = numpy.array(grp[col])
return Configuration(
name = grp.attrs['name'],
location = EarthLocation(lat=grp.attrs['location'][0],
lon=grp.attrs['location'][1],
height=grp.attrs['location'][2]),
data = table
)
def import_visibility_from_hdf5(f: h5py.File, path: str = '/', cfg: Configuration = None, cols = None):
"""Import visibilities from a HDF5 file.
:param f: Open HDF5 file to import data from
:param path: Group name to load data from
:param cfg: Configuration to set for visibilities
:returns: Visibilities object
"""
# Access group, make sure it is the right type
grp = f[path]
assert grp.attrs['type'] == 'Visibility'
# Read table columns
table = Table()
if cols is None:
cols = ['uvw', 'time', 'antenna1', 'antenna2', 'vis', 'weight']
for col in cols:
# Default weights to 1 if they are not found
if not col in grp and col == 'weight':
table[col] = numpy.ones(table['vis'].shape)
else:
table[col] = numpy.array(grp[col])
return Visibility(
frequency = grp['frequency'],
phasecentre = SkyCoord(ra=grp.attrs["phasecentre"][0],
dec=grp.attrs["phasecentre"][1],
frame=ICRS, unit=units.deg),
data = table
)
def import_visibility_baselines_from_hdf5(f: h5py.File, cfg: Configuration = None, cols = None):
"""Import visibilities for multiple baselines from a HDF5 group. This
means that we load every visibility set contained within the given group.
:param f: Open HDF5 file to import data from
:param cfg: Configuration to set for visibilities
:returns: Visibilities object
"""
# Collect visibilities
viss = []
for name in f:
# Get group
grp = f[name]
if not isinstance(grp, h5py.Group):
continue
# Visibilities?
if 'type' in grp.attrs and grp.attrs.get('type', '') == 'Visibility':
            viss.append(import_visibility_from_hdf5(f, grp.name, cfg, cols))
print('.', end='', flush=True)
else:
# Otherwise recurse
            viss += import_visibility_baselines_from_hdf5(grp, cfg, cols)
return viss
def hdu_to_configuration(hdu: BinTableHDU) -> Configuration:
lat = hdu.header.get('LOC_LAT')
lon = hdu.header.get('LOC_LON')
hgt = hdu.header.get('LOC_HGT')
loc = None
if not lat is None and not lon is None and not hgt is None:
loc = EarthLocation(lat=lat, lon=lon, height=hgt)
return Configuration(
data = Table(hdu.data),
name = hdu.header.get('NAME'),
location = loc
)
def hdu_to_visibility(hdu: BinTableHDU, configuration: Configuration = None) -> Visibility:
# Decode phase centre, if any
pc_ra = hdu.header.get('PC_RA')
pc_dec = hdu.header.get('PC_DEC')
pc = None
if not pc_ra is None and not pc_dec is None:
pc = SkyCoord(ra=pc_ra, dec=pc_dec, frame=ICRS, unit=units.deg)
# Check configuration name (additional security?)
if not configuration is None:
assert(configuration.name == hdu.header.get('CONFIG'))
return Visibility(
data = Table(hdu.data),
frequency = list(map(float, hdu.header['FREQ'].split(','))),
phasecentre = pc,
configuration = configuration
)
def import_visibility_from_fits(fits_file: str) -> Visibility:
with fits.open(fits_file) as hdulist:
# TODO: Check that it is the right kind of file...
config = hdu_to_configuration(hdulist[1])
return hdu_to_visibility(hdulist[2], config)
def import_image_from_fits(fitsfile: str):
""" Read an Image from fits
:param fitsfile:
:type str:
:returns: Image
"""
hdulist = fits.open(crocodile_path(fitsfile))
fim = Image()
fim.data = hdulist[0].data
    fim.wcs = WCS(hdulist[0].header)
hdulist.close()
log.debug("import_image_from_fits: Max, min in %s = %.6f, %.6f" % (fitsfile, fim.data.max(), fim.data.min()))
return fim
def create_test_image(canonical=True):
"""Create a useful test image
This is the test image M31 widely used in ALMA and other simulations. It is actually part of an Halpha region in
M31.
:param canonical: Make the image into a 4 dimensional image
:returns: Image
"""
im = import_image_from_fits(crocodile_path("data/models/M31.MOD"))
if canonical:
im = replicate_image(im)
return im
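# --- Editor's illustrative sketch (not part of the original module) ---
# The HDF5 helpers above are symmetric, so a visibility set can be written and
# read back like this (the file name and group paths are arbitrary examples):
#
#     with h5py.File('vis.h5', 'w') as f:
#         export_configuration_to_hdf5(vis.configuration, f, '/config')
#         export_visibility_to_hdf5(vis, f, '/vis')
#     with h5py.File('vis.h5', 'r') as f:
#         cfg = import_configuration_from_hdf5(f, '/config')
#         vis2 = import_visibility_from_hdf5(f, '/vis', cfg)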
|
SKA-ScienceDataProcessor/crocodile
|
arl/test_support.py
|
Python
|
apache-2.0
| 16,511
|
from unittest import TestCase
from pypika import (
ClickHouseQuery,
Database,
Table,
)
class ClickHouseQueryTests(TestCase):
def test_use_AS_keyword_for_alias(self):
t = Table('abc')
query = ClickHouseQuery.from_(t).select(t.foo.as_('f1'), t.bar.as_('f2'))
self.assertEqual(str(query), 'SELECT "foo" AS "f1","bar" AS "f2" FROM "abc"')
class ClickHouseDeleteTests(TestCase):
table_abc = Table("abc")
def test_omit_where(self):
q = ClickHouseQuery.from_("abc").delete()
self.assertEqual('ALTER TABLE "abc" DELETE', str(q))
def test_omit_where__table_schema(self):
q = ClickHouseQuery.from_(Table("abc", "schema1")).delete()
self.assertEqual('ALTER TABLE "schema1"."abc" DELETE', str(q))
def test_where_field_equals(self):
q1 = ClickHouseQuery.from_(self.table_abc).where(self.table_abc.foo == self.table_abc.bar).delete()
q2 = ClickHouseQuery.from_(self.table_abc).where(self.table_abc.foo.eq(self.table_abc.bar)).delete()
self.assertEqual('ALTER TABLE "abc" DELETE WHERE "foo"="bar"', str(q1))
self.assertEqual('ALTER TABLE "abc" DELETE WHERE "foo"="bar"', str(q2))
class ClickHouseUpdateTests(TestCase):
table_abc = Table("abc")
def test_update(self):
q = ClickHouseQuery.update(self.table_abc).where(self.table_abc.foo == 0).set("foo", "bar")
self.assertEqual('ALTER TABLE "abc" UPDATE "foo"=\'bar\' WHERE "foo"=0', str(q))
class ClickHouseDropQuery(TestCase):
table_abc = Table("abc")
database_xyz = Database("mydb")
cluster_name = "mycluster"
def test_drop_database(self):
q1 = ClickHouseQuery.drop_database(self.database_xyz)
q2 = ClickHouseQuery.drop_database(self.database_xyz).on_cluster(self.cluster_name)
q3 = ClickHouseQuery.drop_database(self.database_xyz).if_exists().on_cluster(self.cluster_name)
self.assertEqual('DROP DATABASE "mydb"', str(q1))
self.assertEqual('DROP DATABASE "mydb" ON CLUSTER "mycluster"', str(q2))
self.assertEqual('DROP DATABASE IF EXISTS "mydb" ON CLUSTER "mycluster"', str(q3))
def test_drop_table(self):
q1 = ClickHouseQuery.drop_table(self.table_abc)
q2 = ClickHouseQuery.drop_table(self.table_abc).on_cluster(self.cluster_name)
q3 = ClickHouseQuery.drop_table(self.table_abc).if_exists().on_cluster(self.cluster_name)
self.assertEqual('DROP TABLE "abc"', str(q1))
self.assertEqual('DROP TABLE "abc" ON CLUSTER "mycluster"', str(q2))
self.assertEqual('DROP TABLE IF EXISTS "abc" ON CLUSTER "mycluster"', str(q3))
def test_drop_dictionary(self):
q1 = ClickHouseQuery.drop_dictionary("dict")
q2 = ClickHouseQuery.drop_dictionary("dict").on_cluster(self.cluster_name)
q3 = ClickHouseQuery.drop_dictionary("dict").if_exists().on_cluster(self.cluster_name)
self.assertEqual('DROP DICTIONARY "dict"', str(q1))
self.assertEqual('DROP DICTIONARY "dict"', str(q2)) # NO CLUSTER
self.assertEqual('DROP DICTIONARY IF EXISTS "dict"', str(q3)) # NO CLUSTER
def test_drop_other(self):
q1 = ClickHouseQuery.drop_quota("myquota")
q2 = ClickHouseQuery.drop_user("myuser")
q3 = ClickHouseQuery.drop_view("myview")
self.assertEqual('DROP QUOTA "myquota"', str(q1))
self.assertEqual('DROP USER "myuser"', str(q2))
self.assertEqual('DROP VIEW "myview"', str(q3))
|
kayak/pypika
|
pypika/tests/dialects/test_clickhouse.py
|
Python
|
apache-2.0
| 3,473
|
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os.path
import mock
import string
import time
from brick import exception
from brick.i18n import _
from brick.initiator import connector
from brick.initiator import host_driver
from brick.openstack.common import log as logging
from brick.openstack.common import loopingcall
from brick.openstack.common import processutils as putils
from brick import test
LOG = logging.getLogger(__name__)
class ConnectorTestCase(test.TestCase):
def setUp(self):
super(ConnectorTestCase, self).setUp()
self.cmds = []
def fake_execute(self, *cmd, **kwargs):
self.cmds.append(string.join(cmd))
return "", None
def test_connect_volume(self):
self.connector = connector.InitiatorConnector(None)
self.assertRaises(NotImplementedError,
self.connector.connect_volume, None)
def test_disconnect_volume(self):
self.connector = connector.InitiatorConnector(None)
self.assertRaises(NotImplementedError,
self.connector.disconnect_volume, None, None)
def test_factory(self):
obj = connector.InitiatorConnector.factory('iscsi', None)
self.assertEqual(obj.__class__.__name__, "ISCSIConnector")
obj = connector.InitiatorConnector.factory('fibre_channel', None)
self.assertEqual(obj.__class__.__name__, "FibreChannelConnector")
obj = connector.InitiatorConnector.factory('aoe', None)
self.assertEqual(obj.__class__.__name__, "AoEConnector")
obj = connector.InitiatorConnector.factory(
'nfs', None, nfs_mount_point_base='/mnt/test')
self.assertEqual(obj.__class__.__name__, "RemoteFsConnector")
obj = connector.InitiatorConnector.factory(
'glusterfs', None, glusterfs_mount_point_base='/mnt/test')
self.assertEqual(obj.__class__.__name__, "RemoteFsConnector")
obj = connector.InitiatorConnector.factory('local', None)
self.assertEqual(obj.__class__.__name__, "LocalConnector")
self.assertRaises(ValueError,
connector.InitiatorConnector.factory,
"bogus", None)
def test_check_valid_device_with_wrong_path(self):
self.connector = connector.InitiatorConnector(None)
self.connector._execute = \
lambda *args, **kwargs: ("", None)
self.assertFalse(self.connector.check_valid_device('/d0v'))
def test_check_valid_device(self):
self.connector = connector.InitiatorConnector(None)
self.connector._execute = \
lambda *args, **kwargs: ("", "")
self.assertTrue(self.connector.check_valid_device('/dev'))
def test_check_valid_device_with_cmd_error(self):
def raise_except(*args, **kwargs):
raise putils.ProcessExecutionError
self.connector = connector.InitiatorConnector(None)
self.connector._execute = mock.Mock()
self.connector._execute.side_effect = raise_except
self.assertFalse(self.connector.check_valid_device('/dev'))
class HostDriverTestCase(test.TestCase):
def setUp(self):
super(HostDriverTestCase, self).setUp()
isdir_mock = mock.Mock()
isdir_mock.return_value = True
os.path.isdir = isdir_mock
self.devlist = ['device1', 'device2']
listdir_mock = mock.Mock()
listdir_mock.return_value = self.devlist
os.listdir = listdir_mock
def test_host_driver(self):
expected = ['/dev/disk/by-path/' + dev for dev in self.devlist]
driver = host_driver.HostDriver()
actual = driver.get_all_block_devices()
self.assertEqual(expected, actual)
class ISCSIConnectorTestCase(ConnectorTestCase):
def setUp(self):
super(ISCSIConnectorTestCase, self).setUp()
self.connector = connector.ISCSIConnector(
None, execute=self.fake_execute, use_multipath=False)
get_name_mock = mock.Mock()
get_name_mock.return_value = "/dev/sdb"
self.connector._linuxscsi.get_name_from_path = get_name_mock
def iscsi_connection(self, volume, location, iqn):
return {
'driver_volume_type': 'iscsi',
'data': {
'volume_id': volume['id'],
'target_portal': location,
'target_iqn': iqn,
'target_lun': 1,
}
}
def test_get_initiator(self):
def initiator_no_file(*args, **kwargs):
raise putils.ProcessExecutionError('No file')
def initiator_get_text(*arg, **kwargs):
text = ('## DO NOT EDIT OR REMOVE THIS FILE!\n'
'## If you remove this file, the iSCSI daemon '
'will not start.\n'
'## If you change the InitiatorName, existing '
'access control lists\n'
'## may reject this initiator. The InitiatorName must '
'be unique\n'
'## for each iSCSI initiator. Do NOT duplicate iSCSI '
'InitiatorNames.\n'
'InitiatorName=iqn.1234-56.foo.bar:01:23456789abc')
return text, None
self.connector._execute = initiator_no_file
initiator = self.connector.get_initiator()
self.assertIsNone(initiator)
self.connector._execute = initiator_get_text
initiator = self.connector.get_initiator()
self.assertEqual(initiator, 'iqn.1234-56.foo.bar:01:23456789abc')
@test.testtools.skipUnless(os.path.exists('/dev/disk/by-path'),
'Test requires /dev/disk/by-path')
def test_connect_volume(self):
self.stubs.Set(os.path, 'exists', lambda x: True)
location = '10.0.2.15:3260'
name = 'volume-00000001'
iqn = 'iqn.2010-10.org.openstack:%s' % name
vol = {'id': 1, 'name': name}
connection_info = self.iscsi_connection(vol, location, iqn)
device = self.connector.connect_volume(connection_info['data'])
dev_str = '/dev/disk/by-path/ip-%s-iscsi-%s-lun-1' % (location, iqn)
self.assertEqual(device['type'], 'block')
self.assertEqual(device['path'], dev_str)
self.connector.disconnect_volume(connection_info['data'], device)
expected_commands = [('iscsiadm -m node -T %s -p %s' %
(iqn, location)),
('iscsiadm -m session'),
('iscsiadm -m node -T %s -p %s --login' %
(iqn, location)),
('iscsiadm -m node -T %s -p %s --op update'
' -n node.startup -v automatic'
% (iqn, location)),
('iscsiadm -m node --rescan'),
('iscsiadm -m session --rescan'),
('blockdev --flushbufs /dev/sdb'),
('tee -a /sys/block/sdb/device/delete'),
('iscsiadm -m node -T %s -p %s --op update'
' -n node.startup -v manual' % (iqn, location)),
('iscsiadm -m node -T %s -p %s --logout' %
(iqn, location)),
('iscsiadm -m node -T %s -p %s --op delete' %
(iqn, location)), ]
LOG.debug("self.cmds = %s" % self.cmds)
LOG.debug("expected = %s" % expected_commands)
self.assertEqual(expected_commands, self.cmds)
def test_connect_volume_with_multipath(self):
location = '10.0.2.15:3260'
name = 'volume-00000001'
iqn = 'iqn.2010-10.org.openstack:%s' % name
vol = {'id': 1, 'name': name}
connection_properties = self.iscsi_connection(vol, location, iqn)
self.connector_with_multipath = \
connector.ISCSIConnector(None, use_multipath=True)
self.connector_with_multipath._run_iscsiadm_bare = \
lambda *args, **kwargs: "%s %s" % (location, iqn)
portals_mock = mock.Mock()
portals_mock.return_value = [[location, iqn]]
self.connector_with_multipath.\
_get_target_portals_from_iscsiadm_output = portals_mock
connect_to_mock = mock.Mock()
connect_to_mock.return_value = None
self.connector_with_multipath._connect_to_iscsi_portal = \
connect_to_mock
rescan_iscsi_mock = mock.Mock()
rescan_iscsi_mock.return_value = None
self.connector_with_multipath._rescan_iscsi = rescan_iscsi_mock
rescan_multipath_mock = mock.Mock()
rescan_multipath_mock.return_value = None
self.connector_with_multipath._rescan_multipath = \
rescan_multipath_mock
get_device_mock = mock.Mock()
get_device_mock.return_value = 'iqn.2010-10.org.openstack:%s' % name
self.connector_with_multipath._get_multipath_device_name = \
get_device_mock
exists_mock = mock.Mock()
exists_mock.return_value = True
os.path.exists = exists_mock
result = self.connector_with_multipath.connect_volume(
connection_properties['data'])
expected_result = {'path': 'iqn.2010-10.org.openstack:volume-00000001',
'type': 'block'}
self.assertEqual(result, expected_result)
def test_connect_volume_with_not_found_device(self):
exists_mock = mock.Mock()
exists_mock.return_value = False
os.path.exists = exists_mock
sleep_mock = mock.Mock()
sleep_mock.return_value = None
time.sleep = sleep_mock
location = '10.0.2.15:3260'
name = 'volume-00000001'
iqn = 'iqn.2010-10.org.openstack:%s' % name
vol = {'id': 1, 'name': name}
connection_info = self.iscsi_connection(vol, location, iqn)
self.assertRaises(exception.VolumeDeviceNotFound,
self.connector.connect_volume,
connection_info['data'])
def test_get_target_portals_from_iscsiadm_output(self):
connector = self.connector
test_output = '''10.15.84.19:3260 iqn.1992-08.com.netapp:sn.33615311
10.15.85.19:3260 iqn.1992-08.com.netapp:sn.33615311'''
res = connector._get_target_portals_from_iscsiadm_output(test_output)
ip_iqn1 = ['10.15.84.19:3260', 'iqn.1992-08.com.netapp:sn.33615311']
ip_iqn2 = ['10.15.85.19:3260', 'iqn.1992-08.com.netapp:sn.33615311']
expected = [ip_iqn1, ip_iqn2]
self.assertEqual(expected, res)
def test_get_multipath_device_name(self):
realpath = mock.Mock()
realpath.return_value = None
os.path.realpath = realpath
multipath_return_string = [('mpath2 (20017380006c00036)'
'dm-7 IBM,2810XIV')]
self.connector._run_multipath = \
lambda *args, **kwargs: multipath_return_string
expected = '/dev/mapper/mpath2'
self.assertEqual(expected,
self.connector.
_get_multipath_device_name('/dev/md-1'))
def test_get_iscsi_devices(self):
paths = [('ip-10.0.0.1:3260-iscsi-iqn.2013-01.ro.'
'com.netapp:node.netapp02-lun-0')]
walk_mock = lambda x: [(['.'], ['by-path'], paths)]
os.walk = walk_mock
self.assertEqual(self.connector._get_iscsi_devices(), paths)
def test_get_iscsi_devices_with_empty_dir(self):
walk_mock = mock.Mock()
walk_mock.return_value = []
os.walk = walk_mock
self.assertEqual(self.connector._get_iscsi_devices(), [])
def test_get_multipath_iqn(self):
paths = [('ip-10.0.0.1:3260-iscsi-iqn.2013-01.ro.'
'com.netapp:node.netapp02-lun-0')]
realpath = lambda x: '/dev/disk/by-path/%s' % paths[0]
os.path.realpath = realpath
get_iscsi_mock = mock.Mock()
get_iscsi_mock.return_value = paths
self.connector._get_iscsi_devices = get_iscsi_mock
get_multipath_device_mock = mock.Mock()
get_multipath_device_mock.return_value = paths[0]
self.connector._get_multipath_device_name = get_multipath_device_mock
self.assertEqual(self.connector._get_multipath_iqn(paths[0]),
'iqn.2013-01.ro.com.netapp:node.netapp02')
def test_disconnect_volume_multipath_iscsi(self):
result = []
def fake_disconnect_from_iscsi_portal(properties):
result.append(properties)
iqn1 = 'iqn.2013-01.ro.com.netapp:node.netapp01'
iqn2 = 'iqn.2013-01.ro.com.netapp:node.netapp02'
iqns = [iqn1, iqn2]
portal = '10.0.0.1:3260'
dev = ('ip-%s-iscsi-%s-lun-0' % (portal, iqn1))
get_portals_mock = mock.Mock()
get_portals_mock.return_value = [[portal, iqn1]]
rescan_iscsi_mock = mock.Mock()
rescan_iscsi_mock.return_value = None
rescan_multipath = mock.Mock()
rescan_multipath.return_value = None
get_block_devices_mock = mock.Mock()
get_block_devices_mock.return_value = [dev, '/dev/mapper/md-1']
get_multipath_name_mock = mock.Mock()
get_multipath_name_mock.return_value = '/dev/mapper/md-3'
self.connector._get_multipath_iqn = lambda x: iqns.pop()
disconnect_mock = fake_disconnect_from_iscsi_portal
self.connector._disconnect_from_iscsi_portal = disconnect_mock
fake_property = {'target_portal': portal,
'target_iqn': iqn1}
self.connector._disconnect_volume_multipath_iscsi(fake_property,
'fake/multipath')
# Target in use by other mp devices, don't disconnect
self.assertEqual([], result)
def test_disconnect_volume_multipath_iscsi_without_other_mp_devices(self):
result = []
def fake_disconnect_from_iscsi_portal(properties):
result.append(properties)
portal = '10.0.2.15:3260'
name = 'volume-00000001'
iqn = 'iqn.2010-10.org.openstack:%s' % name
get_portals_mock = mock.Mock()
get_portals_mock.return_value = [[portal, iqn]]
self.connector._get_target_portals_from_iscsiadm_output = \
get_portals_mock
rescan_iscsi_mock = mock.Mock()
rescan_iscsi_mock.return_value = None
self.connector._rescan_iscsi = rescan_iscsi_mock
rescan_multipath_mock = mock.Mock()
rescan_multipath_mock.return_value = None
self.connector._rescan_multipath = rescan_multipath_mock
get_all_devices_mock = mock.Mock()
get_all_devices_mock.return_value = []
self.connector.driver.get_all_block_devices = get_all_devices_mock
self.connector._disconnect_from_iscsi_portal = \
fake_disconnect_from_iscsi_portal
fake_property = {'target_portal': portal,
'target_iqn': iqn}
self.connector._disconnect_volume_multipath_iscsi(fake_property,
'fake/multipath')
# Target not in use by other mp devices, disconnect
self.assertEqual([fake_property], result)
class FibreChannelConnectorTestCase(ConnectorTestCase):
def setUp(self):
super(FibreChannelConnectorTestCase, self).setUp()
self.connector = connector.FibreChannelConnector(
None, execute=self.fake_execute, use_multipath=False)
self.assertIsNotNone(self.connector)
self.assertIsNotNone(self.connector._linuxfc)
self.assertIsNotNone(self.connector._linuxscsi)
def fake_get_fc_hbas(self):
return [{'ClassDevice': 'host1',
'ClassDevicePath': '/sys/devices/pci0000:00/0000:00:03.0'
'/0000:05:00.2/host1/fc_host/host1',
'dev_loss_tmo': '30',
'fabric_name': '0x1000000533f55566',
'issue_lip': '<store method only>',
'max_npiv_vports': '255',
'maxframe_size': '2048 bytes',
'node_name': '0x200010604b019419',
'npiv_vports_inuse': '0',
'port_id': '0x680409',
'port_name': '0x100010604b019419',
'port_state': 'Online',
'port_type': 'NPort (fabric via point-to-point)',
'speed': '10 Gbit',
'supported_classes': 'Class 3',
'supported_speeds': '10 Gbit',
'symbolic_name': 'Emulex 554M FV4.0.493.0 DV8.3.27',
'tgtid_bind_type': 'wwpn (World Wide Port Name)',
'uevent': None,
'vport_create': '<store method only>',
'vport_delete': '<store method only>'}]
def fake_get_fc_hbas_info(self):
hbas = self.fake_get_fc_hbas()
info = [{'port_name': hbas[0]['port_name'].replace('0x', ''),
'node_name': hbas[0]['node_name'].replace('0x', ''),
'host_device': hbas[0]['ClassDevice'],
'device_path': hbas[0]['ClassDevicePath']}]
return info
def fibrechan_connection(self, volume, location, wwn):
return {'driver_volume_type': 'fibrechan',
'data': {
'volume_id': volume['id'],
'target_portal': location,
'target_wwn': wwn,
'target_lun': 1,
}}
def test_connect_volume(self):
self.connector._linuxfc.get_fc_hbas = self.fake_get_fc_hbas
self.connector._linuxfc.get_fc_hbas_info = \
self.fake_get_fc_hbas_info
exists_mock = mock.Mock()
exists_mock.return_value = True
os.path.exists = exists_mock
realpath_mock = mock.Mock()
realpath_mock.return_value = '/dev/sdb'
os.path.realpath = realpath_mock
multipath_devname = '/dev/md-1'
devices = {"device": multipath_devname,
"id": "1234567890",
"devices": [{'device': '/dev/sdb',
'address': '1:0:0:1',
'host': 1, 'channel': 0,
'id': 0, 'lun': 1}]}
find_device_mock = mock.Mock()
find_device_mock.return_value = devices
self.connector._linuxscsi.find_multipath_device = find_device_mock
remove_device_mock = mock.Mock()
remove_device_mock.return_value = None
self.connector._linuxscsi.remove_scsi_device = remove_device_mock
get_device_info_mock = mock.Mock()
get_device_info_mock.return_value = devices['devices'][0]
self.connector._linuxscsi.get_device_info = get_device_info_mock
location = '10.0.2.15:3260'
name = 'volume-00000001'
vol = {'id': 1, 'name': name}
# Should work for string, unicode, and list
wwns = ['1234567890123456', unicode('1234567890123456'),
['1234567890123456', '1234567890123457']]
for wwn in wwns:
connection_info = self.fibrechan_connection(vol, location, wwn)
dev_info = self.connector.connect_volume(connection_info['data'])
exp_wwn = wwn[0] if isinstance(wwn, list) else wwn
dev_str = ('/dev/disk/by-path/pci-0000:05:00.2-fc-0x%s-lun-1' %
exp_wwn)
self.assertEqual(dev_info['type'], 'block')
self.assertEqual(dev_info['path'], dev_str)
self.connector.disconnect_volume(connection_info['data'], dev_info)
expected_commands = []
self.assertEqual(expected_commands, self.cmds)
# Should not work for anything other than string, unicode, and list
connection_info = self.fibrechan_connection(vol, location, 123)
self.assertRaises(exception.NoFibreChannelHostsFound,
self.connector.connect_volume,
connection_info['data'])
get_fc_hbas_mock = mock.Mock()
get_fc_hbas_mock.return_value = []
self.connector._linuxfc.get_fc_hbas = get_fc_hbas_mock
get_fc_hbas_info_mock = mock.Mock()
get_fc_hbas_info_mock.return_value = []
self.connector._linuxfc.get_fc_hbas_info = get_fc_hbas_info_mock
self.assertRaises(exception.NoFibreChannelHostsFound,
self.connector.connect_volume,
connection_info['data'])
class FakeFixedIntervalLoopingCall(object):
def __init__(self, f=None, *args, **kw):
self.args = args
self.kw = kw
self.f = f
self._stop = False
def stop(self):
self._stop = True
def wait(self):
return self
def start(self, interval, initial_delay=None):
while not self._stop:
try:
self.f(*self.args, **self.kw)
except loopingcall.LoopingCallDone:
return self
except Exception:
LOG.exception(_('in fixed duration looping call'))
raise
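# Note on the fake above: it stands in for loopingcall.FixedIntervalLoopingCall so that
# retry loops in the AoE connector run synchronously inside the test process; start()
# simply keeps calling the wrapped function until it raises LoopingCallDone, ignoring
# the requested interval and initial delay.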
class AoEConnectorTestCase(ConnectorTestCase):
"""Test cases for AoE initiator class."""
def setUp(self):
super(AoEConnectorTestCase, self).setUp()
self.connector = connector.AoEConnector('sudo')
self.connection_properties = {'target_shelf': 'fake_shelf',
'target_lun': 'fake_lun'}
loopingcall.FixedIntervalLoopingCall = FakeFixedIntervalLoopingCall
def _mock_path_exists(self, aoe_path, mock_values=None):
exists_mock = mock.Mock()
exists_mock.return_value = mock_values
os.path.exists = exists_mock
def test_connect_volume(self):
"""Ensure that if path exist aoe-revaliadte was called."""
aoe_device, aoe_path = self.connector._get_aoe_info(
self.connection_properties)
self._mock_path_exists(aoe_path, [True, True])
exec_mock = mock.Mock()
exec_mock.return_value = ["", ""]
self.connector._execute = exec_mock
self.connector.connect_volume(self.connection_properties)
def test_connect_volume_without_path(self):
"""Ensure that if path doesn't exist aoe-discovery was called."""
aoe_device, aoe_path = self.connector._get_aoe_info(
self.connection_properties)
expected_info = {
'type': 'block',
'device': aoe_device,
'path': aoe_path,
}
self._mock_path_exists(aoe_path, [False, True])
exec_mock = mock.Mock()
exec_mock.return_value = ["", ""]
self.connector._execute = exec_mock
volume_info = self.connector.connect_volume(
self.connection_properties)
self.assertDictMatch(volume_info, expected_info)
def test_connect_volume_could_not_discover_path(self):
aoe_device, aoe_path = self.connector._get_aoe_info(
self.connection_properties)
exists_mock = mock.Mock()
exists_mock.return_value = False
os.path.exists = exists_mock
exec_mock = mock.Mock()
exec_mock.return_value = ["", ""]
self.connector._execute = exec_mock
self.assertRaises(exception.VolumeDeviceNotFound,
self.connector.connect_volume,
self.connection_properties)
def test_disconnect_volume(self):
"""Ensure that if path exist aoe-revaliadte was called."""
aoe_device, aoe_path = self.connector._get_aoe_info(
self.connection_properties)
self._mock_path_exists(aoe_path, [True])
exec_mock = mock.Mock()
exec_mock.return_value = ["", ""]
self.connector._execute = exec_mock
self.connector.disconnect_volume(self.connection_properties, {})
class RemoteFsConnectorTestCase(ConnectorTestCase):
"""Test cases for Remote FS initiator class."""
TEST_DEV = '172.18.194.100:/var/nfs'
TEST_PATH = '/mnt/test/df0808229363aad55c27da50c38d6328'
def setUp(self):
super(RemoteFsConnectorTestCase, self).setUp()
self.connection_properties = {
'export': self.TEST_DEV,
'name': '9c592d52-ce47-4263-8c21-4ecf3c029cdb'}
self.connector = connector.RemoteFsConnector(
'nfs', root_helper='sudo', nfs_mount_point_base='/mnt/test',
nfs_mount_options='vers=3')
def test_connect_volume(self):
"""Test the basic connect volume case."""
client = self.connector._remotefsclient
client.mount = mock.Mock()
client.get_mount_point = mock.Mock()
client.get_mount_point.return_value = "ass"
self.connector.connect_volume(self.connection_properties)
def test_disconnect_volume(self):
"""Nothing should happen here -- make sure it doesn't blow up."""
self.connector.disconnect_volume(self.connection_properties, {})
class LocalConnectorTestCase(test.TestCase):
def setUp(self):
super(LocalConnectorTestCase, self).setUp()
self.connection_properties = {'name': 'foo',
'device_path': '/tmp/bar'}
def test_connect_volume(self):
self.connector = connector.LocalConnector(None)
cprops = self.connection_properties
dev_info = self.connector.connect_volume(cprops)
self.assertEqual(dev_info['type'], 'local')
self.assertEqual(dev_info['path'], cprops['device_path'])
def test_connect_volume_with_invalid_connection_data(self):
self.connector = connector.LocalConnector(None)
cprops = {}
self.assertRaises(ValueError,
self.connector.connect_volume, cprops)
|
hemna/cinder-brick
|
brick/tests/initiator/test_connector.py
|
Python
|
apache-2.0
| 26,481
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# write a correct test!
import unittest
import pygimli as pg
from pygimli.core.matrix import RVector, MatrixBase
class Matrix(pg.matrix.MatrixBase):
def __init__(self):
pg.MatrixBase.__init__(self)
    def rows(self):
        return 1
    def cols(self):
        return 1
def mult(self, b):
ret = pg.Vector(self.rows())
print("TestMatrix::mult")
return ret
def transMult(self, b):
ret = pg.Vector(self.cols())
print("TestMatrix::transMult")
return ret
def save(self, name):
print("TestMatrix::save", name)
class Modelling(pg.core.ModellingBase):
def __init__(self):
pg.core.ModellingBase.__init__(self, True)
self.regionManager().setParameterCount(1)
self.mat = Matrix()
def response(self, model):
print("TestModelling::response")
res = pg.Vector(1, 1.0)
return res
def jacobian(self):
print("TestModelling::jacobian()")
return self.mat
def createJacobian(self, model):
print("TestModelling::createJacobian")
class TestOwnMatrix(unittest.TestCase):
def test_runFOP(self):
        #F = Modelling()
#dat = pg.Vector(1, 1)
#err = pg.Vector(1, 0.00001)
#inv = pg.Inversion(dat, F, True, True)
#inv.setError(err)
#inv.run()
pass
if __name__ == '__main__':
unittest.main()
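# Note on the disabled block in test_runFOP: the apparent intent is for a pygimli
# inversion to consume the custom Matrix returned by Modelling.jacobian() through the
# mult()/transMult() interface defined at the top of the file; the test stays a no-op
# until that workflow is wired up.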
|
gimli-org/gimli
|
pygimli/testing/test_OwnMatrix.py
|
Python
|
apache-2.0
| 1,449
|
# coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
class ProcessorCollectionProcessorCollection(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
ProcessorCollectionProcessorCollection - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'odata_context': 'Odata400Context',
'odata_id': 'Odata400Id',
'odata_type': 'Odata400Type',
'description': 'ResourceDescription',
'members': 'list[ProcessorProcessor]',
'membersodata_count': 'Odata400Count',
'membersodata_navigation_link': 'Odata400IdRef',
'name': 'ResourceName',
'oem': 'ResourceOem'
}
self.attribute_map = {
'odata_context': '@odata.context',
'odata_id': '@odata.id',
'odata_type': '@odata.type',
'description': 'Description',
'members': 'Members',
'membersodata_count': 'Members@odata.count',
'membersodata_navigation_link': 'Members@odata.navigationLink',
'name': 'Name',
'oem': 'Oem'
}
self._odata_context = None
self._odata_id = None
self._odata_type = None
self._description = None
self._members = None
self._membersodata_count = None
self._membersodata_navigation_link = None
self._name = None
self._oem = None
@property
def odata_context(self):
"""
Gets the odata_context of this ProcessorCollectionProcessorCollection.
:return: The odata_context of this ProcessorCollectionProcessorCollection.
:rtype: Odata400Context
"""
return self._odata_context
@odata_context.setter
def odata_context(self, odata_context):
"""
Sets the odata_context of this ProcessorCollectionProcessorCollection.
:param odata_context: The odata_context of this ProcessorCollectionProcessorCollection.
:type: Odata400Context
"""
self._odata_context = odata_context
@property
def odata_id(self):
"""
Gets the odata_id of this ProcessorCollectionProcessorCollection.
:return: The odata_id of this ProcessorCollectionProcessorCollection.
:rtype: Odata400Id
"""
return self._odata_id
@odata_id.setter
def odata_id(self, odata_id):
"""
Sets the odata_id of this ProcessorCollectionProcessorCollection.
:param odata_id: The odata_id of this ProcessorCollectionProcessorCollection.
:type: Odata400Id
"""
self._odata_id = odata_id
@property
def odata_type(self):
"""
Gets the odata_type of this ProcessorCollectionProcessorCollection.
:return: The odata_type of this ProcessorCollectionProcessorCollection.
:rtype: Odata400Type
"""
return self._odata_type
@odata_type.setter
def odata_type(self, odata_type):
"""
Sets the odata_type of this ProcessorCollectionProcessorCollection.
:param odata_type: The odata_type of this ProcessorCollectionProcessorCollection.
:type: Odata400Type
"""
self._odata_type = odata_type
@property
def description(self):
"""
Gets the description of this ProcessorCollectionProcessorCollection.
:return: The description of this ProcessorCollectionProcessorCollection.
:rtype: ResourceDescription
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this ProcessorCollectionProcessorCollection.
:param description: The description of this ProcessorCollectionProcessorCollection.
:type: ResourceDescription
"""
self._description = description
@property
def members(self):
"""
Gets the members of this ProcessorCollectionProcessorCollection.
Contains the members of this collection.
:return: The members of this ProcessorCollectionProcessorCollection.
:rtype: list[ProcessorProcessor]
"""
return self._members
@members.setter
def members(self, members):
"""
Sets the members of this ProcessorCollectionProcessorCollection.
Contains the members of this collection.
:param members: The members of this ProcessorCollectionProcessorCollection.
:type: list[ProcessorProcessor]
"""
self._members = members
@property
def membersodata_count(self):
"""
Gets the membersodata_count of this ProcessorCollectionProcessorCollection.
:return: The membersodata_count of this ProcessorCollectionProcessorCollection.
:rtype: Odata400Count
"""
return self._membersodata_count
@membersodata_count.setter
def membersodata_count(self, membersodata_count):
"""
Sets the membersodata_count of this ProcessorCollectionProcessorCollection.
:param membersodata_count: The membersodata_count of this ProcessorCollectionProcessorCollection.
:type: Odata400Count
"""
self._membersodata_count = membersodata_count
@property
def membersodata_navigation_link(self):
"""
Gets the membersodata_navigation_link of this ProcessorCollectionProcessorCollection.
:return: The membersodata_navigation_link of this ProcessorCollectionProcessorCollection.
:rtype: Odata400IdRef
"""
return self._membersodata_navigation_link
@membersodata_navigation_link.setter
def membersodata_navigation_link(self, membersodata_navigation_link):
"""
Sets the membersodata_navigation_link of this ProcessorCollectionProcessorCollection.
:param membersodata_navigation_link: The membersodata_navigation_link of this ProcessorCollectionProcessorCollection.
:type: Odata400IdRef
"""
self._membersodata_navigation_link = membersodata_navigation_link
@property
def name(self):
"""
Gets the name of this ProcessorCollectionProcessorCollection.
:return: The name of this ProcessorCollectionProcessorCollection.
:rtype: ResourceName
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this ProcessorCollectionProcessorCollection.
:param name: The name of this ProcessorCollectionProcessorCollection.
:type: ResourceName
"""
self._name = name
@property
def oem(self):
"""
Gets the oem of this ProcessorCollectionProcessorCollection.
This is the manufacturer/provider specific extension moniker used to divide the Oem object into sections.
:return: The oem of this ProcessorCollectionProcessorCollection.
:rtype: ResourceOem
"""
return self._oem
@oem.setter
def oem(self, oem):
"""
Sets the oem of this ProcessorCollectionProcessorCollection.
This is the manufacturer/provider specific extension moniker used to divide the Oem object into sections.
:param oem: The oem of this ProcessorCollectionProcessorCollection.
:type: ResourceOem
"""
self._oem = oem
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
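# A minimal usage sketch (field values are illustrative, not taken from any real
# Redfish endpoint): the generated model is populated through its properties and
# serialized with to_dict()/to_str().
if __name__ == "__main__":
    collection = ProcessorCollectionProcessorCollection()
    collection.name = "Processor Collection"
    collection.members = []
    print(collection.to_str())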
|
jlongever/redfish-client-python
|
on_http_redfish_1_0/models/processor_collection_processor_collection.py
|
Python
|
apache-2.0
| 9,550
|
"""
Usage:
feed_checker.py CALENDAR_PATH CREDENTIALS_PATH EVENTS_OUTPUT
Arguments:
CALENDAR_PATH path to ical file
CREDENTIALS_PATH path to file with google user credentials
EVENTS_OUTPUT path to file to save upcoming events for hangout runner
"""
from icalendar import Calendar
import arrow
from yaml import dump
from docopt import docopt
def get_upcomming_events_from_google_ical_file(ical_path):
gcal = Calendar.from_ical(open(ical_path, 'rb').read())
now = arrow.utcnow()
events = (item for item in gcal.subcomponents if item.name == "VEVENT")
upcomming_events = (e for e in events if arrow.get(e['DTSTART'].dt) > now)
return [
dict(event_url=str(event.get('URL', '')),
start_time=event['DTSTART'].dt.isoformat(),
end_time=event['DTEND'].dt.isoformat(),
last_modified=event['LAST-MODIFIED'].dt.isoformat(),
created=event['CREATED'].dt.isoformat(),
uid=str(event['UID']),
# XXX: can't find a way to get event attendees list
# attendees=[str(email)[7:] for email in event['ATTENDEE']]
# if isinstance(event['ATTENDEE'], list) else
# [str(event['ATTENDEE'])[7:]],
attendees=None,
description=str(event['DESCRIPTION']),
location=str(event['LOCATION']),
summary=str(event['SUMMARY']),)
for event in upcomming_events]
def main(calendar_path, credentials_path, events_output):
# get all upcoming events from feed
events = get_upcomming_events_from_google_ical_file(calendar_path)
# save events to yaml file for internal use
with open(events_output, 'w') as events_output_file:
events_output_file.write(
dump({'upcoming_events': events}, default_flow_style=False))
if __name__ == "__main__":
arguments = docopt(__doc__)
main(calendar_path=arguments['CALENDAR_PATH'],
credentials_path=arguments['CREDENTIALS_PATH'],
events_output=arguments['EVENTS_OUTPUT'])
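# Example invocation (hypothetical file names), matching the docopt usage string above:
#   python feed_checker.py calendar.ics google_credentials.json upcoming_events.yaml
# This parses calendar.ics, keeps only VEVENT entries whose DTSTART lies after the
# current UTC time, and writes them to upcoming_events.yaml under 'upcoming_events'.
# Note that CREDENTIALS_PATH is accepted but not otherwise used by main().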
|
enkidulan/hangout_automated_manager
|
feed_checker.py
|
Python
|
apache-2.0
| 2,080
|